mirror of https://github.com/rclone/rclone.git synced 2026-01-08 19:43:58 +00:00

Compare commits


1 commit

Author: Nick Craig-Wood
SHA1: c520e52696
Message: filter: add metadata filters FIXME needs docs and tests
Fixes #6353
Date: 2022-08-04 18:19:45 +01:00
870 changed files with 61189 additions and 130307 deletions
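The metadata filters added by this commit match against an object's metadata expressed as key=value pairs, mirroring the existing file-name filters. A hedged usage sketch (flag names as they appear in rclone's filtering docs for the release this landed in; the exact pattern syntax is an assumption, since the commit message itself notes the docs were still to be written):

    rclone copy --metadata-include "content-type=text/*" source: dest:
    rclone ls --metadata-exclude "cache-control=*" remote:bucket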

4 .github/FUNDING.yml vendored Normal file

@@ -0,0 +1,4 @@
github: [ncw]
patreon: njcw
liberapay: ncw
custom: ["https://rclone.org/donate/"]


@@ -1,6 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"


@@ -8,31 +8,29 @@ name: build
on:
push:
branches:
- '**'
- '*'
tags:
- '**'
- '*'
pull_request:
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
required: true
default: true
jobs:
build:
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.17', 'go1.18']
include:
- job_name: linux
os: ubuntu-latest
go: '1.21'
go: '1.19.x'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -43,14 +41,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '1.21'
go: '1.19.x'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-11
go: '1.21'
go: '1.19.x'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -59,14 +57,14 @@ jobs:
- job_name: mac_arm64
os: macos-11
go: '1.21'
go: '1.19.x'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
go: '1.21'
go: '1.19.x'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -76,20 +74,20 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '1.21'
go: '1.19.x'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.19
- job_name: go1.17
os: ubuntu-latest
go: '1.19'
go: '1.17.x'
quicktest: true
racequicktest: true
- job_name: go1.20
- job_name: go1.18
os: ubuntu-latest
go: '1.20'
go: '1.18.x'
quicktest: true
racequicktest: true
@@ -99,13 +97,14 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v4
uses: actions/setup-go@v2
with:
stable: 'false'
go-version: ${{ matrix.go }}
check-latest: true
@@ -124,17 +123,12 @@ jobs:
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse3 libfuse-dev rpm pkg-config
sudo apt-get install fuse libfuse-dev rpm pkg-config
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
shell: bash
run: |
# https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
# https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
unset HOMEBREW_NO_INSTALL_FROM_API
brew untap --force homebrew/core
brew untap --force homebrew/cask
brew update
brew install --cask macfuse
if: matrix.os == 'macos-11'
@@ -168,7 +162,7 @@ jobs:
env
- name: Go module cache
uses: actions/cache@v3
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -222,17 +216,20 @@ jobs:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)'
# Deploy binaries if enabled in config && not a PR && not a fork
if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
lint:
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30
name: "lint"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
# FIXME temporary until golangci-lint supports go1.19
go-version: 1.18.x
- name: Code quality test
uses: golangci/golangci-lint-action@v3
@@ -240,39 +237,26 @@ jobs:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: latest
# Run govulncheck on the latest go version, the one we build binaries with
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: '1.21'
check-latest: true
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
- name: Scan for vulnerabilities
run: govulncheck ./...
android:
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Set up Go
uses: actions/setup-go@v4
uses: actions/setup-go@v1
with:
go-version: '1.21'
go-version: 1.19.x
- name: Go module cache
uses: actions/cache@v3
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -357,4 +341,4 @@ jobs:
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
if: github.head_ref == '' && github.repository == 'rclone/rclone'


@@ -1,61 +0,0 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user who created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
# `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
# GitHub Actions at the start of a workflow run to identify the job.
# This is used to authenticate against GitHub Container Registry.
# See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
# for more detailed information.
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and publish image
uses: docker/build-push-action@v5
with:
file: Dockerfile
context: .
push: true # push the image to ghcr
tags: |
ghcr.io/rclone/rclone:beta
rclone/rclone:beta
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
cache-from: type=gha
cache-to: type=gha,mode=max
provenance: false
# Eventually cache will need to be cleared if builds more frequent than once a week
# https://github.com/docker/build-push-action/issues/252


@@ -0,0 +1,26 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
with:
tag: beta
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}


@@ -11,7 +11,7 @@ jobs:
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Get actual patch version
@@ -40,7 +40,7 @@ jobs:
name: Build docker plugin job
steps:
- name: Checkout master
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build and publish docker plugin


@@ -1,14 +0,0 @@
name: Publish to Winget
on:
release:
types: [released]
jobs:
publish:
runs-on: ubuntu-latest
steps:
- uses: vedantmgoyal2009/winget-releaser@v2
with:
identifier: Rclone.Rclone
installers-regex: '-windows-\w+\.zip$'
token: ${{ secrets.WINGET_TOKEN }}

2 .gitignore vendored

@@ -8,10 +8,10 @@ rclone.iml
.idea
.history
*.test
*.log
*.iml
fuzz-build.zip
*.orig
*.rej
Thumbs.db
__pycache__
.DS_Store


@@ -2,17 +2,15 @@
linters:
enable:
- deadcode
- errcheck
- goimports
- revive
- ineffassign
- structcheck
- varcheck
- govet
- unconvert
- staticcheck
- gosimple
- stylecheck
- unused
- misspell
#- prealloc
#- maligned
disable-all: true
@@ -22,79 +20,11 @@ issues:
exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-issues-per-linter: 0
max-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0
exclude-rules:
- linters:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
# don't disable the revive messages about comments on exported functions
include:
- EXC0012
- EXC0013
- EXC0014
- EXC0015
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m
linters-settings:
revive:
# setting rules seems to disable all the rules, so re-enable them here
rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
- name: empty-block
disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
- name: increment-decrement
disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]


@@ -77,7 +77,7 @@ Make sure you
* Add [documentation](#writing-documentation) for a new feature.
* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
When you are done with that push your changes to GitHub:
When you are done with that push your changes to Github:
git push -u origin my-new-feature
@@ -88,7 +88,7 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
## Using Git and GitHub ##
## Using Git and Github ##
### Committing your changes ###
@@ -419,7 +419,7 @@ remote or an fs.
Research
* Look at the interfaces defined in `fs/types.go`
* Look at the interfaces defined in `fs/fs.go`
* Study one or more of the existing remotes
Getting going
@@ -428,19 +428,14 @@ Getting going
* box is a good one to start from if you have a directory-based remote
* b2 is a good one to start from if you have a bucket-based remote
* Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good go SDK then use that instead.
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
* Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* `rclone purge -v TestRemote:rclone-info`
* `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
* `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
* open `remote.csv` in a spreadsheet and examine
Important:
* Please use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend. It makes maintenance much easier.
* If your backend is HTTP based then please use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
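As a concrete starting point, this is a minimal sketch of the registration boilerplate a new remote needs, following the fs.RegInfo pattern the existing backends use (compare the b2 init further down this page); the package name, option name and help text are placeholders:

// Package remote implements a hypothetical example backend.
package remote

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "remote", // name used in "rclone config"
		Description: "Example Remote",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "endpoint",
			Help:     "Endpoint for the service.\n\nLeave blank normally.",
			Advanced: true,
		}},
	})
}

// NewFs builds the Fs from the config - the real work of the backend goes here.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, fmt.Errorf("not implemented yet")
}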
Unit tests
* Create a config entry called `TestRemote` for the unit tests to use
@@ -474,7 +469,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
* `README.md` - main GitHub page
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
* make sure this has the `autogenerated options` comments in (see your reference backend docs)
* update them in your backend with `bin/make_backend_docs.py remote`
* update them with `make backenddocs` - revert any changes in other backends
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/_index.md` - front page of rclone.org
@@ -484,46 +479,6 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
## Adding a new s3 provider
It is quite easy to add a new S3 provider to rclone.
You'll need to modify the following files
- `backend/s3/s3.go`
- Add the provider to `providerOption` at the top of the file
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
- Exclude your provider from generic config questions (e.g. `region` and `endpoint`).
- Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
- Add the provider at the top of the page.
- Add a section about the provider linked from there.
- Add a transcript of a trial `rclone config` session
- Edit the transcript to remove things which might change in subsequent versions
- **Do not** alter or add to the autogenerated parts of `s3.md`
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- `README.md` - this is the home page on GitHub
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
When adding the provider, endpoints, quirks, docs etc keep them in
alphabetical order by `Provider` name, but with `AWS` first and
`Other` last.
Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
Once you've written the code, test `rclone config` works to your
satisfaction, and check the integration tests work `go test -v -remote
NewS3Provider:`. You may need to adjust the quirks to get them to
pass. Some providers just can't pass the tests with control characters
in the names so if these fail and the provider doesn't support
`urlEncodeListings` in the quirks then ignore them. Note that the
`SetTier` test may also fail on non AWS providers.
For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).
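For the first bullet, the provider list in `backend/s3/s3.go` is a set of fs.OptionExample values; adding one looks roughly like this sketch (the MyProvider entry is hypothetical; the ordering follows the rule above, AWS first and Other last):

	Examples: []fs.OptionExample{{
		Value: "AWS",
		Help:  "Amazon Web Services (AWS) S3",
	}, {
		// ... other providers, alphabetical ...
	}, {
		Value: "MyProvider", // hypothetical new provider, slotted alphabetically
		Help:  "MyProvider Object Storage",
	}, {
		Value: "Other",
		Help:  "Any other S3 compatible provider",
	}},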
## Writing a plugin ##
New features (backends, commands) can also be added "out-of-tree", through Go plugins.


@@ -11,7 +11,7 @@ RUN ./rclone version
# Begin final image
FROM alpine:latest
RUN apk --no-cache add ca-certificates fuse3 tzdata && \
RUN apk --no-cache add ca-certificates fuse tzdata && \
echo "user_allow_other" >> /etc/fuse.conf
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/


@@ -16,11 +16,6 @@ Current active maintainers of rclone are:
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend |
| albertony | @albertony | |
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
| Hideo Aoyama | @boukendesho | snap packaging |
| nielash | @nielash | bisync |
**This is a work in progress Draft**

33819 MANUAL.html generated

File diff suppressed because it is too large.

13335 MANUAL.md generated

File diff suppressed because it is too large.

34391 MANUAL.txt generated

File diff suppressed because it is too large.


@@ -66,10 +66,6 @@ btest:
@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
@echo "Copied markdown of beta release to clip board"
btesth:
@echo "<a href="$(BETA_URL)">$(TAG)</a> on branch <a href="https://github.com/rclone/rclone/tree/$(BRANCH)">$(BRANCH)</a> (uploaded in 15-30 mins)" | xclip -r -sel clip -t text/html
@echo "Copied beta release in HTML to clip board"
version:
@echo '$(TAG)'
@@ -85,9 +81,6 @@ quicktest:
racequicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
compiletest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
# Do source code quality checks
check: rclone
@echo "-- START CODE QUALITY REPORT -------------------------------"
@@ -100,7 +93,7 @@ build_dep:
# Get the release dependencies we only install on linux
release_dep_linux:
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'
# Get the release dependencies we only install on Windows
release_dep_windows:


@@ -25,19 +25,18 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
* Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
@@ -46,14 +45,11 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
@@ -63,32 +59,23 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)


@@ -10,7 +10,7 @@ This file describes how to make the various kinds of releases
## Making a release
* git checkout master # see below for stable branch
* git pull # IMPORTANT
* git pull
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green
* make test # see integration test server or run locally
@@ -21,7 +21,6 @@ This file describes how to make the various kinds of releases
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push origin # without --follow-tags so it doesn't push the tag if it fails
* git push --follow-tags origin
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
@@ -54,14 +53,6 @@ doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.
## Tidy beta
At some point after the release run
bin/tidy-beta v1.55
where the version number is that of a couple ago to remove old beta binaries.
## Making a point release
If rclone needs a point release due to some horrendous bug:
@@ -75,7 +66,8 @@ Set vars
First make the release branch. If this is a second point release then
this will be done already.
* git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
* git branch ${BASE_TAG} ${BASE_TAG}-stable
* git co ${BASE_TAG}-stable
* make startstable
Now
@@ -90,28 +82,6 @@ Now
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push
## Update the website between releases
Create an update website branch based off the last release
git co -b update-website
If the branch already exists, double check there are no commits that need saving.
Now reset the branch to the last release
git reset --hard v1.64.0
Create the changes, check them in, test with `make serve` then
make upload_test_website
Check out https://test.rclone.org and when happy
make upload_website
Cherry pick any changes back to master and the stable branch if it is active.
## Making a manual build of docker
The rclone docker image should autobuild via GitHub actions. If it doesn't


@@ -1 +1 @@
v1.65.0
v1.60.0


@@ -1,4 +1,3 @@
// Package alias implements a virtual provider to rename existing remotes.
package alias
import (


@@ -1,4 +1,3 @@
// Package all imports all the backends
package all
import (
@@ -24,6 +23,7 @@ import (
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr"
@@ -34,20 +34,15 @@ import (
_ "github.com/rclone/rclone/backend/netstorage"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/quatrix"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/smb"
_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"


@@ -556,9 +556,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// This is a workaround for Amazon sometimes returning
//
// - 408 REQUEST_TIMEOUT
// - 504 GATEWAY_TIMEOUT
// - 500 Internal server error
// * 408 REQUEST_TIMEOUT
// * 504 GATEWAY_TIMEOUT
// * 500 Internal server error
//
// At the end of large uploads. The speculation is that the timeout
// is waiting for the sha1 hashing to complete and the file may well
@@ -626,7 +626,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,
// Put the object into the container
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -685,9 +685,9 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1002,6 +1002,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// ModTime returns the modification time of the object
//
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {

File diff suppressed because it is too large.


@@ -20,18 +20,17 @@ func (f *Fs) InternalTest(t *testing.T) {
func TestIncrement(t *testing.T) {
for _, test := range []struct {
in [8]byte
want [8]byte
in []byte
want []byte
}{
{[8]byte{0, 0, 0, 0}, [8]byte{1, 0, 0, 0}},
{[8]byte{0xFE, 0, 0, 0}, [8]byte{0xFF, 0, 0, 0}},
{[8]byte{0xFF, 0, 0, 0}, [8]byte{0, 1, 0, 0}},
{[8]byte{0, 1, 0, 0}, [8]byte{1, 1, 0, 0}},
{[8]byte{0xFF, 0xFF, 0xFF, 0xFE}, [8]byte{0, 0, 0, 0xFF}},
{[8]byte{0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 1}},
{[8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 0, 0, 0}},
{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
} {
increment(&test.in)
increment(test.in)
assert.Equal(t, test.want, test.in)
}
}
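The increment under test is defined elsewhere in the backend; a sketch of the little-endian counter semantics the cases above imply (an assumption reconstructed from the test data, not the backend's actual code):

func increment(xs []byte) {
	for i, x := range xs {
		newx := x + 1
		xs[i] = newx
		if newx >= x {
			break // no wrap, the carry stops here
		}
		// newx wrapped to 0, so carry into the next byte
	}
}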


@@ -6,10 +6,10 @@
package azureblob
import (
"context"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)
@@ -17,31 +17,10 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
})
}
// TestIntegration2 runs integration tests against the remote
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestAzureBlob"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{},
})
}
@@ -53,6 +32,36 @@ var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
)
// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
func TestServicePrincipalFileSuccess(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"password": "my secret",
"tenant": "my active directory tenant ID"
}
`
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
if assert.NoError(t, err) {
assert.NotNil(t, tokenRefresher)
}
}
// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
func TestServicePrincipalFileFailure(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"tenant": "my active directory tenant ID"
}
`
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
assert.Error(t, err)
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
}
func TestValidateAccessTier(t *testing.T) {
tests := map[string]struct {
accessTier string

137 backend/azureblob/imds.go Normal file

@@ -0,0 +1,137 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
const (
azureResource = "https://storage.azure.com"
imdsAPIVersion = "2018-02-01"
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)
// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string
type msiIdentifierType int
const (
msiClientID msiIdentifierType = iota
msiObjectID
msiResourceID
)
type userMSI struct {
Type msiIdentifierType
Value string
}
type httpError struct {
Response *http.Response
}
func (e httpError) Error() string {
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}
// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// Attempt to get an MSI token; silently continue if unsuccessful.
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
result := adal.Token{}
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
if err != nil {
fs.Debugf(nil, "Failed to create request: %v", err)
return result, err
}
params := req.URL.Query()
params.Set("resource", azureResource)
params.Set("api-version", imdsAPIVersion)
// Specify user-assigned identity if requested.
if identity != nil {
switch identity.Type {
case msiClientID:
params.Set("client_id", identity.Value)
case msiObjectID:
params.Set("object_id", identity.Value)
case msiResourceID:
params.Set("mi_res_id", identity.Value)
default:
// If this happens, the calling function and this one don't agree on
// what valid ID types exist.
return result, fmt.Errorf("unknown MSI identity type specified")
}
}
req.URL.RawQuery = params.Encode()
// The Metadata header is required by all calls to IMDS.
req.Header.Set("Metadata", "true")
// If this function is run in a test, query the test server instead of IMDS.
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
if isTest {
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
req.Host = req.URL.Host
}
// Send request
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
}
err = resp.Body.Close()
if err != nil {
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
}
}()
// Check if the status code indicates success
// The request returns 200 currently, add 201 and 202 as well for possible extension.
switch resp.StatusCode {
case 200, 201, 202:
break
default:
body, _ := ioutil.ReadAll(resp.Body)
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
return result, httpError{Response: resp}
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, fmt.Errorf("couldn't read IMDS response: %w", err)
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
// This would be a good place to persist the token if a large number of rclone
// invocations are being made in a short amount of time. If the token is
// persisted, the azureblob code will need to check for expiry before every
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, fmt.Errorf("couldn't unmarshal IMDS response: %w", err)
}
return result, nil
}
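A minimal call sketch using the types above (passing nil selects the system-assigned identity, per the parameter handling in GetMSIToken; Expires comes from the adal package):

	token, err := GetMSIToken(context.Background(), nil)
	if err != nil {
		fs.Debugf(nil, "MSI token not available: %v", err)
	} else {
		fs.Debugf(nil, "got MSI token, expires %v", token.Expires())
	}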


@@ -0,0 +1,118 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
require.NoError(t, err)
parameters := r.URL.Query()
(*actual)["path"] = r.URL.Path
(*actual)["Metadata"] = r.Header.Get("Metadata")
(*actual)["method"] = r.Method
for paramName := range parameters {
(*actual)[paramName] = parameters.Get(paramName)
}
// Make response.
response := adal.Token{}
responseBytes, err := json.Marshal(response)
require.NoError(t, err)
_, err = w.Write(responseBytes)
require.NoError(t, err)
}
}
func TestManagedIdentity(t *testing.T) {
// test user-assigned identity specifiers to use
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
tests := []struct {
identity *userMSI
identityParameterName string
expectedAbsent []string
}{
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
}
alwaysExpected := map[string]string{
"path": "/metadata/identity/oauth2/token",
"resource": "https://storage.azure.com",
"Metadata": "true",
"api-version": "2018-02-01",
"method": "GET",
}
for _, test := range tests {
actual := make(map[string]string, 10)
testServer := httptest.NewServer(handler(t, &actual))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, test.identity)
require.NoError(t, err)
// Validate expected query parameters present
expected := make(map[string]string)
for k, v := range alwaysExpected {
expected[k] = v
}
if test.identity != nil {
expected[test.identityParameterName] = test.identity.Value
}
for key := range expected {
value, exists := actual[key]
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
test.identityParameterName, key) {
assert.Equalf(t, expected[key], value,
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
}
}
// Validate unexpected query parameters absent
for _, key := range test.expectedAbsent {
_, exists := actual[key]
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed", key)
}
}
}
func errorHandler(resultCode int) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Test error generated", resultCode)
}
}
func TestIMDSErrors(t *testing.T) {
errorCodes := []int{404, 429, 500}
for _, code := range errorCodes {
testServer := httptest.NewServer(errorHandler(code))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, nil)
require.Error(t, err)
httpErr, ok := err.(httpError)
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
}
}


@@ -1,4 +1,3 @@
// Package api provides types used by the Backblaze B2 API.
package api
import (
@@ -33,18 +32,10 @@ var _ fserrors.Fataler = (*Error)(nil)
// Bucket describes a B2 bucket
type Bucket struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}
// LifecycleRule is a single lifecycle rule
type LifecycleRule struct {
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
FileNamePrefix string `json:"fileNamePrefix"`
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// Timestamp is a UTC time when this file was uploaded. It is a base
@@ -214,10 +205,9 @@ type FileInfo struct {
// CreateBucketRequest is used to create a bucket
type CreateBucketRequest struct {
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// DeleteBucketRequest is used to create a bucket
@@ -248,7 +238,7 @@ type GetFileInfoRequest struct {
// If the original source of the file being uploaded has a last
// modified time concept, Backblaze recommends using
// src_last_modified_millis as the name, and a string holding the base
// 10 number of milliseconds since midnight, January 1, 1970
// 10 number number of milliseconds since midnight, January 1, 1970
// UTC. This fits in a 64 bit integer such as the type "long" in the
// programming language Java. It is intended to be compatible with
// Java's time long. For example, it can be passed directly into the
@@ -340,11 +330,3 @@ type CopyPartRequest struct {
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
}
// UpdateBucketRequest describes a request to modify a B2 bucket
type UpdateBucketRequest struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Type string `json:"bucketType,omitempty"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}


@@ -1,4 +1,4 @@
// Package b2 provides an interface to the Backblaze B2 object storage system.
// Package b2 provides an interface to the Backblaze B2 object storage system
package b2
// FIXME should we remove sha1 checks from here as rclone now supports
@@ -32,7 +32,6 @@ import (
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest"
@@ -58,7 +57,9 @@ const (
minChunkSize = 5 * fs.Mebi
defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
// Globals
@@ -73,17 +74,14 @@ func init() {
Name: "b2",
Description: "Backblaze B2",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "account",
Help: "Account ID or Application Key ID.",
Required: true,
Sensitive: true,
Name: "account",
Help: "Account ID or Application Key ID.",
Required: true,
}, {
Name: "key",
Help: "Application Key.",
Required: true,
Sensitive: true,
Name: "key",
Help: "Application Key.",
Required: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -149,18 +147,6 @@ might a maximum of "--transfers" chunks in progress at once.
5,000,000 Bytes is the minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
Note that chunks are stored in memory and there may be up to
"--transfers" * "--b2-upload-concurrency" chunks stored at once
in memory.`,
Default: 4,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files.
@@ -200,41 +186,16 @@ The minimum value is 1 second. The maximum value is one week.`,
Advanced: true,
}, {
Name: "memory_pool_flush_time",
Default: fs.Duration(time.Minute),
Default: memoryPoolFlushTime,
Advanced: true,
Hide: fs.OptionHideBoth,
Help: `How often internal memory buffer pools will be flushed. (no longer used)`,
Help: `How often internal memory buffer pools will be flushed.
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, {
Name: "memory_pool_use_mmap",
Default: false,
Advanced: true,
Hide: fs.OptionHideBoth,
Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
}, {
Name: "lifecycle",
Help: `Set the number of days deleted files should be kept when creating a bucket.
On bucket creation, this parameter is used to create a lifecycle rule
for the entire bucket.
If lifecycle is 0 (the default) it does not create a lifecycle rule so
the default B2 behaviour applies. This is to create versions of files
on delete and overwrite and to keep them indefinitely.
If lifecycle is >0 then it creates a single rule setting the number of
days before a file that is deleted or overwritten is deleted
permanently. This is known as daysFromHidingToDeleting in the b2 docs.
The minimum value for this parameter is 1 day.
You can also enable hard_delete in the config also which will mean
deletions won't cause versions but overwrites will still cause
versions to be made.
See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket creation.
`,
Default: 0,
Default: memoryPoolUseMmap,
Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -261,11 +222,11 @@ type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"`
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
Lifecycle int `config:"lifecycle"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -290,6 +251,7 @@ type Fs struct {
authMu sync.Mutex // lock for authorizing the account
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
}
// Object describes a b2 object
@@ -494,6 +456,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
uploads: make(map[string][]*api.GetUploadURLResponse),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
ci.Transfers,
opt.MemoryPoolUseMmap,
),
}
f.setRoot(root)
f.features = (&fs.Features{
@@ -627,24 +595,23 @@ func (f *Fs) clearUploadURL(bucketID string) {
f.uploadMu.Unlock()
}
// getRW gets a RW buffer and an upload token
// getBuf gets a buffer of f.opt.ChunkSize and an upload token
//
// If noBuf is set then it just gets an upload token
func (f *Fs) getRW(noBuf bool) (rw *pool.RW) {
func (f *Fs) getBuf(noBuf bool) (buf []byte) {
f.uploadToken.Get()
if !noBuf {
rw = multipart.NewRW()
buf = f.pool.Get()
}
return rw
return buf
}
// putRW returns a RW buffer to the memory pool and returns an upload
// token
// putBuf returns a buffer to the memory pool and an upload token
//
// If buf is nil then it just returns the upload token
func (f *Fs) putRW(rw *pool.RW) {
if rw != nil {
_ = rw.Close()
// If noBuf is set then it just returns the upload token
func (f *Fs) putBuf(buf []byte, noBuf bool) {
if !noBuf {
f.pool.Put(buf)
}
f.uploadToken.Put()
}
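Both sides of this diff follow the same acquire/release pairing; a usage sketch against the newer getRW/putRW form from the lines above:

	rw := f.getRW(false) // an RW buffer plus an upload token
	defer f.putRW(rw)    // returns the buffer and the token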
@@ -689,15 +656,15 @@ var errEndList = errors.New("end list")
//
// (bucket, directory) is the starting directory
//
// If prefix is set then it is removed from all file names.
// If prefix is set then it is removed from all file names
//
// If addBucket is set then it adds the bucket to the start of the
// remotes generated.
// remotes generated
//
// If recurse is set the function will recursively list.
// If recurse is set the function will recursively list
//
// If limit is > 0 then it limits to that many files (must be less
// than 1000).
// than 1000)
//
// If hidden is set then it will list the hidden (deleted) files too.
//
@@ -851,7 +818,7 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
// listBuckets returns all the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
err = f.listBucketsToFn(ctx, "", func(bucket *api.Bucket) error {
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
d := fs.NewDir(bucket.Name, time.Time{})
entries = append(entries, d)
return nil
@@ -944,14 +911,11 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
type listBucketFn func(*api.Bucket) error
// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
var account = api.ListBucketsRequest{
AccountID: f.info.AccountID,
BucketID: f.info.Allowed.BucketID,
}
if bucketName != "" && account.BucketID == "" {
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
}
var response api.ListBucketsResponse
opts := rest.Opts{
@@ -997,7 +961,7 @@ func (f *Fs) getbucketType(ctx context.Context, bucket string) (bucketType strin
if bucketType != "" {
return bucketType, nil
}
err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
// listBucketsToFn reads bucket Types
return nil
})
@@ -1032,7 +996,7 @@ func (f *Fs) getBucketID(ctx context.Context, bucket string) (bucketID string, e
if bucketID != "" {
return bucketID, nil
}
err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
// listBucketsToFn sets IDs
return nil
})
@@ -1061,7 +1025,7 @@ func (f *Fs) clearBucketID(bucket string) {
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1096,11 +1060,6 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
Name: f.opt.Enc.FromStandardName(bucket),
Type: "allPrivate",
}
if f.opt.Lifecycle > 0 {
request.LifecycleRules = []api.LifecycleRule{{
DaysFromHidingToDeleting: &f.opt.Lifecycle,
}}
}
var response api.Bucket
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
@@ -1262,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
fs.Errorf(object.Name, "Can't create object %v", err)
continue
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
err = f.deleteByID(ctx, object.ID, object.Name)
checkErr(err)
tr.Done(ctx, err)
@@ -1276,7 +1235,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
if err != nil {
fs.Errorf(object, "Can't create object %+v", err)
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
if oldOnly && last != remote {
// Check current version of the file
if object.Action == "hide" {
@@ -1332,7 +1291,7 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
if err != nil {
return err
}
return up.Copy(ctx)
return up.Upload(ctx)
}
dstBucket, dstPath := dstObj.split()
@@ -1375,9 +1334,9 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1461,7 +1420,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
if err != nil {
return "", err
}
absPath := "/" + urlEncode(bucketPath)
absPath := "/" + bucketPath
link = RootURL + "/file/" + urlEncode(bucket) + absPath
bucketType, err := f.getbucketType(ctx, bucket)
if err != nil {
@@ -1519,7 +1478,7 @@ func (o *Object) Size() int64 {
// Clean the SHA1
//
// Make sure it is lower case.
// Make sure it is lower case
//
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (e.g. Cyberduck) use this
@@ -1531,11 +1490,10 @@ func cleanSHA1(sha1 string) string {
// decodeMetaDataRaw sets the metadata from the data passed in
//
// Sets
//
// o.id
// o.modTime
// o.size
// o.sha1
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp api.Timestamp, Info map[string]string, mimeType string) (err error) {
o.id = ID
o.sha1 = SHA1
@@ -1554,11 +1512,10 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
// decodeMetaData sets the metadata in the object from an api.File
//
// Sets
//
// o.id
// o.modTime
// o.size
// o.sha1
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) decodeMetaData(info *api.File) (err error) {
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}
@@ -1566,11 +1523,10 @@ func (o *Object) decodeMetaData(info *api.File) (err error) {
// decodeMetaDataFileInfo sets the metadata in the object from an api.FileInfo
//
// Sets
//
// o.id
// o.modTime
// o.size
// o.sha1
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}
@@ -1628,11 +1584,10 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
//
// o.id
// o.modTime
// o.size
// o.sha1
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.id != "" {
return nil
@@ -1900,11 +1855,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
if size < 0 {
if size == -1 {
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
rw := o.fs.getRW(false)
buf := o.fs.getBuf(false)
n, err := io.CopyN(rw, in, int64(o.fs.opt.ChunkSize))
n, err := io.ReadFull(in, buf)
if err == nil {
bufReader := bufio.NewReader(in)
in = bufReader
@@ -1915,30 +1870,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
if err != nil {
o.fs.putRW(rw)
o.fs.putBuf(buf, false)
return err
}
// NB Stream returns the buffer and token
return up.Stream(ctx, rw)
} else if err == io.EOF {
return up.Stream(ctx, buf)
} else if err == io.EOF || err == io.ErrUnexpectedEOF {
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
defer o.fs.putRW(rw)
size = n
in = rw
defer o.fs.putBuf(buf, false)
size = int64(n)
in = bytes.NewReader(buf[:n])
} else {
o.fs.putRW(rw)
o.fs.putBuf(buf, false)
return err
}
} else if size > int64(o.fs.opt.UploadCutoff) {
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
Open: o.fs,
OpenOptions: options,
})
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
if err != nil {
return err
}
up := chunkWriter.(*largeUpload)
return o.decodeMetaDataFileInfo(up.info)
return up.Upload(ctx)
}
modTime := src.ModTime(ctx)
@@ -2046,41 +1997,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.decodeMetaDataFileInfo(&response)
}
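The streaming branch of Update above decides between a one-shot upload and chunked streaming by reading at most one chunk ahead of the source. A standalone sketch of that decision, with made-up names and none of rclone's buffer pool or API machinery:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// uploadDecision buffers up to one chunk from in. If the source ends
// inside that first chunk the caller can do a single direct upload of
// buf; otherwise buf plus the rest of in must go through the chunked
// path.
func uploadDecision(in io.Reader, chunkSize int64) (direct bool, buf []byte, rest io.Reader, err error) {
	var first bytes.Buffer
	n, err := io.CopyN(&first, in, chunkSize)
	if err == io.EOF {
		// Fewer than chunkSize bytes existed - one chunk is enough.
		return true, first.Bytes()[:n], nil, nil
	} else if err != nil {
		return false, nil, nil, err
	}
	// A full chunk was read and more may follow - stream it.
	return false, first.Bytes(), in, nil
}

func main() {
	direct, buf, _, _ := uploadDecision(strings.NewReader("hi"), 4)
	fmt.Println(direct, string(buf)) // true hi

	direct, buf, rest, _ := uploadDecision(strings.NewReader("hello world"), 4)
	tail, _ := io.ReadAll(rest)
	fmt.Println(direct, string(buf), string(tail)) // false hell o world
}
```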
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// FIXME what if file is smaller than 1 chunk?
if f.opt.Versions {
return info, nil, errNotWithVersions
}
if f.opt.VersionAt.IsSet() {
return info, nil, errNotWithVersionAt
}
//size := src.Size()
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
bucket, _ := o.split()
err = f.makeBucket(ctx, bucket)
if err != nil {
return info, nil, err
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(f.opt.ChunkSize),
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
return info, up, err
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
bucket, bucketPath := o.split()
@@ -2106,149 +2022,16 @@ func (o *Object) ID() string {
return o.id
}
var lifecycleHelp = fs.CommandHelp{
Name: "lifecycle",
Short: "Read or set the lifecycle for a bucket",
Long: `This command can be used to read or set the lifecycle for a bucket.
Usage Examples:
To show the current lifecycle rules:
rclone backend lifecycle b2:bucket
This will dump output like the following, showing the lifecycle rules.
[
{
"daysFromHidingToDeleting": 1,
"daysFromUploadingToHiding": null,
"fileNamePrefix": ""
}
]
If there are no lifecycle rules (the default) then it will just return [].
To reset the current lifecycle rules:
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
This will run and then print the new lifecycle rules as above.
Rclone only lets you set lifecycles for the whole bucket with the
fileNamePrefix = "".
You can't disable versioning with B2. The best you can do is to set
the daysFromHidingToDeleting to 1 day. You can also enable hard_delete
in the config, which means deletions won't create versions, though
overwrites still will.
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
`,
Opts: map[string]string{
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
},
}
func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
var newRule api.LifecycleRule
if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromHidingToDeleting: %w", err)
}
newRule.DaysFromHidingToDeleting = &days
}
if daysStr := opt["daysFromUploadingToHiding"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromUploadingToHiding: %w", err)
}
newRule.DaysFromUploadingToHiding = &days
}
bucketName, _ := f.split("")
if bucketName == "" {
return nil, errors.New("bucket required")
}
var bucket *api.Bucket
if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
bucketID, err := f.getBucketID(ctx, bucketName)
if err != nil {
return nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_update_bucket",
}
var request = api.UpdateBucketRequest{
ID: bucketID,
AccountID: f.info.AccountID,
LifecycleRules: []api.LifecycleRule{newRule},
}
var response api.Bucket
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
bucket = &response
} else {
err = f.listBucketsToFn(ctx, bucketName, func(b *api.Bucket) error {
bucket = b
return nil
})
if err != nil {
return nil, err
}
}
if bucket == nil {
return nil, fs.ErrorDirNotFound
}
return bucket.LifecycleRules, nil
}
var commandHelp = []fs.CommandHelp{
lifecycleHelp,
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "lifecycle":
return f.lifecycleCommand(ctx, name, arg, opt)
default:
return nil, fs.ErrorCommandNotFound
}
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Commander = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
)
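Both versions of the var block above use Go's standard compile-time interface check: assigning a value to the blank identifier fails the build if the type ever stops satisfying the interface, at zero runtime cost. The idiom in miniature:

```go
package main

import "fmt"

type Stringer interface{ String() string }

type thing struct{}

func (thing) String() string { return "thing" }

// Compile-time check: if thing stops satisfying Stringer, this
// assignment no longer compiles. Nothing happens at runtime.
var _ Stringer = thing{}

func main() { fmt.Println(thing{}) }
```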

View File

@@ -5,6 +5,7 @@
package b2
import (
"bytes"
"context"
"crypto/sha1"
"encoding/hex"
@@ -20,7 +21,6 @@ import (
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup"
)
@@ -78,14 +78,12 @@ type largeUpload struct {
wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded
size int64 // total size
parts int // calculated number of parts, if known
sha1smu sync.Mutex // mutex to protect sha1s
parts int64 // calculated number of parts, if known
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
chunkSize int64 // chunk size to use
src *Object // if copying, object we are reading from
info *api.FileInfo // final response with info about the object
}
// newLargeUpload starts an upload of object o from in with metadata in src
@@ -93,16 +91,18 @@ type largeUpload struct {
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
size := src.Size()
parts := 0
parts := int64(0)
sha1SliceSize := int64(maxParts)
chunkSize := defaultChunkSize
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
parts = int(size / int64(chunkSize))
chunkSize = chunksize.Calculator(src, maxParts, defaultChunkSize)
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
}
sha1SliceSize = parts
}
opts := rest.Opts{
@@ -150,7 +150,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
id: response.ID,
size: size,
parts: parts,
sha1s: make([]string, 0, 16),
sha1s: make([]string, sha1SliceSize),
chunkSize: int64(chunkSize),
}
// unwrap the accounting from the input, we use wrap to put it
@@ -169,26 +169,24 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock()
if len(up.uploads) > 0 {
defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:]
up.uploadMu.Unlock()
return upload, nil
}
up.uploadMu.Unlock()
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err = up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
return upload, nil
}
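getUploadURL and returnUploadURL together form a small free-list: upload URLs handed back after a part completes are reused before another b2_get_upload_part_url round trip is made. A generic sketch of the pattern (hypothetical fetch function, not the B2 API):

```go
package main

import (
	"fmt"
	"sync"
)

// pool reuses previously returned items before fetching new ones,
// mirroring how the uploader recycles upload URLs.
type pool[T any] struct {
	mu    sync.Mutex
	free  []T
	fetch func() T
}

func (p *pool[T]) get() T {
	p.mu.Lock()
	defer p.mu.Unlock()
	if n := len(p.free); n > 0 {
		item := p.free[n-1]
		p.free = p.free[:n-1]
		return item
	}
	return p.fetch() // no returned item to reuse - fetch a fresh one
}

func (p *pool[T]) put(item T) {
	p.mu.Lock()
	p.free = append(p.free, item)
	p.mu.Unlock()
}

func main() {
	n := 0
	p := &pool[int]{fetch: func() int { n++; return n }}
	a := p.get() // fetches 1
	p.put(a)
	fmt.Println(p.get(), p.get()) // 1 (reused), 2 (fetched)
}
```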
@@ -203,39 +201,10 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
up.uploadMu.Unlock()
}
// Add an sha1 to the being built up sha1s
func (up *largeUpload) addSha1(chunkNumber int, sha1 string) {
up.sha1smu.Lock()
defer up.sha1smu.Unlock()
if len(up.sha1s) < chunkNumber+1 {
up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
}
up.sha1s[chunkNumber] = sha1
}
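The master-side addSha1 records each part's checksum at its chunk index, growing the slice on demand because parts can finish out of order; the mutex makes the grow-and-assign step atomic. The same bookkeeping in isolation:

```go
package main

import (
	"fmt"
	"sync"
)

// sums collects per-chunk checksums that may arrive in any order.
type sums struct {
	mu sync.Mutex
	s  []string
}

// set stores v at index i, growing the slice on demand so writers
// that finish out of order never index past the end.
func (x *sums) set(i int, v string) {
	x.mu.Lock()
	defer x.mu.Unlock()
	if len(x.s) < i+1 {
		x.s = append(x.s, make([]string, i+1-len(x.s))...)
	}
	x.s[i] = v
}

func main() {
	var x sums
	var wg sync.WaitGroup
	for i, v := range []string{"aa", "bb", "cc"} {
		i, v := i, v // capture for the goroutine (pre-Go 1.22 loops)
		wg.Add(1)
		go func() {
			defer wg.Done()
			x.set(2-i, v) // write indices in reverse order
		}()
	}
	wg.Wait()
	fmt.Println(x.s) // [cc bb aa]
}
```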
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(1)
}
err = up.f.pacer.Call(func() (bool, error) {
// Discover the size by seeking to the end
size, err = reader.Seek(0, io.SeekEnd)
if err != nil {
return false, err
}
// rewind the reader on retry and after reading size
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
// Get upload URL
upload, err := up.getUploadURL(ctx)
@@ -243,8 +212,8 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
return false, err
}
in := newHashAppendingReader(reader, sha1.New())
sizeWithHash := size + int64(in.AdditionalLength())
in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
size := int64(len(body)) + int64(in.AdditionalLength())
// Authorization
//
@@ -274,10 +243,10 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
Body: up.wrap(in),
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1),
"X-Bz-Part-Number": fmt.Sprintf("%d", part),
sha1Header: "hex_digits_at_end",
},
ContentLength: &sizeWithHash,
ContentLength: &size,
}
var response api.UploadPartResponse
@@ -285,7 +254,7 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
retry, err := up.f.shouldRetry(ctx, resp, err)
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err)
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
// On retryable error clear PartUploadURL
if retry {
@@ -293,30 +262,30 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
upload = nil
}
up.returnUploadURL(upload)
up.addSha1(chunkNumber, in.HexSum())
up.sha1s[part-1] = in.HexSum()
return retry, err
})
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err)
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
} else {
fs.Debugf(up.o, "Done sending chunk %d", chunkNumber)
fs.Debugf(up.o, "Done sending chunk %d", part)
}
return size, err
return err
}
// Copy a chunk
func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error {
func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_part",
}
offset := int64(part) * up.chunkSize // where we are in the source file
offset := (part - 1) * up.chunkSize // where we are in the source file
var request = api.CopyPartRequest{
SourceID: up.src.id,
LargeFileID: up.id,
PartNumber: int64(part + 1),
PartNumber: part,
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
}
var response api.UploadPartResponse
@@ -325,7 +294,7 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
if err != nil {
fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
up.addSha1(part, response.SHA1)
up.sha1s[part-1] = response.SHA1
return retry, err
})
if err != nil {
@@ -336,8 +305,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
return err
}
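Both versions of copyChunk produce identical byte ranges; they differ only in whether the loop counts parts from 0 (master, PartNumber = part+1) or from 1 (old branch, offset = (part-1)*chunkSize), since B2 part numbers are 1-based. A quick arithmetic check that the two conventions agree:

```go
package main

import "fmt"

func main() {
	const chunkSize = int64(100)
	for i := 0; i < 3; i++ {
		// 0-based loop (master): offset = i*chunkSize, wire number = i+1
		off0, num0 := int64(i)*chunkSize, i+1
		// 1-based loop (old branch): part = i+1, offset = (part-1)*chunkSize
		part := i + 1
		off1, num1 := int64(part-1)*chunkSize, part
		fmt.Printf("chunk %d: offsets %d/%d numbers %d/%d range bytes=%d-%d\n",
			i, off0, off1, num0, num1, off0, off0+chunkSize-1)
	}
}
```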
// Close closes off the large upload
func (up *largeUpload) Close(ctx context.Context) error {
// finish closes off the large upload
func (up *largeUpload) finish(ctx context.Context) error {
fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
opts := rest.Opts{
Method: "POST",
@@ -355,12 +324,11 @@ func (up *largeUpload) Close(ctx context.Context) error {
if err != nil {
return err
}
up.info = &response
return nil
return up.o.decodeMetaDataFileInfo(&response)
}
// Abort aborts the large upload
func (up *largeUpload) Abort(ctx context.Context) error {
// cancel aborts the large upload
func (up *largeUpload) cancel(ctx context.Context) error {
fs.Debugf(up.o, "Cancelling large file %s", up.what)
opts := rest.Opts{
Method: "POST",
@@ -385,98 +353,128 @@ func (up *largeUpload) Abort(ctx context.Context) error {
// reaches EOF.
//
// Note that initialUploadBlock must be returned to f.putBuf()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
hasMoreParts = true
)
up.size = initialUploadBlock.Size()
for part := 0; hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
var rw *pool.RW
if part == 1 {
rw = initialUploadBlock
} else {
rw = up.f.getRW(false)
}
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putRW(rw)
break
}
// Read the chunk
var n int64
if part == 1 {
n = rw.Size()
} else {
n, err = io.CopyN(rw, up.in, up.chunkSize)
if err == io.EOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
hasMoreParts = false
} else if err != nil {
// other kinds of errors indicate failure
up.f.putRW(rw)
return err
up.size = int64(len(initialUploadBlock))
g.Go(func() error {
for part := int64(1); hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
var buf []byte
if part == 1 {
buf = initialUploadBlock
} else {
buf = up.f.getBuf(false)
}
}
// Keep stats up to date
up.parts = part
up.size += n
if part > maxParts {
up.f.putRW(rw)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putBuf(buf, false)
return nil
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putRW(rw)
_, err = up.WriteChunk(gCtx, part, rw)
return err
})
}
// Read the chunk
var n int
if part == 1 {
n = len(buf)
} else {
n, err = io.ReadFull(up.in, buf)
if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putBuf(buf, false)
return nil
} else if err != nil {
// other kinds of errors indicate failure
up.f.putBuf(buf, false)
return err
}
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, false)
return up.transferChunk(gCtx, part, buf)
})
}
return nil
})
err = g.Wait()
if err != nil {
return err
}
return up.Close(ctx)
up.sha1s = up.sha1s[:up.parts]
return up.finish(ctx)
}
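The old-branch Stream loop leans on io.ReadFull's three-way result: nil means a full chunk was read, io.ErrUnexpectedEOF marks a short final chunk, and a bare io.EOF means no bytes were left, so the previous chunk was the last. A self-contained demonstration:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("abcdefgh") // 8 bytes, chunk size 3
	buf := make([]byte, 3)
	for part := 1; ; part++ {
		n, err := io.ReadFull(r, buf)
		switch err {
		case nil:
			fmt.Printf("part %d: full chunk %q\n", part, buf[:n])
		case io.ErrUnexpectedEOF:
			fmt.Printf("part %d: short final chunk %q\n", part, buf[:n])
			return
		case io.EOF:
			fmt.Printf("no more bytes, part %d was never started\n", part)
			return
		default:
			panic(err)
		}
	}
}
// Output:
// part 1: full chunk "abc"
// part 2: full chunk "def"
// part 3: short final chunk "gh"
```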
// Copy the chunks from the source to the destination
func (up *largeUpload) Copy(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
// Upload uploads the chunks from the input
func (up *largeUpload) Upload(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
)
g.SetLimit(up.f.opt.UploadConcurrency)
for part := 0; part <= up.parts; part++ {
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in copying all the other parts.
if gCtx.Err() != nil {
break
}
g.Go(func() error {
for part := int64(1); part <= up.parts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
buf := up.f.getBuf(up.doCopy)
reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putBuf(buf, up.doCopy)
return nil
}
part := part // for the closure
g.Go(func() (err error) {
return up.copyChunk(gCtx, part, reqSize)
})
remaining -= reqSize
}
reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
if !up.doCopy {
// Read the chunk
buf = buf[:reqSize]
_, err = io.ReadFull(up.in, buf)
if err != nil {
up.f.putBuf(buf, up.doCopy)
return err
}
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, up.doCopy)
if !up.doCopy {
err = up.transferChunk(gCtx, part, buf)
} else {
err = up.copyChunk(gCtx, part, reqSize)
}
return err
})
remaining -= reqSize
}
return nil
})
err = g.Wait()
if err != nil {
return err
}
return up.Close(ctx)
return up.finish(ctx)
}
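The master-side Copy above caps in-flight parts with g.SetLimit(UploadConcurrency), while the old Upload instead wrapped the producer loop in a single g.Go and throttled through the buffer pool. A minimal errgroup.SetLimit sketch (golang.org/x/sync/errgroup, which this file already imports):

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, _ := errgroup.WithContext(context.Background())
	g.SetLimit(2) // at most two parts in flight, like --b2-upload-concurrency

	var inFlight, peak int32
	for i := 0; i < 6; i++ {
		// g.Go blocks here once two goroutines are already running.
		g.Go(func() error {
			n := atomic.AddInt32(&inFlight, 1)
			if n > atomic.LoadInt32(&peak) {
				atomic.StoreInt32(&peak, n) // racy max, fine for a demo
			}
			time.Sleep(10 * time.Millisecond) // pretend to upload a part
			atomic.AddInt32(&inFlight, -1)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		panic(err)
	}
	fmt.Println("peak in-flight parts:", atomic.LoadInt32(&peak)) // 2
}
```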

View File

@@ -14,7 +14,7 @@ const (
timeFormat = `"` + time.RFC3339 + `"`
)
// Time represents date and time information for the
// Time represents represents date and time information for the
// box API, by using RFC3339
type Time time.Time
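The Time alias plus the quoted timeFormat constant is the usual way to pin a JSON field to one wire format. A sketch of the matching MarshalJSON/UnmarshalJSON pair; hedged, since the real methods live elsewhere in this file and may differ in detail:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

const timeFormat = `"` + time.RFC3339 + `"` // note the embedded quotes

// Time marshals to/from RFC3339 instead of Go's default time format.
type Time time.Time

func (t Time) MarshalJSON() ([]byte, error) {
	// The quotes in the layout make this valid JSON as-is.
	return []byte(time.Time(t).Format(timeFormat)), nil
}

func (t *Time) UnmarshalJSON(data []byte) error {
	parsed, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(parsed)
	return nil
}

func main() {
	var v struct {
		ModifiedAt Time `json:"modified_at"`
	}
	in := `{"modified_at":"2022-08-04T18:19:45Z"}`
	if err := json.Unmarshal([]byte(in), &v); err != nil {
		panic(err)
	}
	out, _ := json.Marshal(v)
	fmt.Println(string(out)) // round-trips unchanged
}
```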
@@ -52,7 +52,7 @@ func (e *Error) Error() string {
out += ": " + e.Message
}
if e.ContextInfo != nil {
out += fmt.Sprintf(" (%s)", string(e.ContextInfo))
out += fmt.Sprintf(" (%+v)", e.ContextInfo)
}
return out
}
@@ -63,7 +63,7 @@ var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
// Types of things in Item/ItemMini
// Types of things in Item
const (
ItemTypeFolder = "folder"
ItemTypeFile = "file"
@@ -72,31 +72,20 @@ const (
ItemStatusDeleted = "deleted"
)
// ItemMini is a subset of the elements in a full Item returned by some API calls
type ItemMini struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
}
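Declaring SequenceID as int64 with the `,string` tag (master side) lets encoding/json accept box's quoted numbers while the program compares them numerically, which the old string-typed field couldn't do. A small demonstration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type item struct {
	// ",string" tells encoding/json that the number arrives quoted.
	SequenceID int64 `json:"sequence_id,string"`
}

func main() {
	var a, b item
	_ = json.Unmarshal([]byte(`{"sequence_id":"41"}`), &a)
	_ = json.Unmarshal([]byte(`{"sequence_id":"42"}`), &b)
	fmt.Println(b.SequenceID > a.SequenceID) // true: numeric comparison works
	out, _ := json.Marshal(b)
	fmt.Println(string(out)) // {"sequence_id":"42"} - re-encoded quoted
}
```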
// Item describes a folder or a file as returned by Get Folder Items and others
type Item struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
Parent ItemMini `json:"parent"`
Type string `json:"type"`
ID string `json:"id"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
SharedLink struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
@@ -292,30 +281,3 @@ type User struct {
Address string `json:"address"`
AvatarURL string `json:"avatar_url"`
}
// FileTreeChangeEventTypes are the events that can require cache invalidation
var FileTreeChangeEventTypes = map[string]struct{}{
"ITEM_COPY": {},
"ITEM_CREATE": {},
"ITEM_MAKE_CURRENT_VERSION": {},
"ITEM_MODIFY": {},
"ITEM_MOVE": {},
"ITEM_RENAME": {},
"ITEM_TRASH": {},
"ITEM_UNDELETE_VIA_TRASH": {},
"ITEM_UPLOAD": {},
}
// Event is an array element in the response returned from /events
type Event struct {
EventType string `json:"event_type"`
EventID string `json:"event_id"`
Source Item `json:"source"`
}
// Events is returned from /events
type Events struct {
ChunkSize int64 `json:"chunk_size"`
Entries []Event `json:"entries"`
NextStreamPosition int64 `json:"next_stream_position"`
}

View File

@@ -17,9 +17,9 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
@@ -27,7 +27,6 @@ import (
"sync/atomic"
"time"
"github.com/golang-jwt/jwt/v4"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -46,6 +45,7 @@ import (
"github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
)
const (
@@ -76,11 +76,6 @@ var (
}
)
type boxCustomClaims struct {
jwt.StandardClaims
BoxSubType string `json:"box_sub_type,omitempty"`
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -107,18 +102,16 @@ func init() {
return nil, nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0",
Advanced: true,
Sensitive: true,
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0",
Advanced: true,
}, {
Name: "box_config_file",
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
Sensitive: true,
Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
}, {
Name: "box_sub_type",
Default: "user",
@@ -149,23 +142,6 @@ func init() {
Default: "",
Help: "Only show items owned by the login (email address) passed in.",
Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: `Impersonate this user ID when using a service account.
Setting this flag allows rclone, when using a JWT service account, to
act on behalf of another user by setting the as-user header.
The user ID is the Box identifier for a user. User IDs can be found for
any user via the GET /users endpoint, which is only available to
admins, or by calling the GET /users/me endpoint with an authenticated
user session.
See: https://developer.box.com/guides/authentication/jwt/as-user/
`,
Advanced: true,
Sensitive: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -202,12 +178,12 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(ctx)
err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
return err
}
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := os.ReadFile(configFile)
file, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
}
@@ -218,31 +194,34 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
return boxConfig, nil
}
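Several hunks in this diff swap os.ReadFile, io.ReadAll, os.MkdirTemp and friends back to their ioutil equivalents, simply because the old branch predates the Go 1.16 ioutil deprecations. The modern replacements are drop-in:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	dir, _ := os.MkdirTemp("", "demo") // replaces ioutil.TempDir
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "config.json")
	_ = os.WriteFile(path, []byte(`{"ok":true}`), 0o600)

	data, _ := os.ReadFile(path) // replaces ioutil.ReadFile
	fmt.Println(string(data))

	body, _ := io.ReadAll(strings.NewReader("response body")) // replaces ioutil.ReadAll
	fmt.Println(string(body))

	f, _ := os.CreateTemp(dir, "upload-*") // replaces ioutil.TempFile
	fmt.Println(f.Name() != "")
	_ = f.Close()
}
```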
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) {
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
val, err := jwtutil.RandomHex(20)
if err != nil {
return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
}
claims = &boxCustomClaims{
//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
StandardClaims: jwt.StandardClaims{
Id: val,
Issuer: boxConfig.BoxAppSettings.ClientID,
Subject: boxConfig.EnterpriseID,
Audience: tokenURL,
ExpiresAt: time.Now().Add(time.Second * 45).Unix(),
claims = &jws.ClaimSet{
Iss: boxConfig.BoxAppSettings.ClientID,
Sub: boxConfig.EnterpriseID,
Aud: tokenURL,
Exp: time.Now().Add(time.Second * 45).Unix(),
PrivateClaims: map[string]interface{}{
"box_sub_type": boxSubType,
"aud": tokenURL,
"jti": val,
},
BoxSubType: boxSubType,
}
return claims, nil
}
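On master, getClaims embeds jwt.StandardClaims in a custom struct to add the box_sub_type private claim; the old branch built an equivalent jws.ClaimSet with a PrivateClaims map. A sketch of the embedding approach with github.com/golang-jwt/jwt/v4 (key and claim values are made up):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

// customClaims adds one private claim on top of the registered ones.
type customClaims struct {
	jwt.StandardClaims
	BoxSubType string `json:"box_sub_type,omitempty"`
}

func main() {
	key, _ := rsa.GenerateKey(rand.Reader, 2048)
	claims := &customClaims{
		StandardClaims: jwt.StandardClaims{
			Id:        "random-jti",
			Issuer:    "client-id",
			Subject:   "enterprise-id",
			Audience:  "https://api.example.com/token",
			ExpiresAt: time.Now().Add(45 * time.Second).Unix(),
		},
		BoxSubType: "user",
	}
	token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
	signed, err := token.SignedString(key)
	fmt.Println(len(signed) > 0, err)
}
```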
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
signingHeaders := map[string]interface{}{
"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
signingHeaders := &jws.Header{
Algorithm: "RS256",
Typ: "JWT",
KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
}
return signingHeaders
}
@@ -279,29 +258,19 @@ type Options struct {
AccessToken string `config:"access_token"`
ListChunk int `config:"list_chunk"`
OwnedBy string `config:"owned_by"`
Impersonate string `config:"impersonate"`
}
// ItemMeta defines metadata we cache for each Item ID
type ItemMeta struct {
SequenceID int64 // the most recent event processed for this item
ParentID string // ID of the parent directory of this item
Name string // leaf name of this item
}
// Fs represents a remote box
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency
itemMetaCacheMu *sync.Mutex // protects itemMetaCache
itemMetaCache map[string]ItemMeta // map of Item ID to selected metadata
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency
}
// Object describes a box object
@@ -449,14 +418,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
itemMetaCacheMu: new(sync.Mutex),
itemMetaCache: make(map[string]ItemMeta),
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
}
f.features = (&fs.Features{
CaseInsensitive: true,
@@ -469,11 +436,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
}
// If using impersonate set an as-user header
if f.opt.Impersonate != "" {
f.srv.SetHeader("as-user", f.opt.Impersonate)
}
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
@@ -716,17 +678,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
entries = append(entries, o)
}
// Cache some metadata for this Item to help us process events later
// on. In particular, the box event API does not provide the old path
// of the Item when it is renamed/deleted/moved/etc.
f.itemMetaCacheMu.Lock()
cachedItemMeta, found := f.itemMetaCache[info.ID]
if !found || cachedItemMeta.SequenceID < info.SequenceID {
f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name}
}
f.itemMetaCacheMu.Unlock()
return false
})
if err != nil {
@@ -741,7 +692,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error.
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -801,7 +752,7 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
// Put the object
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -841,9 +792,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// PutUnchecked the object into the container
//
// This will produce an error if the object already exists.
// This will produce an error if the object already exists
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -926,9 +877,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1044,9 +995,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1166,7 +1117,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
var (
deleteErrors atomic.Uint64
deleteErrors = int64(0)
concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
wg sync.WaitGroup
)
@@ -1182,7 +1133,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil {
fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
deleteErrors.Add(1)
atomic.AddInt64(&deleteErrors, 1)
}
}()
} else {
@@ -1191,277 +1142,12 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
return false
})
wg.Wait()
if deleteErrors.Load() != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load())
if deleteErrors != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors)
}
return err
}
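CleanUp fans the deletions out across goroutines, throttled by a Checkers-sized channel, and tallies failures with an atomic counter (atomic.Uint64 on master, atomic.AddInt64 on the old branch). The pattern in compact form:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var (
		failures    atomic.Uint64
		concurrency = make(chan struct{}, 4) // like fs.GetConfig(ctx).Checkers
		wg          sync.WaitGroup
	)
	for i := 0; i < 20; i++ {
		i := i
		concurrency <- struct{}{} // acquire a slot
		wg.Add(1)
		go func() {
			defer func() { <-concurrency; wg.Done() }()
			if i%5 == 0 { // pretend every fifth delete fails
				failures.Add(1)
			}
		}()
	}
	wg.Wait()
	if n := failures.Load(); n != 0 {
		fmt.Printf("failed to delete %d trash items\n", n) // 4
	}
}
```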
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the `stream_position` early so all changes from now on get processed
streamPosition, err := f.changeNotifyStreamPosition(ctx)
if err != nil {
fs.Infof(f, "Failed to get StreamPosition: %s", err)
}
// box can send duplicate Event IDs. Use this map to track and filter
// the ones we've already processed.
processedEventIDs := make(map[string]time.Time)
var ticker *time.Ticker
var tickerC <-chan time.Time
for {
select {
case pollInterval, ok := <-pollIntervalChan:
if !ok {
if ticker != nil {
ticker.Stop()
}
return
}
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
if pollInterval != 0 {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
if streamPosition == "" {
streamPosition, err = f.changeNotifyStreamPosition(ctx)
if err != nil {
fs.Infof(f, "Failed to get StreamPosition: %s", err)
continue
}
}
// Garbage collect EventIDs older than 1 minute
for eventID, timestamp := range processedEventIDs {
if time.Since(timestamp) > time.Minute {
delete(processedEventIDs, eventID)
}
}
streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition, processedEventIDs)
if err != nil {
fs.Infof(f, "Change notify listener failure: %s", err)
}
}
}
}()
}
func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/events",
Parameters: fieldsValue(),
}
opts.Parameters.Set("stream_position", "now")
opts.Parameters.Set("stream_type", "changes")
var result api.Events
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", err
}
return strconv.FormatInt(result.NextStreamPosition, 10), nil
}
// Attempts to construct the full path for an object, given the ID of its
// parent directory and the name of the object.
//
// Can return "" if the parentID is not currently in the directory cache.
func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
fullPath = ""
name := f.opt.Enc.ToStandardName(childName)
if parentID != "" {
if parentDir, ok := f.dirCache.GetInv(parentID); ok {
if len(parentDir) > 0 {
fullPath = parentDir + "/" + name
} else {
fullPath = name
}
}
} else {
// No parent, this object is at the root
fullPath = name
}
return fullPath
}
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string, processedEventIDs map[string]time.Time) (nextStreamPosition string, err error) {
nextStreamPosition = streamPosition
for {
limit := f.opt.ListChunk
// box only allows a max of 500 events
if limit > 500 {
limit = 500
}
opts := rest.Opts{
Method: "GET",
Path: "/events",
Parameters: fieldsValue(),
}
opts.Parameters.Set("stream_position", nextStreamPosition)
opts.Parameters.Set("stream_type", "changes")
opts.Parameters.Set("limit", strconv.Itoa(limit))
var result api.Events
var resp *http.Response
fs.Debugf(f, "Checking for changes on remote (next_stream_position: %q)", nextStreamPosition)
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", err
}
if result.ChunkSize != int64(len(result.Entries)) {
return "", fmt.Errorf("invalid response to event request, chunk_size (%v) not equal to number of entries (%v)", result.ChunkSize, len(result.Entries))
}
nextStreamPosition = strconv.FormatInt(result.NextStreamPosition, 10)
if result.ChunkSize == 0 {
return nextStreamPosition, nil
}
type pathToClear struct {
path string
entryType fs.EntryType
}
var pathsToClear []pathToClear
newEventIDs := 0
for _, entry := range result.Entries {
eventDetails := fmt.Sprintf("[%q(%d)|%s|%s|%s|%s]", entry.Source.Name, entry.Source.SequenceID,
entry.Source.Type, entry.EventType, entry.Source.ID, entry.EventID)
if entry.EventID == "" {
fs.Debugf(f, "%s ignored due to missing EventID", eventDetails)
continue
}
if _, ok := processedEventIDs[entry.EventID]; ok {
fs.Debugf(f, "%s ignored due to duplicate EventID", eventDetails)
continue
}
processedEventIDs[entry.EventID] = time.Now()
newEventIDs++
if entry.Source.ID == "" { // missing File or Folder ID
fs.Debugf(f, "%s ignored due to missing SourceID", eventDetails)
continue
}
if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
fs.Debugf(f, "%s ignored due to unsupported SourceType", eventDetails)
continue
}
// Only interested in event types that result in a file tree change
if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
fs.Debugf(f, "%s ignored due to unsupported EventType", eventDetails)
continue
}
f.itemMetaCacheMu.Lock()
itemMeta, cachedItemMetaFound := f.itemMetaCache[entry.Source.ID]
if cachedItemMetaFound {
if itemMeta.SequenceID >= entry.Source.SequenceID {
// Item in the cache has the same or newer SequenceID than
// this event. Ignore this event, it must be old.
f.itemMetaCacheMu.Unlock()
fs.Debugf(f, "%s ignored due to old SequenceID (%q)", eventDetails, itemMeta.SequenceID)
continue
}
// This event is newer. Delete its entry from the cache,
// we'll notify about its change below, then it's up to a
// future list operation to repopulate the cache.
delete(f.itemMetaCache, entry.Source.ID)
}
f.itemMetaCacheMu.Unlock()
entryType := fs.EntryDirectory
if entry.Source.Type == api.ItemTypeFile {
entryType = fs.EntryObject
}
// The box event only includes the new path for the object (e.g.
// the path after the object was moved). If there was an old path
// saved in our cache, it must be cleared.
if cachedItemMetaFound {
path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
if path != "" {
fs.Debugf(f, "%s added old path (%q) for notify", eventDetails, path)
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
} else {
fs.Debugf(f, "%s old parent not cached", eventDetails)
}
// If this is a directory, also delete it from the dir cache.
// This will effectively invalidate the item metadata cache
// entries for all descendants of this directory, since we
// will no longer be able to construct a full path for them.
// This is exactly what we want, since we don't want to notify
// on the paths of these descendants if one of their ancestors
// has been renamed/deleted.
if entry.Source.Type == api.ItemTypeFolder {
f.dirCache.FlushDir(path)
}
}
// If the item is "active", then it is not trashed or deleted, so
// it potentially has a valid parent.
//
// Construct the new path of the object, based on the Parent ID
// and its name. If we get an empty result, it means we don't
// currently know about this object so notification is unnecessary.
if entry.Source.ItemStatus == api.ItemStatusActive {
path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
if path != "" {
fs.Debugf(f, "%s added new path (%q) for notify", eventDetails, path)
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
} else {
fs.Debugf(f, "%s new parent not found", eventDetails)
}
}
}
// box can sometimes repeatedly return the same Event IDs within a
// short period of time. If it stops giving us new ones, treat it
// the same as if it returned us none at all.
if newEventIDs == 0 {
return nextStreamPosition, nil
}
notifiedPaths := make(map[string]bool)
for _, p := range pathsToClear {
if _, ok := notifiedPaths[p.path]; ok {
continue
}
notifiedPaths[p.path] = true
notifyFunc(p.path, p.entryType)
}
fs.Debugf(f, "Received %v events, resulting in %v paths and %v notifications", len(result.Entries), len(pathsToClear), len(notifiedPaths))
}
}
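The removed ChangeNotify implementation deduplicates deliveries with a map of processed event IDs, garbage-collected once entries are a minute old, because box can redeliver the same EventID shortly after the first copy. The core of that bookkeeping, in isolation:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	processed := map[string]time.Time{}

	handle := func(eventID string) {
		if _, ok := processed[eventID]; ok {
			fmt.Println(eventID, "ignored (duplicate)")
			return
		}
		processed[eventID] = time.Now()
		fmt.Println(eventID, "processed")
	}

	// GC entries older than a minute before each poll, so the map
	// stays bounded over long-running notification sessions.
	gc := func() {
		for id, ts := range processed {
			if time.Since(ts) > time.Minute {
				delete(processed, id)
			}
		}
	}

	handle("evt-1")
	handle("evt-1") // redelivered within the window - dropped
	gc()
	handle("evt-2")
}
```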
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
@@ -1549,6 +1235,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1659,9 +1346,9 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned.
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if o.fs.tokenRenewer != nil {
o.fs.tokenRenewer.Start()

View File

@@ -1,7 +1,6 @@
//go:build !plan9 && !js
// +build !plan9,!js
// Package cache implements a virtual provider to cache existing remotes.
package cache
import (
@@ -76,19 +75,17 @@ func init() {
Name: "plex_url",
Help: "The URL of the Plex server.",
}, {
Name: "plex_username",
Help: "The username of the Plex user.",
Sensitive: true,
Name: "plex_username",
Help: "The username of the Plex user.",
}, {
Name: "plex_password",
Help: "The password of the Plex user.",
IsPassword: true,
}, {
Name: "plex_token",
Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth,
Advanced: true,
Sensitive: true,
Name: "plex_token",
Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth,
Advanced: true,
}, {
Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server.",
@@ -1040,7 +1037,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
fs.Debugf(dir, "list: remove entry: %v", entryRemote)
}
entries = nil //nolint:ineffassign
entries = nil
// and then iterate over the ones from source (temp Objects will override source ones)
var batchDirectories []*Directory
@@ -1789,7 +1786,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
}
}
// StopBackgroundRunners will signal all the runners to stop their work
// StopBackgroundRunners will signall all the runners to stop their work
// can be triggered from a terminate signal or from testing between runs
func (f *Fs) StopBackgroundRunners() {
f.cleanupChan <- false

View File

@@ -11,6 +11,7 @@ import (
goflag "flag"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
@@ -101,12 +102,14 @@ func TestMain(m *testing.M) {
func TestInternalListRootAndInnerRemotes(t *testing.T) {
id := fmt.Sprintf("tilrair%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
// Instantiate inner fs
innerFolder := "inner"
runInstance.mkdir(t, rootFs, innerFolder)
rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil)
rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs2, boltDb2)
runInstance.writeObjectString(t, rootFs2, "one", "content")
listRoot, err := runInstance.list(t, rootFs, "")
@@ -164,7 +167,7 @@ func TestInternalVfsCache(t *testing.T) {
li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
for _, r := range li2 {
var err error
ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
ci, err := ioutil.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
if err != nil || len(ci) == 0 {
log.Printf("========== '%v' not in cache", r)
} else {
@@ -223,7 +226,8 @@ func TestInternalVfsCache(t *testing.T) {
func TestInternalObjWrapFsFound(t *testing.T) {
id := fmt.Sprintf("tiowff%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -255,7 +259,8 @@ func TestInternalObjWrapFsFound(t *testing.T) {
func TestInternalObjNotFound(t *testing.T) {
id := fmt.Sprintf("tionf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
obj, err := rootFs.NewObject(context.Background(), "404")
require.Error(t, err)
@@ -265,7 +270,8 @@ func TestInternalObjNotFound(t *testing.T) {
func TestInternalCachedWrittenContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -292,7 +298,8 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
// write the object
runInstance.writeRemoteString(t, rootFs, "one", "one content")
@@ -310,7 +317,8 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticucm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
var err error
// create some rand test data
@@ -339,7 +347,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
}
@@ -369,7 +378,8 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
func TestInternalLargeWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
}
@@ -395,7 +405,8 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -449,7 +460,8 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
func TestInternalMoveWithNotify(t *testing.T) {
id := fmt.Sprintf("timwn%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.wrappedIsExternal {
t.Skipf("Not external")
}
@@ -535,7 +547,8 @@ func TestInternalMoveWithNotify(t *testing.T) {
func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
id := fmt.Sprintf("tincep%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.wrappedIsExternal {
t.Skipf("Not external")
}
@@ -621,7 +634,8 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -653,7 +667,8 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
func TestInternalCacheWrites(t *testing.T) {
id := "ticw"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -674,7 +689,8 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -709,7 +725,8 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
func TestInternalExpiredEntriesRemoved(t *testing.T) {
id := fmt.Sprintf("tieer%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -746,7 +763,9 @@ func TestInternalBug2117(t *testing.T) {
vfsflags.Opt.DirCacheTime = time.Second * 10
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt {
t.Skipf("skipping crypt")
@@ -822,7 +841,7 @@ func newRun() *run {
}
if uploadDir == "" {
r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp")
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
if err != nil {
panic(fmt.Sprintf("Failed to create temp dir: %v", err))
}
@@ -847,7 +866,7 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
return enc
}
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise()
remoteExists := false
for _, s := range config.FileSections() {
@@ -940,15 +959,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
err = f.Mkdir(context.Background(), "")
require.NoError(t, err)
t.Cleanup(func() {
runInstance.cleanupFs(t, f)
})
return f, boltDb
}
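On master, newCacheFs registers teardown via t.Cleanup, so call sites drop their explicit defers; the old branch shown here re-adds a defer cleanupFs line to every test. A toy comparison of the two styles:

```go
package cleanupdemo

import "testing"

func newThing(t *testing.T) string {
	t.Helper()
	// t.Cleanup runs after the test (and any subtests) finish,
	// even on failure - callers can't forget to tear down.
	t.Cleanup(func() { t.Log("torn down by t.Cleanup") })
	return "thing"
}

func TestWithCleanup(t *testing.T) {
	_ = newThing(t) // no defer needed at the call site
}

func TestWithDefer(t *testing.T) {
	thing := "thing"
	defer t.Log("torn down by defer", thing) // old style: per-call defer
}
```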
func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
err := f.Features().Purge(context.Background(), "")
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
@@ -970,7 +984,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
chunk := int64(1024)
cnt := size / chunk
left := size % chunk
f, err := os.CreateTemp("", "rclonecache-tempfile")
f, err := ioutil.TempFile("", "rclonecache-tempfile")
require.NoError(t, err)
for i := 0; i < int(cnt); i++ {
@@ -1098,6 +1112,27 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
return l, err
}
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer func() {
_ = in.Close()
}()
out, err := os.Create(dst)
if err != nil {
return err
}
defer func() {
_ = out.Close()
}()
_, err = io.Copy(out, in)
return err
}
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error

View File

@@ -18,7 +18,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})

View File

@@ -21,8 +21,10 @@ import (
func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
runInstance.newCacheFs(t, remoteName, id, false, true,
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err)
@@ -61,7 +63,9 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
@@ -69,15 +73,19 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err)
@@ -111,8 +119,10 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
func TestInternalUploadTempPathCleaned(t *testing.T) {
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err)
@@ -152,19 +162,21 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "test")
require.NoError(t, err)
minSize := 5242880
maxSize := 10485760
totalFiles := 10
randInstance := rand.New(rand.NewSource(time.Now().Unix()))
rand.Seed(time.Now().Unix())
lastFile := ""
for i := 0; i < totalFiles; i++ {
size := int64(randInstance.Intn(maxSize-minSize) + minSize)
size := int64(rand.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
@@ -201,7 +213,9 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
@@ -329,7 +343,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()

View File

@@ -8,7 +8,7 @@ import (
"crypto/tls"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
@@ -167,7 +167,7 @@ func (p *plexConnector) listenWebsocket() {
continue
}
var data []byte
data, err = io.ReadAll(resp.Body)
data, err = ioutil.ReadAll(resp.Body)
if err != nil {
continue
}

View File

@@ -9,6 +9,7 @@ import (
"encoding/binary"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
@@ -472,7 +473,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
var data []byte
fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
data, err := os.ReadFile(fp)
data, err := ioutil.ReadFile(fp)
if err != nil {
return nil, err
}
@@ -485,7 +486,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)
filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
err := os.WriteFile(filePath, data, os.ModePerm)
err := ioutil.WriteFile(filePath, data, os.ModePerm)
if err != nil {
return err
}
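The two hunks above show that the cache backend addresses chunks on disk purely by object path and byte offset, one file per chunk. A minimal sketch of the resulting layout (paths are hypothetical):

package main

import (
	"fmt"
	"path"
	"strconv"
)

func main() {
	// One directory per cached object, one file per chunk, named
	// after the chunk's byte offset, as in GetChunk/AddChunk above.
	dataPath := "/var/cache/rclone"
	fp := path.Join(dataPath, "bucket/movie.mkv")
	fmt.Println(path.Join(fp, strconv.FormatInt(1048576, 10)))
	// Output: /var/cache/rclone/bucket/movie.mkv/1048576
}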

View File

@@ -12,6 +12,7 @@ import (
"fmt"
gohash "hash"
"io"
"io/ioutil"
"math/rand"
"path"
"regexp"
@@ -31,6 +32,7 @@ import (
"github.com/rclone/rclone/fs/operations"
)
//
// Chunker's composite files have one or more chunks
// and optional metadata object. If it's present,
// meta object is named after the original file.
@@ -63,7 +65,7 @@ import (
// length of 13 decimals it makes a 7-digit base-36 number.
//
// When transactions is set to the norename style, data chunks will
// keep their temporary chunk names (with the transaction identifier
// keep their temporary chunk names (with the transacion identifier
// suffix). To distinguish them from temporary chunks, the txn field
// of the metadata file is set to match the transaction identifier of
// the data chunks.
@@ -77,6 +79,7 @@ import (
// Metadata format v1 does not define any control chunk types,
// they are currently ignored aka reserved.
// In future they can be used to implement resumable uploads etc.
//
const (
ctrlTypeRegStr = `[a-z][a-z0-9]{2,6}`
tempSuffixFormat = `_%04s`
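The comments above describe how active and temporary chunk names are composed. A sketch of the idea, assuming the default "*.rclone_chunk.###" name format; this is an illustration, not rclone's actual makeChunkName:

package main

import "fmt"

// chunkName composes an active or temporary chunk name from the
// parts described above: base name, 1-based chunk number and an
// optional transaction identifier suffix (cf. tempSuffixFormat).
func chunkName(base string, chunkNo int, xactID string) string {
	name := fmt.Sprintf("%s.rclone_chunk.%03d", base, chunkNo+1)
	if xactID != "" {
		name += fmt.Sprintf("_%04s", xactID)
	}
	return name
}

func main() {
	fmt.Println(chunkName("video.mp4", 0, ""))      // active: video.mp4.rclone_chunk.001
	fmt.Println(chunkName("video.mp4", 0, "3ekn2")) // temporary: video.mp4.rclone_chunk.001_3ekn2
}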
@@ -539,6 +542,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
//
// xactID is a transaction identifier. Empty xactID denotes active chunk,
// otherwise temporary chunk name is produced.
//
func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string {
dir, parentName := path.Split(filePath)
var name, tempSuffix string
@@ -704,6 +708,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
// directory together with dead chunks.
// In future a flag named like `--chunker-list-hidden` may be added to
// rclone that will tell List to reveal hidden chunks.
//
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, err = f.base.List(ctx, dir)
if err != nil {
@@ -863,6 +868,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
// Note that chunker prefers analyzing file names rather than reading
// the content of meta object assuming that directory scans are fast
// but opening even a small file can be slow on some backends.
//
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.scanObject(ctx, remote, false)
}
@@ -1037,7 +1043,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
if err != nil {
return err
}
metadata, err := io.ReadAll(reader)
metadata, err := ioutil.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows
if err != nil {
return err
@@ -1078,7 +1084,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
// readXactID returns the transaction ID stored in the passed metadata object
func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
// if xactID has already been read and cached return it now
// if xactID has already been read and cahced return it now
if o.xIDCached {
return o.xactID, nil
}
@@ -1096,7 +1102,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
if err != nil {
return "", err
}
data, err := io.ReadAll(reader)
data, err := ioutil.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows
if err != nil {
return "", err
@@ -1580,6 +1586,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// This command will chain to `purge` from wrapped remote.
// As a result it removes not only composite chunker files with their
// active chunks but also all hidden temporary chunks in the directory.
//
func (f *Fs) Purge(ctx context.Context, dir string) error {
do := f.base.Features().Purge
if do == nil {
@@ -1621,6 +1628,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Unsupported control chunks will get re-picked by a more recent
// rclone version with unexpected results. This can be helped by
// the `delete hidden` flag above or at least the user has been warned.
//
func (o *Object) Remove(ctx context.Context) (err error) {
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
// operations.Move can still call Remove if chunker's Move refuses
@@ -1796,9 +1804,9 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1817,9 +1825,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -2117,6 +2125,7 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
// file, then tries to read it from metadata. This in theory
// handles the unusual case when a small file has been tampered
// on the level of wrapped remote but chunker is unaware of that.
//
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
if err := o.readMetadata(ctx); err != nil {
return "", err // valid metadata is required to get hash, abort
@@ -2405,6 +2414,7 @@ type metaSimpleJSON struct {
// - for files larger than chunk size
// - if file contents can be mistaken as meta object
// - if consistent hashing is On but wrapped remote can't provide given hash
//
func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1, xactID string) ([]byte, error) {
version := metadataVersion
if xactID == "" && version == 2 {
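A hedged sketch of the serialisation marshalSimpleJSON performs; the JSON field names below are assumptions for illustration, the authoritative layout is the metaSimpleJSON struct above:

package main

import (
	"encoding/json"
	"fmt"
)

// metaSketch mirrors the idea of metaSimpleJSON: size, chunk count,
// optional hashes and an optional transaction ID.
type metaSketch struct {
	Version int    `json:"ver"`
	Size    int64  `json:"size"`
	NChunks int    `json:"nchunks"`
	MD5     string `json:"md5,omitempty"`
	SHA1    string `json:"sha1,omitempty"`
	TxnID   string `json:"txn,omitempty"`
}

func main() {
	// An empty xactID drops the txn field and downgrades the version,
	// matching the check in the hunk above.
	data, _ := json.Marshal(metaSketch{Version: 1, Size: 12345, NChunks: 3})
	fmt.Println(string(data)) // {"ver":1,"size":12345,"nchunks":3}
}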
@@ -2437,6 +2447,7 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1,
// New format will have a higher version number and cannot be correctly
// handled by current implementation.
// The version check below will then explicitly ask user to upgrade rclone.
//
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
// Be strict about JSON format
// to reduce possibility that a random small file resembles metadata.

View File

@@ -5,7 +5,7 @@ import (
"context"
"flag"
"fmt"
"io"
"io/ioutil"
"path"
"regexp"
"strings"
@@ -413,7 +413,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
if r == nil {
return
}
data, err := io.ReadAll(r)
data, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
@@ -538,7 +538,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
assert.NoError(t, err)
var chunkContents []byte
assert.NotPanics(t, func() {
chunkContents, err = io.ReadAll(r)
chunkContents, err = ioutil.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
@@ -573,7 +573,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
r, err = willyChunk.Open(ctx)
assert.NoError(t, err)
assert.NotPanics(t, func() {
_, err = io.ReadAll(r)
_, err = ioutil.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
@@ -672,7 +672,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
assert.NoError(t, err, "open "+description)
assert.NotNil(t, r, "open stream of "+description)
if err == nil && r != nil {
data, err := io.ReadAll(r)
data, err := ioutil.ReadAll(r)
assert.NoError(t, err, "read all of "+description)
assert.Equal(t, contents, string(data), description+" contents is ok")
_ = r.Close()
@@ -758,8 +758,8 @@ func testFutureProof(t *testing.T, f *Fs) {
assert.Error(t, err)
// Rcat must fail
in := io.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime, nil)
in := ioutil.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime)
assert.Nil(t, robj)
assert.NotNil(t, err)
if err != nil {
@@ -854,7 +854,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
r, err := dstFile.Open(ctx)
assert.NoError(t, err)
assert.NotNil(t, r)
data, err := io.ReadAll(r)
data, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()

View File

@@ -40,7 +40,6 @@ func TestIntegration(t *testing.T) {
UnimplementableFsMethods: []string{
"PublicLink",
"OpenWriterAt",
"OpenChunkWriter",
"MergeDirs",
"DirCacheFlush",
"UserInfo",

View File

@@ -1,4 +1,4 @@
// Package combine implements a backend to combine multiple remotes in a directory tree
// Package combine implents a backend to combine multipe remotes in a directory tree
package combine
/*
@@ -233,7 +233,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f)
canMove := true
for _, u := range f.upstreams {
@@ -290,16 +289,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
}
}
// Enable CleanUp when any upstreams support it
if features.CleanUp == nil {
for _, u := range f.upstreams {
if u.f.Features().CleanUp != nil {
features.CleanUp = f.CleanUp
break
}
}
}
// Enable ChangeNotify when any upstreams support it
if features.ChangeNotify == nil {
for _, u := range f.upstreams {
@@ -310,9 +299,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
}
}
// show that we wrap other backends
features.Overlay = true
f.features = features
// Get common intersection of hashes
@@ -365,7 +351,7 @@ func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream
return g.Wait()
}
// join the elements together but unlike path.Join return empty string
// join the elements together but unline path.Join return empty string
func join(elem ...string) string {
result := path.Join(elem...)
if result == "." {
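A minimal illustration of the distinction noted in the comment above (the function body is completed from the hunk):

package main

import (
	"fmt"
	"path"
)

// join is like path.Join, except that joining empty elements yields
// "" rather than ".".
func join(elem ...string) string {
	result := path.Join(elem...)
	if result == "." {
		return ""
	}
	return result
}

func main() {
	fmt.Printf("%q %q\n", path.Join("", ""), join("", "")) // "." ""
	fmt.Printf("%q\n", join("a", "b"))                     // "a/b"
}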
@@ -471,9 +457,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -505,9 +491,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -645,7 +631,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
if err != nil {
return nil, err
}
uSrc := fs.NewOverrideRemote(src, uRemote)
uSrc := operations.NewOverrideRemote(src, uRemote)
var o fs.Object
if stream {
o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
@@ -901,100 +887,6 @@ func (f *Fs) Shutdown(ctx context.Context) error {
})
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return "", err
}
do := u.f.Features().PublicLink
if do == nil {
return "", fs.ErrorNotImplemented
}
return do(ctx, uRemote, expire, unlink)
}
// PutUnchecked in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
u, uRemote, err := f.findUpstream(srcPath)
if err != nil {
return nil, err
}
do := u.f.Features().PutUnchecked
if do == nil {
return nil, fs.ErrorNotImplemented
}
uSrc := fs.NewOverrideRemote(src, uRemote)
return do(ctx, in, uSrc, options...)
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) == 0 {
return nil
}
var (
u *upstream
uDirs []fs.Directory
)
for _, dir := range dirs {
uNew, uDir, err := f.findUpstream(dir.Remote())
if err != nil {
return err
}
if u == nil {
u = uNew
} else if u != uNew {
return fmt.Errorf("can't merge directories from different upstreams")
}
uDirs = append(uDirs, fs.NewOverrideDirectory(dir, uDir))
}
do := u.f.Features().MergeDirs
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx, uDirs)
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
if do := u.f.Features().CleanUp; do != nil {
return do(ctx)
}
return nil
})
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
do := u.f.Features().OpenWriterAt
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx, uRemote, size)
}
// Object describes a wrapped Object
//
// This is a wrapped Object which knows its path prefix
@@ -1024,7 +916,7 @@ func (o *Object) String() string {
func (o *Object) Remote() string {
newPath, err := o.u.pathAdjustment.do(o.Object.String())
if err != nil {
fs.Errorf(o.Object, "Bad object: %v", err)
fs.Errorf(o, "Bad object: %v", err)
return err.Error()
}
return newPath
@@ -1096,10 +988,5 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.OpenWriterAter = (*Fs)(nil)
_ fs.FullObject = (*Object)(nil)
)

View File

@@ -10,11 +10,6 @@ import (
"github.com/rclone/rclone/fstest/fstests"
)
var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
unimplementableObjectMethods = []string{}
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
@@ -22,8 +17,8 @@ func TestIntegration(t *testing.T) {
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
@@ -40,9 +35,7 @@ func TestLocal(t *testing.T) {
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
QuickTestOK: true,
})
}
@@ -58,9 +51,7 @@ func TestMemory(t *testing.T) {
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
QuickTestOK: true,
})
}
@@ -77,8 +68,6 @@ func TestMixed(t *testing.T) {
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}

View File

@@ -13,6 +13,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"regexp"
"strings"
@@ -28,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
)
@@ -90,7 +90,7 @@ Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compression at the cost of speed. Going past 6
generally offers very little return.
Level -2 uses Huffman encoding only. Only use if you know what you
Level -2 uses Huffmann encoding only. Only use if you know what you
are doing.
Level 0 turns off compression.`,
Default: sgzip.DefaultCompression,
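For reference, a compress remote exercising these options might be configured as follows. The remote name and path are placeholders; type, remote and compression_mode appear verbatim in the tests later in this diff, while the level key name is an assumption inferred from this help text:

[gz]
type = compress
remote = /tmp/data
compression_mode = gzip
level = -1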
@@ -130,7 +130,7 @@ type Fs struct {
features *fs.Features // optional features
}
// NewFs constructs an Fs from the path, container:path
// NewFs contstructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
@@ -186,7 +186,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// We support reading MIME types no matter the wrapped fs
f.features.ReadMimeType = true
@@ -257,16 +256,6 @@ func isMetadataFile(filename string) bool {
return strings.HasSuffix(filename, metaFileExt)
}
// Checks whether a file is a metadata file and returns the original
// file name and a flag indicating whether it was a metadata file or
// not.
func unwrapMetadataFile(filename string) (string, bool) {
if !isMetadataFile(filename) {
return "", false
}
return filename[:len(filename)-len(metaFileExt)], true
}
// makeDataName generates the file name for a data file with specified compression mode
func makeDataName(remote string, size int64, mode int) (newRemote string) {
if mode != Uncompressed {
@@ -378,16 +367,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if err != nil {
return nil, err
}
meta, err := readMetadata(ctx, mo)
if err != nil {
return nil, fmt.Errorf("error decoding metadata: %w", err)
meta := readMetadata(ctx, mo)
if meta == nil {
return nil, errors.New("error decoding metadata")
}
// Create our Object
o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
if err != nil {
return nil, err
}
return f.newObject(o, mo, meta), nil
return f.newObject(o, mo, meta), err
}
// checkCompressAndType checks if an object is compressible and determines its mime type
@@ -465,7 +451,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...)
}
// Need to include what we already read
// Need to include what we allready read
in = &ReadCloserWrapper{
Reader: io.MultiReader(bytes.NewReader(buf), in),
Closer: in,
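The hunk above reads a probe buffer to decide compressibility, then stitches the probe back in front of the remaining stream. The same pattern in isolation, as a sketch rather than the backend's code:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// peek sniffs up to n bytes of a stream, then returns a reader that
// replays the probe before the rest of the input.
func peek(in io.Reader, n int) (probe []byte, rest io.Reader, err error) {
	buf := make([]byte, n)
	read, err := io.ReadFull(in, buf)
	if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
		return nil, nil, err
	}
	return buf[:read], io.MultiReader(bytes.NewReader(buf[:read]), in), nil
}

func main() {
	probe, rest, _ := peek(strings.NewReader("hello world"), 5)
	all, _ := io.ReadAll(rest)
	fmt.Printf("%q %q\n", probe, all) // "hello" "hello world"
}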
@@ -478,7 +464,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
}
fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
tempFile, err := os.CreateTemp("", "rclone-press-")
tempFile, err := ioutil.TempFile("", "rclone-press-")
defer func() {
// these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
// to ignore them
@@ -556,8 +542,8 @@ func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, o
}
// Transfer the data
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx))
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx))
if err != nil {
if o != nil {
removeErr := o.Remove(ctx)
@@ -691,7 +677,7 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
}
return nil, err
}
return f.newObject(dataObject, mo, meta), nil
return f.newObject(dataObject, mo, meta), err
}
// Put in to the remote path with the modTime given of the given size
@@ -745,7 +731,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
}
// If our new object is compressed we have to rename it with the correct size.
// Uncompressed objects don't store the size in the name so they'll already have the correct name.
// Uncompressed objects don't store the size in the name so they'll allready have the correct name.
if compressible {
wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
if err != nil {
@@ -756,7 +742,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return newObj, nil
}
// Temporarily disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
// Temporarely disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
// will break stuff. Right now I can't think of a way to make this work.
// PutUnchecked uploads the object
@@ -799,9 +785,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -849,9 +835,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -989,8 +975,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
fs.Logf(f, "path %q entryType %d", path, entryType)
var (
wrappedPath string
isMetadataFile bool
wrappedPath string
)
switch entryType {
case fs.EntryDirectory:
@@ -998,10 +983,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
case fs.EntryObject:
// Note: All we really need to do to monitor the object is to check whether the metadata changed,
// as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
wrappedPath, isMetadataFile = unwrapMetadataFile(path)
if !isMetadataFile {
return
}
wrappedPath = makeMetadataName(path)
default:
fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
return
@@ -1058,19 +1040,24 @@ func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mim
}
// This function will read the metadata from a metadata object.
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
// Open our metadata object
rc, err := mo.Open(ctx)
if err != nil {
return nil, err
return nil
}
defer fs.CheckClose(rc, &err)
defer func() {
err := rc.Close()
if err != nil {
fs.Errorf(mo, "Error closing object: %v", err)
}
}()
jr := json.NewDecoder(rc)
meta = new(ObjectMetadata)
if err = jr.Decode(meta); err != nil {
return nil, err
return nil
}
return meta, nil
return meta
}
// Remove removes this object
@@ -1115,9 +1102,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
origName := o.Remote()
if o.meta.Mode != Uncompressed || compressible {
newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
if err != nil {
return err
}
if newObject.Object.Remote() != o.Object.Remote() {
if removeErr := o.Object.Remove(ctx); removeErr != nil {
return removeErr
@@ -1131,9 +1115,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// If we are, just update the object and metadata
newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
if err != nil {
return err
}
}
if err != nil {
return err
}
// Update object metadata and return
o.Object = newObject.Object
@@ -1144,9 +1128,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
if o == nil {
log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta)
}
return &Object{
Object: o,
f: f,
@@ -1159,9 +1140,6 @@ func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object
// This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
if o == nil {
log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size)
}
return &Object{
Object: o,
f: f,
@@ -1189,7 +1167,7 @@ func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) {
return err
}
if o.meta == nil {
o.meta, err = readMetadata(ctx, o.mo)
o.meta = readMetadata(ctx, o.mo)
}
return err
}

View File

@@ -14,26 +14,23 @@ import (
"github.com/rclone/rclone/fstest/fstests"
)
var defaultOpt = fstests.Opt{
RemoteName: "TestCompress:",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"OpenChunkWriter",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{},
}
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &defaultOpt)
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{}}
fstests.Run(t, &opt)
}
// TestRemoteGzip tests GZIP compression
@@ -43,13 +40,27 @@ func TestRemoteGzip(t *testing.T) {
}
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
name := "TestCompressGzip"
opt := defaultOpt
opt.RemoteName = name + ":"
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
}
opt.QuickTestOK = true
fstests.Run(t, &opt)
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
UnimplementableObjectMethods: []string{
"GetTier",
"SetTier",
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
QuickTestOK: true,
})
}

View File

@@ -21,7 +21,6 @@ import (
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/version"
"github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox"
@@ -38,6 +37,7 @@ const (
blockHeaderSize = secretbox.Overhead
blockDataSize = 64 * 1024
blockSize = blockHeaderSize + blockDataSize
encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file
)
// Errors returned by cipher
@@ -53,9 +53,8 @@ var (
ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?")
ErrorBadBase32Encoding = errors.New("bad base32 filename encoding")
ErrorFileClosed = errors.New("file already closed")
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix")
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix")
ErrorBadSeek = errors.New("Seek beyond end of file")
ErrorSuffixMissingDot = errors.New("suffix config setting should include a '.'")
defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
obfuscQuoteRune = '!'
)
@@ -128,11 +127,11 @@ type fileNameEncoding interface {
// RFC4648
//
// The standard encoding is modified in two ways
// - it becomes lower case (no-one likes upper case filenames!)
// - we strip the padding character `=`
// * it becomes lower case (no-one likes upper case filenames!)
// * we strip the padding character `=`
type caseInsensitiveBase32Encoding struct{}
// EncodeToString encodes a string using the modified version of
// EncodeToString encodes a strign using the modified version of
// base32 encoding.
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
encoded := base32.HexEncoding.EncodeToString(src)
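To make the modified encoding concrete, a standalone sketch of the behaviour described above (not the backend's exact code): base32hex, lower-cased, with the '=' padding stripped.

package main

import (
	"encoding/base32"
	"fmt"
	"strings"
)

func encodeToString(src []byte) string {
	encoded := base32.HexEncoding.EncodeToString(src)
	return strings.ToLower(strings.TrimRight(encoded, "="))
}

func main() {
	fmt.Println(encodeToString([]byte("hello"))) // d1imor3f
}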
@@ -170,30 +169,27 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
// Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct {
dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block
mode NameEncryptionMode
fileNameEnc fileNameEncoding
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
passBadBlocks bool // if set passed bad blocks as zeroed blocks
encryptedSuffix string
dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block
mode NameEncryptionMode
fileNameEnc fileNameEncoding
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
}
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
c := &Cipher{
mode: mode,
fileNameEnc: enc,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
encryptedSuffix: ".bin",
mode: mode,
fileNameEnc: enc,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
}
c.buffers.New = func() interface{} {
return new([blockSize]byte)
return make([]byte, blockSize)
}
err := c.Key(password, salt)
if err != nil {
@@ -202,29 +198,11 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
return c, nil
}
// setEncryptedSuffix set suffix, or an empty string
func (c *Cipher) setEncryptedSuffix(suffix string) {
if strings.EqualFold(suffix, "none") {
c.encryptedSuffix = ""
return
}
if !strings.HasPrefix(suffix, ".") {
fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot)
suffix = "." + suffix
}
c.encryptedSuffix = suffix
}
// Call to set bad block pass through
func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
c.passBadBlocks = passBadBlocks
}
// Key creates all the internal keys from the password passed in using
// scrypt.
//
// If salt is "" we use a fixed salt just to make attackers lives
// slightly harder than using no salt.
// slighty harder than using no salt.
//
// Note that empty password makes all 0x00 keys which is used in the
// tests.
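A hedged sketch of that derivation; the scrypt cost parameters and the exact 32+32+16 byte split are assumptions, based on the dataKey, nameKey and nameTweak fields shown earlier:

package main

import (
	"fmt"

	"golang.org/x/crypto/scrypt"
)

func main() {
	salt := []byte{0xA8, 0x0D, 0xF4, 0x3A} // placeholder; the real code falls back to defaultSalt for ""
	key, err := scrypt.Key([]byte("password"), salt, 16384, 8, 1, 32+32+16)
	if err != nil {
		panic(err)
	}
	dataKey, nameKey, nameTweak := key[:32], key[32:64], key[64:]
	fmt.Println(len(dataKey), len(nameKey), len(nameTweak)) // 32 32 16
}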
@@ -252,18 +230,21 @@ func (c *Cipher) Key(password, salt string) (err error) {
}
// getBlock gets a block from the pool of size blockSize
func (c *Cipher) getBlock() *[blockSize]byte {
return c.buffers.Get().(*[blockSize]byte)
func (c *Cipher) getBlock() []byte {
return c.buffers.Get().([]byte)
}
// putBlock returns a block to the pool of size blockSize
func (c *Cipher) putBlock(buf *[blockSize]byte) {
func (c *Cipher) putBlock(buf []byte) {
if len(buf) != blockSize {
panic("bad blocksize returned to pool")
}
c.buffers.Put(buf)
}
// encryptSegment encrypts a path segment
//
// This uses EME with AES.
// This uses EME with AES
//
// EME (ECB-Mix-ECB) is a wide-block encryption mode presented in the
// 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
@@ -273,8 +254,8 @@ func (c *Cipher) putBlock(buf *[blockSize]byte) {
// same filename must encrypt to the same thing.
//
// This means that
// - filenames with the same name will encrypt the same
// - filenames which start the same won't have a common prefix
// * filenames with the same name will encrypt the same
// * filenames which start the same won't have a common prefix
func (c *Cipher) encryptSegment(plaintext string) string {
if plaintext == "" {
return ""
@@ -527,7 +508,7 @@ func (c *Cipher) encryptFileName(in string) string {
// EncryptFileName encrypts a file path
func (c *Cipher) EncryptFileName(in string) string {
if c.mode == NameEncryptionOff {
return in + c.encryptedSuffix
return in + encryptedSuffix
}
return c.encryptFileName(in)
}
@@ -587,8 +568,8 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
// DecryptFileName decrypts a file path
func (c *Cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(c.encryptedSuffix)
if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) {
remainingLength := len(in) - len(encryptedSuffix)
if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
return "", ErrorNotAnEncryptedFile
}
decrypted := in[:remainingLength]
@@ -628,7 +609,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
// fromReader fills the nonce from an io.Reader - normally the OSes
// crypto random number generator
func (n *nonce) fromReader(in io.Reader) error {
read, err := readers.ReadFill(in, (*n)[:])
read, err := io.ReadFull(in, (*n)[:])
if read != fileNonceSize {
return fmt.Errorf("short read of nonce: %w", err)
}
@@ -683,8 +664,8 @@ type encrypter struct {
in io.Reader
c *Cipher
nonce nonce
buf *[blockSize]byte
readBuf *[blockSize]byte
buf []byte
readBuf []byte
bufIndex int
bufSize int
err error
@@ -709,9 +690,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
}
}
// Copy magic into buffer
copy((*fh.buf)[:], fileMagicBytes)
copy(fh.buf, fileMagicBytes)
// Copy nonce into buffer
copy((*fh.buf)[fileMagicSize:], fh.nonce[:])
copy(fh.buf[fileMagicSize:], fh.nonce[:])
return fh, nil
}
@@ -726,20 +707,22 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
if fh.bufIndex >= fh.bufSize {
// Read data
// FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := (*fh.readBuf)[:blockDataSize]
n, err = readers.ReadFill(fh.in, readBuf)
readBuf := fh.readBuf[:blockDataSize]
n, err = io.ReadFull(fh.in, readBuf)
if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return fh.finish(err)
}
// possibly err != nil here, but we will process the
// data and the next call to ReadFill will return 0, err
// data and the next call to ReadFull will return 0, err
// Encrypt the block using the nonce
secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
fh.bufIndex = 0
fh.bufSize = blockHeaderSize + n
fh.nonce.increment()
}
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize])
n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
fh.bufIndex += n
return n, nil
}
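fh.nonce.increment() above gives each block a fresh nonce by treating the 24 bytes as a little-endian counter. A sketch of that idea (an assumption, not necessarily rclone's exact code):

package main

import "fmt"

type nonce [24]byte

// increment adds one with carry, least significant byte first.
func (n *nonce) increment() {
	for i := 0; i < len(n); i++ {
		n[i]++
		if n[i] != 0 {
			return
		}
	}
}

func main() {
	var n nonce
	n[0] = 0xFF
	n.increment()
	fmt.Println(n[0], n[1]) // 0 1: the carry propagated
}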
@@ -780,8 +763,8 @@ type decrypter struct {
nonce nonce
initialNonce nonce
c *Cipher
buf *[blockSize]byte
readBuf *[blockSize]byte
buf []byte
readBuf []byte
bufIndex int
bufSize int
err error
@@ -799,12 +782,12 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
limit: -1,
}
// Read file header (magic + nonce)
readBuf := (*fh.readBuf)[:fileHeaderSize]
n, err := readers.ReadFill(fh.rc, readBuf)
if n < fileHeaderSize && err == io.EOF {
readBuf := fh.readBuf[:fileHeaderSize]
_, err := io.ReadFull(fh.rc, readBuf)
if err == io.EOF || err == io.ErrUnexpectedEOF {
// This read from 0..fileHeaderSize-1 bytes
return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
} else if err != io.EOF && err != nil {
} else if err != nil {
return nil, fh.finishAndClose(err)
}
// check the magic
@@ -862,8 +845,10 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
func (fh *decrypter) fillBuffer() (err error) {
// FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := fh.readBuf
n, err := readers.ReadFill(fh.rc, (*readBuf)[:])
n, err := io.ReadFull(fh.rc, readBuf)
if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return err
}
// possibly err != nil here, but we will process the data and
@@ -871,25 +856,18 @@ func (fh *decrypter) fillBuffer() (err error) {
// Check header + 1 byte exists
if n <= blockHeaderSize {
if err != nil && err != io.EOF {
if err != nil {
return err // return pending error as it is likely more accurate
}
return ErrorEncryptedFileBadHeader
}
// Decrypt the block using the nonce
_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey)
_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
if !ok {
if err != nil && err != io.EOF {
if err != nil {
return err // return pending error as it is likely more accurate
}
if !fh.c.passBadBlocks {
return ErrorEncryptedBadBlock
}
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
// Zero out the bad block and continue
for i := range (*fh.buf)[:n] {
(*fh.buf)[i] = 0
}
return ErrorEncryptedBadBlock
}
fh.bufIndex = 0
fh.bufSize = n - blockHeaderSize
@@ -915,7 +893,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
if fh.limit >= 0 && fh.limit < int64(toCopy) {
toCopy = int(fh.limit)
}
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy])
n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
fh.bufIndex += n
if fh.limit >= 0 {
fh.limit -= int64(n)
@@ -926,8 +904,9 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
return n, nil
}
// calculateUnderlying converts an (offset, limit) in an encrypted file
// into an (underlyingOffset, underlyingLimit) for the underlying file.
// calculateUnderlying converts an (offset, limit) in a crypted file
// into an (underlyingOffset, underlyingLimit) for the underlying
// file.
//
// It also returns number of bytes to discard after reading the first
// block and number of blocks this is from the start so the nonce can
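The arithmetic behind calculateUnderlying can be sketched with the block constants from the top of this file; a minimal illustration that ignores limits:

package main

import "fmt"

const (
	fileHeaderSize  = 32 // 8 byte magic + 24 byte nonce
	blockHeaderSize = 16 // secretbox.Overhead
	blockDataSize   = 64 * 1024
	blockSize       = blockHeaderSize + blockDataSize
)

// underlying maps a plaintext offset to a whole number of encrypted
// blocks plus the bytes to discard inside the first decrypted block.
func underlying(offset int64) (underlyingOffset, blocks, discard int64) {
	blocks = offset / blockDataSize
	discard = offset % blockDataSize
	underlyingOffset = fileHeaderSize + blocks*blockSize
	return
}

func main() {
	fmt.Println(underlying(0))     // 32 0 0
	fmt.Println(underlying(70000)) // 65584 1 4464
}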
@@ -1106,7 +1085,7 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// DecryptDataSeek decrypts the data stream from offset
//
// The open function must return a ReadCloser opened to the offset supplied.
// The open function must return a ReadCloser opened to the offset supplied
//
// You must use this form of DecryptData if you might want to Seek the file handle
func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {

View File

@@ -8,6 +8,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"testing"
@@ -27,14 +28,14 @@ func TestNewNameEncryptionMode(t *testing.T) {
{"off", NameEncryptionOff, ""},
{"standard", NameEncryptionStandard, ""},
{"obfuscate", NameEncryptionObfuscated, ""},
{"potato", NameEncryptionOff, "unknown file name encryption mode \"potato\""},
{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
} {
actual, actualErr := NewNameEncryptionMode(test.in)
assert.Equal(t, actual, test.expected)
if test.expectedErr == "" {
assert.NoError(t, actualErr)
} else {
assert.EqualError(t, actualErr, test.expectedErr)
assert.Error(t, actualErr, test.expectedErr)
}
}
}
@@ -405,13 +406,6 @@ func TestNonStandardEncryptFileName(t *testing.T) {
// Off mode
c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
// Off mode with custom suffix
c, _ = newCipher(NameEncryptionOff, "", "", true, nil)
c.setEncryptedSuffix(".jpg")
assert.Equal(t, "1/12/123.jpg", c.EncryptFileName("1/12/123"))
// Off mode with empty suffix
c.setEncryptedSuffix("none")
assert.Equal(t, "1/12/123", c.EncryptFileName("1/12/123"))
// Obfuscation mode
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
@@ -490,27 +484,21 @@ func TestNonStandardDecryptFileName(t *testing.T) {
in string
expected string
expectedErr error
customSuffix string
}{
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil, ""},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile, ""},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile, ""},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil, ""},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil, ""},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil, ""},
{NameEncryptionOff, true, "1/12/123.jpg", "1/12/123", nil, ".jpg"},
{NameEncryptionOff, true, "1/12/123", "1/12/123", nil, "none"},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil, ""},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile, ""},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil, ""},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil, ""},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil, ""},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil, ""},
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
if test.customSuffix != "" {
c.setEncryptedSuffix(test.customSuffix)
}
actual, actualErr := c.DecryptFileName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
@@ -739,7 +727,7 @@ func TestNonceFromReader(t *testing.T) {
assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x)
buf = bytes.NewBufferString("123456789abcdefghijklmn")
err = x.fromReader(buf)
assert.EqualError(t, err, "short read of nonce: EOF")
assert.Error(t, err, "short read of nonce")
}
func TestNonceFromBuf(t *testing.T) {
@@ -1063,7 +1051,7 @@ func TestRandomSource(t *testing.T) {
_, _ = source.Read(buf)
sink = newRandomSource(1e8)
_, err = io.Copy(sink, source)
assert.EqualError(t, err, "Error in stream at 1")
assert.Error(t, err, "Error in stream")
}
type zeroes struct{}
@@ -1085,7 +1073,7 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
source := newRandomSource(copySize)
encrypted, err := c.newEncrypter(source, nil)
assert.NoError(t, err)
decrypted, err := c.newDecrypter(io.NopCloser(encrypted))
decrypted, err := c.newDecrypter(ioutil.NopCloser(encrypted))
assert.NoError(t, err)
sink := newRandomSource(copySize)
n, err := io.CopyBuffer(sink, decrypted, buf)
@@ -1156,15 +1144,15 @@ func TestEncryptData(t *testing.T) {
buf := bytes.NewBuffer(test.in)
encrypted, err := c.EncryptData(buf)
assert.NoError(t, err)
out, err := io.ReadAll(encrypted)
out, err := ioutil.ReadAll(encrypted)
assert.NoError(t, err)
assert.Equal(t, test.expected, out)
// Check we can decode the data properly too...
buf = bytes.NewBuffer(out)
decrypted, err := c.DecryptData(io.NopCloser(buf))
decrypted, err := c.DecryptData(ioutil.NopCloser(buf))
assert.NoError(t, err)
out, err = io.ReadAll(decrypted)
out, err = ioutil.ReadAll(decrypted)
assert.NoError(t, err)
assert.Equal(t, test.in, out)
}
@@ -1180,13 +1168,13 @@ func TestNewEncrypter(t *testing.T) {
fh, err := c.newEncrypter(z, nil)
assert.NoError(t, err)
assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce)
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, (*fh.buf)[:32])
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.buf[:32])
// Test error path
c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn")
fh, err = c.newEncrypter(z, nil)
assert.Nil(t, fh)
assert.EqualError(t, err, "short read of nonce: EOF")
assert.Error(t, err, "short read of nonce")
}
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to
@@ -1199,7 +1187,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err)
n, err := io.CopyN(io.Discard, fh, 1e6)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(32), n)
}
@@ -1237,7 +1225,7 @@ func TestNewDecrypter(t *testing.T) {
cd := newCloseDetector(bytes.NewBuffer(file0[:i]))
fh, err = c.newDecrypter(cd)
assert.Nil(t, fh)
assert.EqualError(t, err, ErrorEncryptedFileTooShort.Error())
assert.Error(t, err, ErrorEncryptedFileTooShort.Error())
assert.Equal(t, 1, cd.closed)
}
@@ -1245,7 +1233,7 @@ func TestNewDecrypter(t *testing.T) {
cd = newCloseDetector(er)
fh, err = c.newDecrypter(cd)
assert.Nil(t, fh)
assert.EqualError(t, err, "potato")
assert.Error(t, err, "potato")
assert.Equal(t, 1, cd.closed)
// bad magic
@@ -1256,7 +1244,7 @@ func TestNewDecrypter(t *testing.T) {
cd := newCloseDetector(bytes.NewBuffer(file0copy))
fh, err := c.newDecrypter(cd)
assert.Nil(t, fh)
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
file0copy[i] ^= 0x1
assert.Equal(t, 1, cd.closed)
}
@@ -1269,12 +1257,12 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in1 := bytes.NewBuffer(file16)
in := io.NopCloser(io.MultiReader(in1, in2))
in := ioutil.NopCloser(io.MultiReader(in1, in2))
fh, err := c.newDecrypter(in)
assert.NoError(t, err)
n, err := io.CopyN(io.Discard, fh, 1e6)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(16), n)
}
@@ -1286,14 +1274,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
// Make random data
const dataSize = 150000
plaintext, err := io.ReadAll(newRandomSource(dataSize))
plaintext, err := ioutil.ReadAll(newRandomSource(dataSize))
assert.NoError(t, err)
// Encrypt the data
buf := bytes.NewBuffer(plaintext)
encrypted, err := c.EncryptData(buf)
assert.NoError(t, err)
ciphertext, err := io.ReadAll(encrypted)
ciphertext, err := ioutil.ReadAll(encrypted)
assert.NoError(t, err)
trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65,
@@ -1312,7 +1300,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
end = len(ciphertext)
}
}
reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
reader = ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
return reader, nil
}
@@ -1502,16 +1490,14 @@ func TestDecrypterRead(t *testing.T) {
assert.NoError(t, err, what)
continue
}
_, err = io.ReadAll(fh)
_, err = ioutil.ReadAll(fh)
var expectedErr error
switch {
case i == fileHeaderSize:
// This would normally produce an error *except* on the first block
expectedErr = nil
case i <= fileHeaderSize+blockHeaderSize:
expectedErr = ErrorEncryptedFileBadHeader
default:
expectedErr = ErrorEncryptedBadBlock
expectedErr = io.ErrUnexpectedEOF
}
if expectedErr != nil {
assert.EqualError(t, err, expectedErr.Error(), what)
@@ -1528,8 +1514,8 @@ func TestDecrypterRead(t *testing.T) {
cd := newCloseDetector(in)
fh, err := c.newDecrypter(cd)
assert.NoError(t, err)
_, err = io.ReadAll(fh)
assert.EqualError(t, err, "potato")
_, err = ioutil.ReadAll(fh)
assert.Error(t, err, "potato")
assert.Equal(t, 0, cd.closed)
// Test corrupting the input
@@ -1538,28 +1524,17 @@ func TestDecrypterRead(t *testing.T) {
copy(file16copy, file16)
for i := range file16copy {
file16copy[i] ^= 0xFF
fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
fh, err := c.newDecrypter(ioutil.NopCloser(bytes.NewBuffer(file16copy)))
if i < fileMagicSize {
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
assert.Nil(t, fh)
} else {
assert.NoError(t, err)
_, err = io.ReadAll(fh)
assert.EqualError(t, err, ErrorEncryptedBadBlock.Error())
_, err = ioutil.ReadAll(fh)
assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
}
file16copy[i] ^= 0xFF
}
// Test that we can corrupt a byte and read zeroes if
// passBadBlocks is set
copy(file16copy, file16)
file16copy[len(file16copy)-1] ^= 0xFF
c.passBadBlocks = true
fh, err = c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
assert.NoError(t, err)
buf, err := io.ReadAll(fh)
assert.NoError(t, err)
assert.Equal(t, make([]byte, 16), buf)
}
func TestDecrypterClose(t *testing.T) {
@@ -1580,7 +1555,7 @@ func TestDecrypterClose(t *testing.T) {
// double close
err = fh.Close()
assert.EqualError(t, err, ErrorFileClosed.Error())
assert.Error(t, err, ErrorFileClosed.Error())
assert.Equal(t, 1, cd.closed)
// try again reading the file this time
@@ -1590,7 +1565,7 @@ func TestDecrypterClose(t *testing.T) {
assert.Equal(t, 0, cd.closed)
// close after reading
out, err := io.ReadAll(fh)
out, err := ioutil.ReadAll(fh)
assert.NoError(t, err)
assert.Equal(t, []byte{1}, out)
assert.Equal(t, io.EOF, fh.err)
@@ -1607,6 +1582,8 @@ func TestPutGetBlock(t *testing.T) {
block := c.getBlock()
c.putBlock(block)
c.putBlock(block)
assert.Panics(t, func() { c.putBlock(block[:len(block)-1]) })
}
func TestKey(t *testing.T) {

View File

@@ -48,7 +48,7 @@ func init() {
Help: "Very simple filename obfuscation.",
}, {
Value: "off",
Help: "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.",
Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
},
},
}, {
@@ -79,9 +79,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Deprecated: use --server-side-across-configs instead.
Allow server-side operations (e.g. copy) to work across different crypt configs.
Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it.
@@ -121,22 +119,13 @@ names, or for debugging purposes.`,
Help: "Encrypt file data.",
},
},
}, {
Name: "pass_bad_blocks",
Help: `If set this will pass bad blocks through as all 0.
This should not be set in normal operation; set it only when trying to
recover an encrypted file with errors and you want to recover as much
of the file as possible.`,
Default: false,
Advanced: true,
}, {
Name: "filename_encoding",
Help: `How to encode the encrypted filename to text string.
This option could help with shortening the encrypted filename. The
suitable option depends on how your remote counts the filename
length and if it's case sensitive.`,
length and if it's case sensitve.`,
Default: "base32",
Examples: []fs.OptionExample{
{
@@ -149,18 +138,10 @@ length and if it's case sensitive.`,
},
{
Value: "base32768",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
},
},
Advanced: true,
}, {
Name: "suffix",
Help: `If this is set it will override the default suffix of ".bin".
Setting suffix to "none" will result in an empty suffix. This may be useful
when the path length is critical.`,
Default: ".bin",
Advanced: true,
}},
})
}
@@ -193,8 +174,6 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
if err != nil {
return nil, fmt.Errorf("failed to make cipher: %w", err)
}
cipher.setEncryptedSuffix(opt.Suffix)
cipher.setPassBadBlocks(opt.PassBadBlocks)
return cipher, nil
}
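A hedged sketch of what the two setters wired in above plausibly do. The real implementations live in the cipher; the "none" handling is inferred from the option help text, and the field names are assumptions:

```go
package main

import (
	"fmt"
	"strings"
)

// Minimal stand-in for the crypt Cipher with just the fields the two
// setters touch.
type Cipher struct {
	encryptedSuffix string
	passBadBlocks   bool
}

// Assumed shape of the suffix setter: "none" maps to an empty suffix,
// anything else is used verbatim (the default being ".bin").
func (c *Cipher) setEncryptedSuffix(suffix string) {
	if strings.EqualFold(suffix, "none") {
		c.encryptedSuffix = ""
		return
	}
	c.encryptedSuffix = suffix
}

// Assumed shape of the bad-block setter: a flag the decrypter consults
// to emit zeroes instead of failing on a corrupt block.
func (c *Cipher) setPassBadBlocks(pass bool) {
	c.passBadBlocks = pass
}

func main() {
	c := &Cipher{encryptedSuffix: ".bin"}
	c.setEncryptedSuffix("none")
	c.setPassBadBlocks(true)
	fmt.Printf("%+v\n", *c)
}
```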
@@ -256,7 +235,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
DuplicateFiles: true,
ReadMimeType: false, // MimeTypes not supported with crypt
WriteMimeType: false,
@@ -268,7 +247,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
return f, err
@@ -284,9 +262,7 @@ type Options struct {
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"`
PassBadBlocks bool `config:"pass_bad_blocks"`
FilenameEncoding string `config:"filename_encoding"`
Suffix string `config:"suffix"`
}
// Fs represents a wrapped fs.Fs
@@ -420,8 +396,6 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
ci := fs.GetConfig(ctx)
if f.opt.NoDataEncryption {
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
if err == nil && o != nil {
@@ -439,9 +413,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
// Find a hash the destination supports to compute a hash of
// the encrypted data
ht := f.Fs.Hashes().GetOne()
if ci.IgnoreChecksum {
ht = hash.None
}
var hasher *hash.MultiHasher
if ht != hash.None {
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
@@ -478,7 +449,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}
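The surrounding put path hashes the encrypted stream while it uploads and compares source and destination digests afterwards. A small self-contained illustration of that hash-while-copying shape, with MD5 standing in for whichever hash the wrapped remote supports:

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("pretend this is the encrypted stream")
	hasher := md5.New()
	// The "upload" consumes the stream; the tee feeds the hasher.
	n, err := io.Copy(io.Discard, io.TeeReader(src, hasher))
	if err != nil {
		panic(err)
	}
	srcHash := hex.EncodeToString(hasher.Sum(nil))
	fmt.Printf("uploaded %d bytes, md5 %s\n", n, srcHash)
	// The real code compares srcHash against the destination object's
	// reported hash and removes the upload if they differ.
}
```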
@@ -536,9 +507,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -561,9 +532,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1076,11 +1047,10 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
// Get the underlying object if there is one
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
// Prefer direct interface assertion
} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok {
// Unwrap if it is an operations.OverrideRemote
} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
// Otherwise likely is an operations.OverrideRemote
srcObj = do.UnWrap()
} else {
// Otherwise don't unwrap any further
return "", nil
}
// if this is wrapping a local object then we work out the hash

View File

@@ -17,28 +17,41 @@ import (
"github.com/stretchr/testify/require"
)
type testWrapper struct {
fs.ObjectInfo
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o testWrapper) UnWrap() fs.Object {
if o, ok := o.ObjectInfo.(fs.Object); ok {
return o
}
return nil
}
// Create a temporary local fs to upload things from
func makeTempLocalFs(t *testing.T) (localFs fs.Fs) {
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
localFs, err := fs.TemporaryLocalFs(context.Background())
require.NoError(t, err)
t.Cleanup(func() {
cleanup = func() {
require.NoError(t, localFs.Rmdir(context.Background(), ""))
})
return localFs
}
return localFs, cleanup
}
// Upload a file to a remote
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) {
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
inBuf := bytes.NewBufferString(contents)
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
obj, err := f.Put(context.Background(), inBuf, upSrc)
require.NoError(t, err)
t.Cleanup(func() {
cleanup = func() {
require.NoError(t, obj.Remove(context.Background()))
})
return obj
}
return obj, cleanup
}
// Test the ObjectInfo
@@ -52,9 +65,11 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
path = "_wrap"
}
localFs := makeTempLocalFs(t)
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
obj := uploadFile(t, localFs, path, contents)
obj, cleanupObj := uploadFile(t, localFs, path, contents)
defer cleanupObj()
// encrypt the data
inBuf := bytes.NewBufferString(contents)
@@ -68,7 +83,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
var oi fs.ObjectInfo = obj
if wrap {
// wrap the object in an fs.ObjectUnwrapper if required
oi = fs.NewOverrideRemote(oi, "new_remote")
oi = testWrapper{oi}
}
// wrap the object in a crypt for upload using the nonce we
@@ -101,13 +116,16 @@ func testComputeHash(t *testing.T, f *Fs) {
t.Skipf("%v: does not support hashes", f.Fs)
}
localFs := makeTempLocalFs(t)
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
// Upload a file to localFs as a test object
localObj := uploadFile(t, localFs, path, contents)
localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
defer cleanupLocalObj()
// Upload the same data to the remote Fs also
remoteObj := uploadFile(t, f, path, contents)
remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
defer cleanupRemoteObj()
// Calculate the expected Hash of the remote object
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)

View File

@@ -24,7 +24,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
@@ -45,7 +45,7 @@ func TestStandardBase32(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -67,7 +67,7 @@ func TestStandardBase64(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base64"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -89,7 +89,7 @@ func TestStandardBase32768(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base32768"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -111,7 +111,7 @@ func TestOff(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "off"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -137,7 +137,7 @@ func TestObfuscate(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -164,7 +164,7 @@ func TestNoDataObfuscate(t *testing.T) {
{Name: name, Key: "no_data_encryption", Value: "true"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})

View File

@@ -14,10 +14,11 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"net/http"
"os"
"path"
"regexp"
"sort"
"strconv"
"strings"
@@ -71,7 +72,7 @@ const (
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = fs.SizeSuffix(googleapi.MinUploadChunkSize)
defaultChunkSize = 8 * fs.Mebi
partialFields = "id,name,size,md5Checksum,sha1Checksum,sha256Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey"
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey"
listRGrouping = 50 // number of IDs to search at once when using ListR
listRInputBuffer = 1000 // size of input buffer when using ListR
defaultXDGIcon = "text-html"
@@ -202,7 +203,7 @@ func init() {
m.Set("root_folder_id", "appDataFolder")
}
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" && !opt.EnvAuth {
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
OAuth2Config: driveConfig,
})
@@ -277,23 +278,20 @@ Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.
`,
Advanced: true,
Sensitive: true,
Advanced: true,
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator,
Advanced: true,
Sensitive: true,
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "team_drive",
Help: "ID of the Shared Drive (Team Drive).",
Hide: fs.OptionHideConfigurator,
Advanced: true,
Sensitive: true,
Name: "team_drive",
Help: "ID of the Shared Drive (Team Drive).",
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "auth_owner_only",
Default: false,
@@ -323,13 +321,13 @@ rather than shortcuts themselves when doing server side copies.`,
}, {
Name: "skip_checksum_gphotos",
Default: false,
Help: `Skip checksums on Google photos and videos only.
Help: `Skip MD5 checksum on Google photos and videos only.
Use this if you get checksum errors when transferring Google photos or
videos.
Setting this flag will cause Google photos and videos to return a
blank checksums.
blank MD5 checksum.
Google photos are identified by being in the "photos" space.
@@ -419,11 +417,10 @@ date is used.`,
Help: "Size of listing chunk 100-1000, 0 to disable.",
Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: `Impersonate this user when using a service account.`,
Advanced: true,
Sensitive: true,
Name: "impersonate",
Default: "",
Help: `Impersonate this user when using a service account.`,
Advanced: true,
}, {
Name: "alternate_export",
Default: false,
@@ -455,11 +452,7 @@ If downloading a file returns the error "This file has been identified
as malware or spam and cannot be downloaded" with the error code
"cannotDownloadAbusiveFile" then supply this flag to rclone to
indicate you acknowledge the risks of downloading the file and rclone
will download it anyway.
Note that if you are using a service account it will need Manager
permission (not Content Manager) for this flag to work. If the SA
does not have the right permission, Google will just ignore the flag.`,
will download it anyway.`,
Advanced: true,
}, {
Name: "keep_revision_forever",
@@ -503,9 +496,7 @@ need to use --ignore size also.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Deprecated: use --server-side-across-configs instead.
Allow server-side operations (e.g. copy) to work across different drive configs.
Help: `Allow server-side operations (e.g. copy) to work across different drive configs.
This can be useful if you wish to do a server-side copy between two
different Google drives. Note that this isn't enabled by default
@@ -594,32 +585,9 @@ This resource key requirement only applies to a subset of old files.
Note also that opening the folder once in the web interface (with the
user you've authenticated rclone with) seems to be enough so that the
resource key is not needed.
`,
Advanced: true,
Sensitive: true,
}, {
Name: "fast_list_bug_fix",
Help: `Work around a bug in Google Drive listing.
Normally rclone will work around a bug in Google Drive when using
--fast-list (ListR) where the search "(A in parents) or (B in
parents)" returns nothing sometimes. See #3114, #4289 and
https://issuetracker.google.com/issues/149522397
Rclone detects this by finding no items in more than one directory
when listing and retries them as lists of individual directories.
This means that if you have a lot of empty directories rclone will end
up listing them all individually and this can take many more API
calls.
This flag allows the work-around to be disabled. This is **not**
recommended in normal use - only if you have a particular case you are
having trouble with like many empty directories.
resource key is no needed.
`,
Advanced: true,
Default: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -627,18 +595,6 @@ having trouble with like many empty directories.
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
// Don't encode / as it's a valid name character in drive.
Default: encoder.EncodeInvalidUtf8,
}, {
Name: "env_auth",
Help: "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...),
})
@@ -694,9 +650,7 @@ type Options struct {
SkipShortcuts bool `config:"skip_shortcuts"`
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
ResourceKey string `config:"resource_key"`
FastListBugFix bool `config:"fast_list_bug_fix"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
}
// Fs represents a remote drive server
@@ -751,8 +705,6 @@ type Object struct {
baseObject
url string // Download URL of this object
md5sum string // md5sum of the object
sha1sum string // sha1sum of the object
sha256sum string // sha256sum of the object
v2Download bool // generate v2 download link ondemand
}
@@ -806,7 +758,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
fs.Errorf(f, "Received download limit error: %v", err)
return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && (reason == "quotaExceeded" || reason == "storageQuotaExceeded") {
} else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
fs.Errorf(f, "Received upload limit error: %v", err)
return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
@@ -1156,7 +1108,7 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
// try loading service account credentials from env variable, then from a file
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
@@ -1167,12 +1119,6 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
if err != nil {
return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
}
} else if opt.EnvAuth {
scopes := driveScopes(opt.Scope)
oAuthClient, err = google.DefaultClient(ctx, scopes...)
if err != nil {
return nil, fmt.Errorf("failed to create client from environment: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
if err != nil {
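The env_auth branch above delegates to Google's Application Default Credentials lookup; a minimal standalone sketch of that call (the Drive scope string here is illustrative):

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	// Reads GOOGLE_APPLICATION_CREDENTIALS, gcloud configuration, or
	// instance metadata - the same lookup the env_auth option relies on.
	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/drive")
	if err != nil {
		fmt.Println("no ambient credentials:", err)
		return
	}
	fmt.Println("got an authenticated HTTP client:", client != nil)
}
```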
@@ -1264,7 +1210,6 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
WriteMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
FilterAware: true,
}).Fill(ctx, f)
// Create a new authorized Drive client.
@@ -1416,8 +1361,6 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
for _, space := range info.Spaces {
if space == "photos" {
info.Md5Checksum = ""
info.Sha1Checksum = ""
info.Sha256Checksum = ""
break
}
}
@@ -1426,8 +1369,6 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
baseObject: f.newBaseObject(remote, info),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
md5sum: strings.ToLower(info.Md5Checksum),
sha1sum: strings.ToLower(info.Sha1Checksum),
sha256sum: strings.ToLower(info.Sha256Checksum),
v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
}
if info.ResourceKey != "" {
@@ -1548,9 +1489,6 @@ func (f *Fs) newObjectWithExportInfo(
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if strings.HasSuffix(remote, "/") {
return nil, fs.ErrorIsDir
}
info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
if err != nil {
return nil, err
@@ -1920,7 +1858,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
// drive where (A in parents) or (B in parents) returns nothing
// sometimes. See #3114, #4289 and
// https://issuetracker.google.com/issues/149522397
if f.opt.FastListBugFix && len(dirs) > 1 && !foundItems {
if len(dirs) > 1 && !foundItems {
if atomic.SwapInt32(&f.grouping, 1) != 1 {
fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
}
@@ -2242,7 +2180,7 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
// Put the object
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -2476,9 +2414,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -2711,9 +2649,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -2938,7 +2876,6 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
if f.rootFolderID == "appDataFolder" {
changesCall.Spaces("appDataFolder")
}
changesCall.RestrictToMyDrive(!f.opt.SharedWithMe)
changeList, err = changesCall.Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
@@ -3013,7 +2950,7 @@ func (f *Fs) DirCacheFlush() {
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.NewHashSet(hash.MD5, hash.SHA1, hash.SHA256)
return hash.Set(hash.MD5)
}
func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
@@ -3368,7 +3305,7 @@ drives found and a combined drive.
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. Any illegal characters will be
be accessible with the aliases shown. Any illegal charactes will be
substituted with "_" and duplicate names will have numbers suffixed.
It will also add a remote called AllDrives which shows all the shared
drives combined into one directory tree.
@@ -3385,9 +3322,9 @@ This takes an optional directory to trash which make this easier to
use via the API.
rclone backend untrash drive:directory
rclone backend --interactive untrash drive:directory subdir
rclone backend -i untrash drive:directory subdir
Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.
Use the -i flag to see what would be restored before restoring it.
Result:
@@ -3417,7 +3354,7 @@ component will be used as the file name.
If the destination is a drive backend then server-side copying will be
attempted if possible.
Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
Use the -i flag to see what would be copied before copying.
`,
}, {
Name: "exportformats",
@@ -3493,12 +3430,13 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
if err != nil {
return nil, err
}
re := regexp.MustCompile(`[^\w_. -]+`)
if _, ok := opt["config"]; ok {
lines := []string{}
upstreams := []string{}
names := make(map[string]struct{}, len(drives))
for i, drive := range drives {
name := fspath.MakeConfigName(drive.Name)
name := re.ReplaceAllString(drive.Name, "_")
for {
if _, found := names[name]; !found {
break
@@ -3574,16 +3512,10 @@ func (o *baseObject) Remote() string {
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t == hash.MD5 {
return o.md5sum, nil
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
if t == hash.SHA1 {
return o.sha1sum, nil
}
if t == hash.SHA256 {
return o.sha256sum, nil
}
return "", hash.ErrUnsupported
return o.md5sum, nil
}
func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
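The widened Hash method pairs with the MD5/SHA1/SHA256 set registered in Hashes earlier in this diff. A self-contained illustration of computing all three digests in a single pass over the data, which is the same idea as the MultiHasher used on upload:

```go
package main

import (
	"crypto/md5"
	"crypto/sha1"
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

func main() {
	// One read of the data feeds all three hashes, so a multi-hasher
	// never has to re-read the object per hash type.
	m, s1, s256 := md5.New(), sha1.New(), sha256.New()
	if _, err := io.Copy(io.MultiWriter(m, s1, s256), strings.NewReader("potato")); err != nil {
		panic(err)
	}
	fmt.Printf("md5:%x\nsha1:%x\nsha256:%x\n", m.Sum(nil), s1.Sum(nil), s256.Sum(nil))
}
```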
@@ -3637,6 +3569,7 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
// ModTime returns the modification time of the object
//
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *baseObject) ModTime(ctx context.Context) time.Time {
@@ -3867,7 +3800,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
data = data[:limit]
}
return io.NopCloser(bytes.NewReader(data)), nil
return ioutil.NopCloser(bytes.NewReader(data)), nil
}
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
@@ -3893,7 +3826,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
// Update the already existing object
//
// Copy the reader into the object updating modTime and size.
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
@@ -3926,7 +3859,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
newO, err := o.fs.newObjectWithInfo(ctx, o.remote, info)
newO, err := o.fs.newObjectWithInfo(ctx, src.Remote(), info)
if err != nil {
return err
}

View File

@@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"os"
"path"
@@ -77,7 +78,7 @@ var additionalMimeTypes = map[string]string{
// Load the example export formats into exportFormats for testing
func TestInternalLoadExampleFormats(t *testing.T) {
fetchFormatsOnce.Do(func() {})
buf, err := os.ReadFile(filepath.FromSlash("test/about.json"))
buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
var about struct {
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
ImportFormats map[string][]string `json:"importFormats,omitempty"`
@@ -243,15 +244,6 @@ func (f *Fs) InternalTestShouldRetry(t *testing.T) {
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, quotaExceededRetry)
assert.Equal(t, quotaExceededError, expectedQuotaError)
sqEItem := googleapi.ErrorItem{
Reason: "storageQuotaExceeded",
}
generic403.Errors[0] = sqEItem
expectedStorageQuotaError := fserrors.FatalError(&generic403)
storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, storageQuotaExceededRetry)
assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
}
func (f *Fs) InternalTestDocumentImport(t *testing.T) {
@@ -526,9 +518,6 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
opt := &filter.Opt{}
err := opt.MaxAge.Set("1h")
assert.NoError(t, err)

View File

@@ -8,19 +8,122 @@ package dropbox
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
)
const (
maxBatchSize = 1000 // max size the batch can be
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (async)
defaultBatchSizeAsync = 100 // default batch size if async
)
// batcher holds info about the current items waiting for upload
type batcher struct {
f *Fs // Fs this batch is part of
mode string // configured batch mode
size int // maximum size for batch
timeout time.Duration // idle timeout for batch
async bool // whether we are using async batching
in chan batcherRequest // incoming items to batch
closed chan struct{} // close to indicate batcher shut down
atexit atexit.FnHandle // atexit handle
shutOnce sync.Once // make sure we shutdown once only
wg sync.WaitGroup // wait for shutdown
}
// batcherRequest holds an incoming request with a place for a reply
type batcherRequest struct {
commitInfo *files.UploadSessionFinishArg
result chan<- batcherResponse
}
// Return true if batcherRequest is the quit request
func (br *batcherRequest) isQuit() bool {
return br.commitInfo == nil
}
// Send this to get the engine to quit
var quitRequest = batcherRequest{}
// batcherResponse holds a response to be delivered to clients waiting
// for a batch to complete.
type batcherResponse struct {
err error
entry *files.FileMetadata
}
// newBatcher creates a new batcher structure
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
}
async := false
switch mode {
case "sync":
if size <= 0 {
ci := fs.GetConfig(ctx)
size = ci.Transfers
}
if timeout <= 0 {
timeout = defaultTimeoutSync
}
case "async":
if size <= 0 {
size = defaultBatchSizeAsync
}
if timeout <= 0 {
timeout = defaultTimeoutAsync
}
async = true
case "off":
size = 0
default:
return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}
b := &batcher{
f: f,
mode: mode,
size: size,
timeout: timeout,
async: async,
in: make(chan batcherRequest, size),
closed: make(chan struct{}),
}
if b.Batching() {
b.atexit = atexit.Register(b.Shutdown)
b.wg.Add(1)
go b.commitLoop(context.Background())
}
return b, nil
}
// Batching returns true if batching is active
func (b *batcher) Batching() bool {
return b.size > 0
}
// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = f.pacer.Call(func() (bool, error) {
complete, err = f.srv.UploadSessionFinishBatchV2(arg)
err = b.f.pacer.Call(func() (bool, error) {
complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
@@ -37,10 +140,66 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
return complete, nil
}
// Called by the batcher to commit a batch
func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []*files.FileMetadata, errors []error) (err error) {
// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxSleepTime = 1 * time.Second
startTime := time.Now()
try := 1
for {
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
if remaining < 0 {
break
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
} else {
if batchStatus.Tag == "complete" {
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > maxSleepTime {
sleepTime = maxSleepTime
}
try++
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
}
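The polling loop above doubles its sleep and caps it at one second until BatchCommitTimeout expires; a tiny standalone reproduction of that schedule:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Same progression as finishBatchJobStatus: 100ms, 200ms, 400ms,
	// 800ms, then pinned at 1s until the commit timeout runs out.
	sleep, max := 100*time.Millisecond, time.Second
	for try := 1; try <= 6; try++ {
		fmt.Printf("try %d: sleep %v\n", try, sleep)
		sleep *= 2
		if sleep > max {
			sleep = max
		}
	}
}
```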
// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
}
}
}()
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
fs.Debugf(b.f, "Committing %s", desc)
// finalise the batch getting either a result or a job id to poll
complete, err := f.finishBatch(ctx, items)
complete, err := b.finishBatch(ctx, items)
if err != nil {
return err
}
@@ -51,13 +210,19 @@ func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinish
return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
}
// Format results for return
// Report results to clients
var (
errorTag = ""
errorCount = 0
)
for i := range results {
item := entries[i]
resp := batcherResponse{}
if item.Tag == "success" {
results[i] = item.Success
resp.entry = item.Success
} else {
errorTag := item.Tag
errorCount++
errorTag = item.Tag
if item.Failure != nil {
errorTag = item.Failure.Tag
if item.Failure.LookupFailed != nil {
@@ -70,9 +235,112 @@ func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinish
errorTag += "/" + item.Failure.PropertiesError.Tag
}
}
errors[i] = fmt.Errorf("upload failed: %s", errorTag)
resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
}
}
// Show signalled so no need to report error to clients from now on
signalled = true
// Report an error if any failed in the batch
if errorTag != "" {
return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}
fs.Debugf(b.f, "Committed %s", desc)
return nil
}
// commitLoop runs the commit engine in the background
func (b *batcher) commitLoop(ctx context.Context) {
var (
items []*files.UploadSessionFinishArg // current batch of uncommitted files
results []chan<- batcherResponse // current batch of clients awaiting results
idleTimer = time.NewTimer(b.timeout)
commit = func() {
err := b.commitBatch(ctx, items, results)
if err != nil {
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
}
items, results = nil, nil
}
)
defer b.wg.Done()
defer idleTimer.Stop()
idleTimer.Stop()
outer:
for {
select {
case req := <-b.in:
if req.isQuit() {
break outer
}
items = append(items, req.commitInfo)
results = append(results, req.result)
idleTimer.Stop()
if len(items) >= b.size {
commit()
} else {
idleTimer.Reset(b.timeout)
}
case <-idleTimer.C:
if len(items) > 0 {
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
commit()
}
}
}
// commit any remaining items
if len(items) > 0 {
commit()
}
}
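commitLoop is a textbook Go batching engine: accumulate until the batch is full or has been idle for the timeout, and flush whatever remains on shutdown. A stripped-down, runnable version of the same select/timer shape follows; note it uses a closed channel where the real code uses the quitRequest sentinel, precisely so that concurrent senders never write to a closed channel:

```go
package main

import (
	"fmt"
	"time"
)

// batchLoop mirrors commitLoop above: flush when the batch reaches
// size, when it has been idle for timeout, or when the input ends.
func batchLoop(in <-chan int, size int, timeout time.Duration, done chan<- struct{}) {
	defer close(done)
	var batch []int
	idle := time.NewTimer(timeout)
	idle.Stop()
	defer idle.Stop()
	commit := func() {
		fmt.Println("committing", batch)
		batch = nil
	}
	for {
		select {
		case item, ok := <-in:
			if !ok { // shutdown: flush the remainder
				if len(batch) > 0 {
					commit()
				}
				return
			}
			batch = append(batch, item)
			idle.Stop()
			if len(batch) >= size {
				commit()
			} else {
				idle.Reset(timeout)
			}
		case <-idle.C:
			if len(batch) > 0 {
				commit()
			}
		}
	}
}

func main() {
	in, done := make(chan int), make(chan struct{})
	go batchLoop(in, 3, 50*time.Millisecond, done)
	for i := 1; i <= 7; i++ {
		in <- i // prints batches [1 2 3] and [4 5 6]; 7 flushes on close
	}
	close(in)
	<-done
}
```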
// Shutdown finishes any pending batches then shuts everything down
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
if !b.Batching() {
return
}
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Commiting uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
//
// Note that we don't close b.in because that will
// cause write to closed channel in Commit when we are
// exiting due to a signal.
b.in <- quitRequest
b.wg.Wait()
})
}
// Commit commits the file using a batch call, first adding it to the
// batch and then waiting for the batch to complete in a synchronous
// way if async is not set.
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
select {
case <-b.closed:
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
default:
}
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
resp := make(chan batcherResponse, 1)
b.in <- batcherRequest{
commitInfo: commitInfo,
result: resp,
}
// If running async then don't wait for the result
if b.async {
return nil, nil
}
result := <-resp
return result.entry, result.err
}

View File

@@ -47,7 +47,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
@@ -59,7 +58,7 @@ import (
const (
rcloneClientID = "5jcck7diasz0rqy"
rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g"
defaultMinSleep = fs.Duration(10 * time.Millisecond)
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
// Upload chunk size - setting too small makes uploads slow.
@@ -122,14 +121,6 @@ var (
// Errors
errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode"))
// Configure the batcher
defaultBatcherOptions = batcher.Options{
MaxBatchSize: 1000,
DefaultTimeoutSync: 500 * time.Millisecond,
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 100,
}
)
// Gets an oauth config with the right scopes
@@ -161,7 +152,7 @@ func init() {
},
})
},
Options: append(append(oauthutil.SharedOptions, []fs.Option{{
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size (< %v).
@@ -191,9 +182,8 @@ client_secret) to use this option as currently rclone's default set of
permissions doesn't include "members.read". This can be added once
v1.55 or later is in use everywhere.
`,
Default: "",
Advanced: true,
Sensitive: true,
Default: "",
Advanced: true,
}, {
Name: "shared_files",
Help: `Instructs rclone to work on individual shared files.
@@ -220,9 +210,66 @@ shared folder.`,
Default: false,
Advanced: true,
}, {
Name: "pacer_min_sleep",
Default: defaultMinSleep,
Help: "Minimum time to sleep between API calls.",
Name: "batch_mode",
Help: `Upload file batching sync|async|off.
This sets the batch mode used by rclone.
For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)
This has 3 possible values
- off - no batching
- sync - batch uploads and check completion (default)
- async - batch upload and don't check completion
Rclone will close any outstanding batches when it exits which may cause
a delay on quit.
`,
Default: "sync",
Advanced: true,
}, {
Name: "batch_size",
Help: `Max number of files in upload batch.
This sets the batch size of files to upload. It has to be less than 1000.
By default this is 0 which means rclone will calculate the batch size
depending on the setting of batch_mode.
- batch_mode: async - default batch_size is 100
- batch_mode: sync - default batch_size is the same as --transfers
- batch_mode: off - not in use
Rclone will close any outstanding batches when it exits which may cause
a delay on quit.
Setting this is a great idea if you are uploading lots of small files
as it will make them a lot quicker. You can use --transfers 32 to
maximise throughput.
`,
Default: 0,
Advanced: true,
}, {
Name: "batch_timeout",
Help: `Max time to allow an idle upload batch before uploading.
If an upload batch is idle for more than this long then it will be
uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, {
Name: config.ConfigEncoding,
@@ -237,22 +284,22 @@ shared folder.`,
encoder.EncodeDel |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8,
}}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
}}...),
})
}
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
AsyncBatch bool `config:"async_batch"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote dropbox server
@@ -271,7 +318,7 @@ type Fs struct {
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
batcher *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata]
batcher *batcher // batch builder
}
// Object describes a dropbox object
@@ -395,13 +442,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name,
opt: *opt,
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
batcherOptions := defaultBatcherOptions
batcherOptions.Mode = f.opt.BatchMode
batcherOptions.Size = f.opt.BatchSize
batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
if err != nil {
return nil, err
}
@@ -493,7 +536,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
default:
return nil, err
}
// if the mount failed we have to abort here
// if the moint failed we have to abort here
}
// if the mount succeeded it's now a normal folder in the users root namespace
// we disable shared folder mode and proceed normally
@@ -676,7 +719,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
}
for _, entry := range res.Entries {
leaf := f.opt.Enc.ToStandardName(entry.Name)
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
entries = append(entries, d)
if err != nil {
return nil, err
@@ -863,7 +906,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
remote := path.Join(dir, leaf)
if folderInfo != nil {
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
entries = append(entries, d)
} else if fileInfo != nil {
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
@@ -882,7 +925,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Put the object
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1001,9 +1044,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1062,9 +1105,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1626,7 +1669,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
delta := int64(correctOffset) - int64(cursor.Offset)
skip += delta
what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
what := fmt.Sprintf("incorrect offset error receved: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
if skip < 0 {
return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
} else if skip == chunkSize {
@@ -1672,7 +1715,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
// If we are batching then we should have written all the data now
// store the commit info now for a batch commit
if o.fs.batcher.Batching() {
return o.fs.batcher.Commit(ctx, o.remote, args)
return o.fs.batcher.Commit(ctx, args)
}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1720,7 +1763,7 @@ func checkPathLength(name string) (err error) {
// Update the already existing object
//
// Copy the reader into the object updating modTime and size.
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {

View File

@@ -28,14 +28,14 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded
}
var errorRegex = regexp.MustCompile(`#(\d{1,3})`)
var errorRegex = regexp.MustCompile(`#\d{1,3}`)
func parseFichierError(err error) int {
matches := errorRegex.FindStringSubmatch(err.Error())
if len(matches) == 0 {
return 0
}
code, err := strconv.Atoi(matches[1])
code, err := strconv.Atoi(matches[0])
if err != nil {
fs.Debugf(nil, "failed parsing fichier error: %v", err)
return 0
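The regex change above is subtle: without a capture group, FindStringSubmatch returns only the full match including the leading '#', which strconv.Atoi rejects, so the parsed error code was always 0. The capture group plus matches[1] extracts just the digits. A runnable demonstration:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	errorRegex := regexp.MustCompile(`#(\d{1,3})`)
	matches := errorRegex.FindStringSubmatch("Flood detected: retry later #374")
	fmt.Printf("%q\n", matches) // ["#374" "374"]

	_, err := strconv.Atoi(matches[0]) // "#374": fails to parse
	fmt.Println(err)

	code, _ := strconv.Atoi(matches[1]) // "374": parses cleanly
	fmt.Println(code)
}
```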
@@ -118,9 +118,6 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
Single: 1,
Pass: f.opt.FilePassword,
}
if f.opt.CDN {
request.CDN = 1
}
opts := rest.Opts{
Method: "POST",
Path: "/download/get_token.cgi",
@@ -408,32 +405,6 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
return response, nil
}
func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) {
request := &MoveDirRequest{
FolderID: folderID,
DestinationFolderID: destinationFolderID,
Rename: newLeaf,
// DestinationUser: destinationUser,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/mv.cgi",
}
response = &MoveDirResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't move dir: %w", err)
}
return response, nil
}
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
request := &CopyFileRequest{
URLs: []string{url},
@@ -502,7 +473,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("didn't get an upload node: %w", err)
return nil, fmt.Errorf("didnt got an upload node: %w", err)
}
// fs.Debugf(f, "Got Upload node")

View File

@@ -1,4 +1,3 @@
// Package fichier provides an interface to the 1Fichier storage system.
package fichier
import (
@@ -38,9 +37,8 @@ func init() {
Description: "1Fichier",
NewFs: NewFs,
Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key",
Sensitive: true,
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key",
}, {
Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder",
@@ -55,11 +53,6 @@ func init() {
Name: "folder_password",
Advanced: true,
IsPassword: true,
}, {
Help: "Set if you wish to use CDN download links.",
Name: "cdn",
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -95,7 +88,6 @@ type Options struct {
SharedFolder string `config:"shared_folder"`
FilePassword string `config:"file_password"`
FolderPassword string `config:"folder_password"`
CDN bool `config:"cdn"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -340,7 +332,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(300e9) {
return nil, errors.New("File too big, can't upload")
return nil, errors.New("File too big, cant upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
@@ -488,51 +480,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove.
//
// If destination exists then return fs.ErrorDirExists.
//
// This is complicated by the fact that we can't use moveDir to move
// to a different directory AND rename at the same time as it can
// overwrite files in the source directory.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
if err != nil {
return err
}
srcIDnumeric, err := strconv.Atoi(srcID)
if err != nil {
return err
}
dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID)
if err != nil {
return err
}
var resp *MoveDirResponse
resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric)
if err != nil {
return fmt.Errorf("couldn't rename leaf: %w", err)
}
if resp.Status != "OK" {
return fmt.Errorf("couldn't rename leaf: %s", resp.Message)
}
srcFs.dirCache.FlushDir(srcRemote)
return nil
}
// Copy src to this remote using server side move operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
@@ -606,7 +553,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
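The deleted `_ fs.DirMover = (*Fs)(nil)` line belongs to Go's usual compile-time interface checklist: each assignment stops the build if *Fs no longer satisfies the interface, so removing DirMove has to be paired with removing its assertion. The idiom in isolation:

```go
package main

type Mover interface{ Move() error }

type Fs struct{}

func (*Fs) Move() error { return nil }

// Compile-time assertion: if (*Fs).Move is removed or its signature
// changes, this line stops compiling - no test run needed.
var _ Mover = (*Fs)(nil)

func main() {}
```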

View File

@@ -20,7 +20,6 @@ type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
Pass string `json:"pass,omitempty"`
CDN int `json:"cdn,omitempty"`
}
// RemoveFolderRequest is the request structure of the corresponding request
@@ -70,22 +69,6 @@ type MoveFileResponse struct {
URLs []string `json:"urls"`
}
// MoveDirRequest is the request structure of the corresponding request
type MoveDirRequest struct {
FolderID int `json:"folder_id"`
DestinationFolderID int `json:"destination_folder_id,omitempty"`
DestinationUser string `json:"destination_user"`
Rename string `json:"rename,omitempty"`
}
// MoveDirResponse is the response structure of the corresponding request
type MoveDirResponse struct {
Status string `json:"status"`
Message string `json:"message"`
OldName string `json:"old_name"`
NewName string `json:"new_name"`
}
// CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct {
URLs []string `json:"urls"`
@@ -101,7 +84,7 @@ type CopyFileResponse struct {
URLs []FileCopy `json:"urls"`
}
// FileCopy is used in the CopyFileResponse
// FileCopy is used in the the CopyFileResponse
type FileCopy struct {
FromURL string `json:"from_url"`
ToURL string `json:"to_url"`

View File

@@ -19,7 +19,7 @@ const (
timeFormatJSON = `"` + timeFormatParameters + `"`
)
// Time represents date and time information for the
// Time represents represents date and time information for the
// filefabric API
type Time time.Time
@@ -95,7 +95,7 @@ type Status struct {
// Warning string `json:"warning"` // obsolete
}
// Status satisfies the error interface
// Status statisfies the error interface
func (e *Status) Error() string {
return fmt.Sprintf("%s (%s)", e.Message, e.Code)
}

View File

@@ -20,6 +20,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -84,7 +85,6 @@ Leave blank normally.
Fill in to make rclone start with a directory of a given ID.
`,
Sensitive: true,
}, {
Name: "permanent_token",
Help: `Permanent Authentication Token.
@@ -98,7 +98,6 @@ These tokens are normally valid for several years.
For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
`,
Sensitive: true,
}, {
Name: "token",
Help: `Session Token.
@@ -108,8 +107,7 @@ usually valid for 1 hour.
Don't set this value - rclone will set it automatically.
`,
Advanced: true,
Sensitive: true,
Advanced: true,
}, {
Name: "token_expiry",
Help: `Token expiry time.
@@ -152,15 +150,15 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
m configmap.Mapper // to save config
srv *rest.Client // the connection to the server
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenMu sync.Mutex // hold when reading the token
token string // current access token
tokenExpiry time.Time // time the current token expires
tokenExpired atomic.Int32
canCopyWithName bool // set if detected that can use fi_name in copy
precision time.Duration // precision reported
tokenExpired int32 // read and written with atomic
canCopyWithName bool // set if detected that can use fi_name in copy
precision time.Duration // precision reported
}
// Object describes a filefabric object
@@ -243,7 +241,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
err = status // return the error from the RPC
code := status.GetCode()
if code == "login_token_expired" {
f.tokenExpired.Add(1)
atomic.AddInt32(&f.tokenExpired, 1)
} else {
for _, retryCode := range retryStatusCodes {
if code == retryCode.code {
@@ -323,12 +321,12 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
var refreshed = false
defer func() {
if refreshed {
f.tokenExpired.Store(0)
atomic.StoreInt32(&f.tokenExpired, 0)
}
f.tokenMu.Unlock()
}()
expired := f.tokenExpired.Load() != 0
expired := atomic.LoadInt32(&f.tokenExpired) != 0
if expired {
fs.Debugf(f, "Token invalid - refreshing")
}
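The token-expiry change above is the mechanical Go 1.19 migration from the free sync/atomic functions on an int32 field to the typed wrappers; both forms side by side (atomic.Int32 needs Go 1.19+):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	// Typed form (Go 1.19+), as in the newer lines of the diff.
	var expired atomic.Int32
	expired.Add(1)
	fmt.Println("expired:", expired.Load() != 0)
	expired.Store(0) // reset after a successful token refresh

	// Older free-function form, as in the lines being compared against.
	var expired2 int32
	atomic.AddInt32(&expired2, 1)
	fmt.Println("expired2:", atomic.LoadInt32(&expired2) != 0)
	atomic.StoreInt32(&expired2, 0)
}
```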
@@ -375,7 +373,7 @@ type params map[string]interface{}
// rpc calls the rpc.php method of the SME file fabric
//
// This is an entry point to all the method calls.
// This is an entry point to all the method calls
//
// If result is nil then resp.Body will need closing
func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKError, options []fs.OpenOption) (resp *http.Response, err error) {
@@ -680,7 +678,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error.
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -699,7 +697,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
// Put the object
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -785,9 +783,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -845,7 +843,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}
// Wait for the background task to complete if necessary
// Wait for the background task to complete if necessary
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
if taskID == "" || taskID == "0" {
// No task to wait for
@@ -958,9 +956,9 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1137,6 +1135,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// ModTime returns the modification time of the object
//
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1188,7 +1187,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, errors.New("can't download - no id")
}
if o.contentType == emptyMimeType {
return io.NopCloser(bytes.NewReader([]byte{})), nil
return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
}
fs.FixRangeOption(options, o.size)
resp, err := o.fs.rpc(ctx, "getFile", params{
@@ -1202,7 +1201,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

View File

@@ -28,7 +28,6 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/proxy"
"github.com/rclone/rclone/lib/readers"
)
@@ -49,15 +48,13 @@ func init() {
Description: "FTP",
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
Sensitive: true,
Name: "host",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
}, {
Name: "user",
Help: "FTP username.",
Default: currentUser,
Sensitive: true,
Name: "user",
Help: "FTP username.",
Default: currentUser,
}, {
Name: "port",
Help: "FTP port number.",
@@ -73,7 +70,7 @@ func init() {
When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
than port 21. Cannot be used in combination with explicit FTPS.`,
than port 21. Cannot be used in combination with explicit FTPS.`,
Default: false,
}, {
Name: "explicit_tls",
@@ -81,25 +78,11 @@ than port 21. Cannot be used in combination with explicit FTPS.`,
When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTPS.`,
to an encrypted one. Cannot be used in combination with implicit FTPS.`,
Default: false,
}, {
Name: "concurrency",
Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.
Note that setting this is very likely to cause deadlocks so it should
be used with care.
If you are doing a sync or copy then make sure concurrency is one more
than the sum of |--transfers| and |--checkers|.
If you use |--check-first| then it just needs to be one more than the
maximum of |--checkers| and |--transfers|.
So for |concurrency 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.
`, "|", "`", -1),
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited.",
Default: 0,
Advanced: true,
}, {
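
To make the arithmetic in the concurrency help above concrete: with rclone's default --transfers 4 and --checkers 8, a plain sync needs concurrency of at least 13 (4 + 8 + 1), while --check-first relaxes that to 9 (max(4, 8) + 1); leaving the option at 0 keeps connections unlimited and avoids the deadlock risk entirely.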
@@ -127,11 +110,6 @@ So for |concurrency 3| you'd use |--checkers 2 --transfers 2
Help: "Use MDTM to set modification time (VsFtpd quirk)",
Default: false,
Advanced: true,
}, {
Name: "force_list_hidden",
Help: "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.",
Default: false,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
@@ -175,18 +153,6 @@ Enabled by default. Use 0 to disable.`,
If this is set and no password is supplied then rclone will ask for a password
`,
Advanced: true,
}, {
Name: "socks_proxy",
Default: "",
Help: `Socks 5 proxy host.
Supports the format user:pass@host:port, user@host:port, host:port.
Example:
myUser:myPass@localhost:9005
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
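
For context on the socks_proxy option above, a minimal sketch of SOCKS5 dialing with golang.org/x/net/proxy, the same idea the backend's proxy.SOCKS5Dial helper provides; the proxy address, credentials and FTP host below are hypothetical:

package main

import (
	"log"
	"net"

	"golang.org/x/net/proxy"
)

func main() {
	// Hypothetical proxy credentials and address, mirroring the help text.
	auth := &proxy.Auth{User: "myUser", Password: "myPass"}
	dialer, err := proxy.SOCKS5("tcp", "localhost:9005", auth, &net.Dialer{})
	if err != nil {
		log.Fatal(err)
	}
	// All traffic to the FTP server now flows through the SOCKS5 proxy.
	conn, err := dialer.Dial("tcp", "ftp.example.com:21")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Println("connected via SOCKS5 to", conn.RemoteAddr())
}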
@@ -225,13 +191,11 @@ type Options struct {
DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
}
// Fs represents a remote FTP server
@@ -249,6 +213,7 @@ type Fs struct {
pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser
tlsConf *tls.Config
pacer *fs.Pacer // pacer for FTP connections
fGetTime bool // true if the ftp library accepts GetTime
fSetTime bool // true if the ftp library accepts SetTime
@@ -330,118 +295,48 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil
}
// Return a *textproto.Error if err contains one or nil otherwise
func textprotoError(err error) (errX *textproto.Error) {
if errors.As(err, &errX) {
return errX
}
return nil
}
// returns true if this FTP error should be retried
func isRetriableFtpError(err error) bool {
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
return true
}
}
return false
}
// shouldRetry returns a boolean as to whether this err deserve to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if isRetriableFtpError(err) {
return true, err
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable:
return true, err
}
}
return fserrors.ShouldRetry(err), err
}
// Get a TLS config with a unique session cache.
//
// We can't share session caches between connections.
//
// See: https://github.com/rclone/rclone/issues/7234
func (f *Fs) tlsConfig() *tls.Config {
var tlsConfig *tls.Config
if f.opt.TLS || f.opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
if f.opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
}
if f.opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
return tlsConfig
}
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server")
// tls.Config for this connection only. Will be used for data
// and control connections.
tlsConfig := f.tlsConfig()
// Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) {
fs.Debugf(f, "dial(%q,%q)", network, address)
defer func() {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
}()
baseDialer := fshttp.NewDialer(ctx)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
} else {
conn, err = baseDialer.Dial(network, address)
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
if f.tlsConf != nil && err == nil {
conn = tls.Client(conn, f.tlsConf)
}
if err != nil {
return nil, err
}
// Connect using cleartext only for non TLS
if tlsConfig == nil {
return conn, nil
}
// Initial connection only needs to be cleartext for explicit TLS
if f.opt.ExplicitTLS && initialConnection {
initialConnection = false
return conn, nil
}
// Upgrade connection to TLS
tlsConn := tls.Client(conn, tlsConfig)
// Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426
// See: https://github.com/jlaffaye/ftp/issues/282
if false {
err = tlsConn.HandshakeContext(ctx)
if err != nil {
_ = conn.Close()
return nil, err
}
}
return tlsConn, nil
}
ftpConfig := []ftp.DialOption{
ftp.DialWithContext(ctx),
ftp.DialWithDialFunc(dial),
return
}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}
if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf
// as a trigger for sending PSBZ and PROT options to server.
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
if err != nil {
return nil, err
}
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -458,9 +353,6 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.WritingMDTM {
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
}
if f.opt.ForceListHidden {
ftpConfig = append(ftpConfig, ftp.DialWithForceListHidden(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
@@ -524,7 +416,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
if tpErr := textprotoError(err); tpErr != nil {
var tpErr *textproto.Error
if !errors.As(err, &tpErr) {
nopErr := c.NoOp()
if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
@@ -596,6 +489,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
}
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
if opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
}
if opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx)
f := &Fs{
@@ -608,11 +514,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
pass: pass,
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
tlsConf: tlsConfig,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
PartialUploads: true,
}).Fill(ctx, f)
// set the pool drainer timer going
if f.opt.IdleTimeout > 0 {
@@ -660,7 +566,8 @@ func (f *Fs) Shutdown(ctx context.Context) error {
// translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error {
if errX := textprotoError(err); errX != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorObjectNotFound
@@ -671,7 +578,8 @@ func translateErrorFile(err error) error {
// translateErrorDir turns FTP errors into rclone errors if possible for a directory
func translateErrorDir(err error) error {
if errX := textprotoError(err); errX != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorDirNotFound
@@ -702,7 +610,8 @@ func (f *Fs) dirFromStandardPath(dir string) string {
// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
if remote == "" || remote == "." || remote == "/" {
fullPath := path.Join(f.root, remote)
if fullPath == "" || fullPath == "." || fullPath == "/" {
// if root, assume exists and synthesize an entry
return &ftp.Entry{
Name: "",
@@ -710,38 +619,13 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
Time: time.Now(),
}, nil
}
dir := path.Dir(fullPath)
base := path.Base(fullPath)
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, fmt.Errorf("findItem: %w", err)
}
// returns TRUE if MLST is supported which is required to call GetEntry
if c.IsTimePreciseInList() {
entry, err := c.GetEntry(f.opt.Enc.FromStandardPath(remote))
f.putFtpConnection(&c, err)
if err != nil {
err = translateErrorFile(err)
if err == fs.ErrorObjectNotFound {
return nil, nil
}
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusBadArguments:
err = nil
}
}
return nil, err
}
if entry != nil {
f.entryToStandard(entry)
}
return entry, nil
}
dir := path.Dir(remote)
base := path.Base(remote)
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
@@ -760,7 +644,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(ctx, path.Join(f.root, remote))
entry, err := f.findItem(ctx, remote)
if err != nil {
return nil, err
}
@@ -782,7 +666,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
// dirExists checks the directory pointed to by remote exists or not
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
entry, err := f.findItem(ctx, path.Join(f.root, remote))
entry, err := f.findItem(ctx, remote)
if err != nil {
return false, fmt.Errorf("dirExists: %w", err)
}
@@ -885,12 +769,11 @@ func (f *Fs) Hashes() hash.Set {
// Precision shows whether modified time is supported or not depending on the
// FTP server capabilities, namely whether FTP server:
// - accepts the MDTM command to get file time (fGetTime)
// or supports MLSD returning precise file time in the list (fLstTime)
// - accepts the MFMT command to set file time (fSetTime)
// or non-standard form of the MDTM command (fSetTime, too)
// used by VsFtpd for the same purpose (WritingMDTM)
//
// - accepts the MDTM command to get file time (fGetTime)
// or supports MLSD returning precise file time in the list (fLstTime)
// - accepts the MFMT command to set file time (fSetTime)
// or non-standard form of the MDTM command (fSetTime, too)
// used by VsFtpd for the same purpose (WritingMDTM)
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
func (f *Fs) Precision() time.Duration {
if (f.fGetTime || f.fLstTime) && f.fSetTime {
@@ -926,18 +809,32 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// getInfo reads the FileInfo for a path
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
file, err := f.findItem(ctx, remote)
dir := path.Dir(remote)
base := path.Base(remote)
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, err
} else if file != nil {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder,
return nil, fmt.Errorf("getInfo: %w", err)
}
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}
for i := range files {
file := files[i]
f.entryToStandard(file)
if file.Name == base {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder,
}
return info, nil
}
return info, nil
}
return nil, fs.ErrorObjectNotFound
}
@@ -968,7 +865,8 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
}
err = c.MakeDir(f.dirFromStandardPath(abspath))
f.putFtpConnection(&c, err)
if errX := textprotoError(err); errX != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
err = nil
@@ -1137,7 +1035,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if !o.fs.fSetTime {
fs.Debugf(o.fs, "SetModTime is not supported")
fs.Errorf(o.fs, "SetModTime is not supported")
return nil
}
c, err := o.fs.getFtpConnection(ctx)
@@ -1209,7 +1107,8 @@ func (f *ftpReadCloser) Close() error {
// mask the error if it was caused by a premature close
// NB StatusAboutToSend is to work around a bug in pureftpd
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
if errX := textprotoError(err); errX != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
err = nil
@@ -1235,33 +1134,22 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
}
}
var (
fd *ftp.Response
c *ftp.ServerConn
)
err = o.fs.pacer.Call(func() (bool, error) {
c, err = o.fs.getFtpConnection(ctx)
if err != nil {
return false, err // getFtpConnection has retries already
}
fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
}
return shouldRetry(ctx, err)
})
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return nil, fmt.Errorf("open: %w", err)
}
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, fmt.Errorf("open: %w", err)
}
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
return rc, nil
}
// Update the already existing object
//
// Copy the reader into the object updating modTime and size.
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
@@ -1287,10 +1175,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
// Ignore error 250 here - send by some servers
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
if err != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
}
}
if err != nil {

View File

@@ -34,9 +34,9 @@ func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
// test that big file uploads do not cause network i/o timeout
func (f *Fs) testUploadTimeout(t *testing.T) {
const (
fileSize = 100000000 // 100 MiB
idleTimeout = 1 * time.Second // small because test server is local
maxTime = 10 * time.Second // prevent test hangup
fileSize = 100000000 // 100 MiB
idleTimeout = 40 * time.Millisecond // small because test server is local
maxTime = 10 * time.Second // prevent test hangup
)
if testing.Short() {

View File

@@ -19,8 +19,8 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"strconv"
"strings"
@@ -82,8 +82,7 @@ func init() {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous")
envAuth, _ := m.Get("env_auth")
if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
if saFile != "" || saCreds != "" || anonymous == "true" {
return nil, nil
}
return oauthutil.ConfigOut("", &oauthutil.Options{
@@ -91,21 +90,15 @@ func init() {
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
Sensitive: true,
}, {
Name: "user_project",
Help: "User project.\n\nOptional - needed only for requester pays.",
Sensitive: true,
Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
Sensitive: true,
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
}, {
Name: "anonymous",
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
@@ -304,15 +297,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
}},
}, {
Name: "directory_markers",
Default: false,
Advanced: true,
Help: `Upload an empty object with a trailing slash when a new directory is created
Empty folders are unsupported for bucket based remotes; this option creates an empty
object ending with "/", to persist the folder.
`,
}, {
Name: "no_check_bucket",
Help: `If set, don't attempt to check the bucket exists or create it.
@@ -327,7 +311,7 @@ rclone does if you know the bucket exists already.
Help: `If set this will decompress gzip encoded objects.
It is possible to upload objects to GCS with "Content-Encoding: gzip"
set. Normally rclone will download these files as compressed objects.
set. Normally rclone will download these files as compressed objects.
If this flag is set then rclone will decompress these files with
"Content-Encoding: gzip" as they are received. This means that rclone
@@ -335,10 +319,6 @@ can't check the size and hash but the file contents will be decompressed.
`,
Advanced: true,
Default: false,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -346,17 +326,6 @@ can't check the size and hash but the file contents will be decompressed.
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}, {
Name: "env_auth",
Help: "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...),
})
}
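
Taken together, the option changes above are usage-level: with directory_markers set, an rclone mkdir remote:bucket/a/b persists otherwise-empty folders by uploading zero-length objects named a/ and a/b/ (see createDirectoryMarker further down); user_project is attached to every API call so requester-pays buckets bill the right project; and env_auth falls back to Google Application Default Credentials when no service account file or blob is configured.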
@@ -364,7 +333,6 @@ can't check the size and hash but the file contents will be decompressed.
// Options defines the configuration for this backend
type Options struct {
ProjectNumber string `config:"project_number"`
UserProject string `config:"user_project"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
Anonymous bool `config:"anonymous"`
@@ -375,10 +343,7 @@ type Options struct {
StorageClass string `config:"storage_class"`
NoCheckBucket bool `config:"no_check_bucket"`
Decompress bool `config:"decompress"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
DirectoryMarkers bool `config:"directory_markers"`
}
// Fs represents a remote storage server
@@ -474,7 +439,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
@@ -517,7 +482,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
@@ -530,11 +495,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
}
} else if opt.EnvAuth {
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
if err != nil {
@@ -560,17 +520,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(ctx, f)
if opt.DirectoryMarkers {
f.features.CanHaveEmptyDirectories = true
}
// Create a new authorized Drive client.
f.client = oAuthClient
gcsOpts := []option.ClientOption{option.WithHTTPClient(f.client)}
if opt.Endpoint != "" {
gcsOpts = append(gcsOpts, option.WithEndpoint(opt.Endpoint))
}
f.svc, err = storage.NewService(context.Background(), gcsOpts...)
f.svc, err = storage.NewService(context.Background(), option.WithHTTPClient(f.client))
if err != nil {
return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
}
@@ -579,11 +532,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Check to see if the object exists
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
get := f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx)
if f.opt.UserProject != "" {
get = get.UserProject(f.opt.UserProject)
}
_, err = get.Do()
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
return shouldRetry(ctx, err)
})
if err == nil {
@@ -631,7 +580,7 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories.
// Set recurse to read sub directories
//
// The remote has prefix removed from it and if addBucket is set
// then it adds the bucket to the start.
@@ -643,13 +592,9 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
directory += "/"
}
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
if !recurse {
list = list.Delimiter("/")
}
foundItems := 0
for {
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
@@ -665,7 +610,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
return err
}
if !recurse {
foundItems += len(objects.Prefixes)
var object storage.Object
for _, remote := range objects.Prefixes {
if !strings.HasSuffix(remote, "/") {
@@ -686,29 +630,22 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
}
}
foundItems += len(objects.Items)
for _, object := range objects.Items {
remote := f.opt.Enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
// is this a directory marker?
if isDirectory {
// Don't insert the root directory
if remote == directory {
continue
}
// process directory markers as directories
remote = strings.TrimRight(remote, "/")
}
remote = remote[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
err = fn(remote, object, isDirectory)
// is this a directory marker?
if isDirectory {
continue // skip directory marker
}
err = fn(remote, object, false)
if err != nil {
return err
}
@@ -718,17 +655,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
list.PageToken(objects.NextPageToken)
}
if f.opt.DirectoryMarkers && foundItems == 0 && directory != "" {
// Determine whether the directory exists or not by whether it has a marker
_, err := f.readObjectInfo(ctx, bucket, directory)
if err != nil {
if err == fs.ErrorObjectNotFound {
return fs.ErrorDirNotFound
}
return err
}
}
return nil
}
@@ -772,9 +698,6 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
return nil, errors.New("can't list buckets without project number")
}
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
if f.opt.UserProject != "" {
listBuckets = listBuckets.UserProject(f.opt.UserProject)
}
for {
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
@@ -875,7 +798,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -892,69 +815,10 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return f.Put(ctx, in, src, options...)
}
// Create directory marker file and parents
func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error {
if !f.opt.DirectoryMarkers || bucket == "" {
return nil
}
// Object to be uploaded
o := &Object{
fs: f,
modTime: time.Now(),
}
for {
_, bucketPath := f.split(dir)
// Don't create the directory marker if it is the bucket or at the very root
if bucketPath == "" {
break
}
o.remote = dir + "/"
// Check to see if object already exists
_, err := o.readObjectInfo(ctx)
if err == nil {
return nil
}
// Upload it if not
fs.Debugf(o, "Creating directory marker")
content := io.Reader(strings.NewReader(""))
err = o.Update(ctx, content, o)
if err != nil {
return fmt.Errorf("creating directory marker failed: %w", err)
}
// Now check parent directory exists
dir = path.Dir(dir)
if dir == "/" || dir == "." {
break
}
}
return nil
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
bucket, _ := f.split(dir)
e := f.checkBucket(ctx, bucket)
if e != nil {
return e
}
return f.createDirectoryMarker(ctx, bucket, dir)
}
// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote = strings.TrimRight(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
}
return f.Mkdir(ctx, dir)
return f.makeBucket(ctx, bucket)
}
// makeBucket creates the bucket if it doesn't exist
@@ -963,11 +827,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
list := f.svc.Objects.List(bucket).MaxResults(1).Context(ctx)
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
_, err = list.Do()
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
return shouldRetry(ctx, err)
})
if err == nil {
@@ -1002,11 +862,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
insertBucket = insertBucket.Context(ctx)
if f.opt.UserProject != "" {
insertBucket = insertBucket.UserProject(f.opt.UserProject)
}
_, err = insertBucket.Do()
_, err = insertBucket.Context(ctx).Do()
return shouldRetry(ctx, err)
})
}, nil)
@@ -1026,28 +882,12 @@ func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
// to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
bucket, directory := f.split(dir)
// Remove directory marker file
if f.opt.DirectoryMarkers && bucket != "" && dir != "" {
o := &Object{
fs: f,
remote: dir + "/",
}
fs.Debugf(o, "Removing directory marker")
err := o.Remove(ctx)
if err != nil {
return fmt.Errorf("removing directory marker failed: %w", err)
}
}
if bucket == "" || directory != "" {
return nil
}
return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) {
deleteBucket := f.svc.Buckets.Delete(bucket).Context(ctx)
if f.opt.UserProject != "" {
deleteBucket = deleteBucket.UserProject(f.opt.UserProject)
}
err = deleteBucket.Do()
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
return shouldRetry(ctx, err)
})
})
@@ -1060,16 +900,16 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.mkdirParent(ctx, remote)
err := f.checkBucket(ctx, dstBucket)
if err != nil {
return nil, err
}
@@ -1093,11 +933,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var rewriteResponse *storage.RewriteResponse
for {
err = f.pacer.Call(func() (bool, error) {
rewriteRequest = rewriteRequest.Context(ctx)
if f.opt.UserProject != "" {
rewriteRequest.UserProject(f.opt.UserProject)
}
rewriteResponse, err = rewriteRequest.Do()
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1207,17 +1043,8 @@ func (o *Object) setMetaData(info *storage.Object) {
// readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
bucket, bucketPath := o.split()
return o.fs.readObjectInfo(ctx, bucket, bucketPath)
}
// readObjectInfo reads the definition for an object
func (f *Fs) readObjectInfo(ctx context.Context, bucket, bucketPath string) (object *storage.Object, err error) {
err = f.pacer.Call(func() (bool, error) {
get := f.svc.Objects.Get(bucket, bucketPath).Context(ctx)
if f.opt.UserProject != "" {
get = get.UserProject(f.opt.UserProject)
}
object, err = get.Do()
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1289,11 +1116,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
if !o.fs.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
}
copyObject = copyObject.Context(ctx)
if o.fs.opt.UserProject != "" {
copyObject = copyObject.UserProject(o.fs.opt.UserProject)
}
newObject, err = copyObject.Do()
newObject, err = copyObject.Context(ctx).Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1310,9 +1133,6 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.UserProject != "" {
o.url = o.url + "&userProject=" + o.fs.opt.UserProject
}
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
if err != nil {
return nil, err
@@ -1356,14 +1176,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split()
// Create parent dir/bucket if not saving directory marker
if !strings.HasSuffix(o.remote, "/") {
err = o.fs.mkdirParent(ctx, o.remote)
if err != nil {
return err
}
err := o.fs.checkBucket(ctx, bucket)
if err != nil {
return err
}
modTime := src.ModTime(ctx)
@@ -1408,11 +1225,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
insertObject = insertObject.Context(ctx)
if o.fs.opt.UserProject != "" {
insertObject = insertObject.UserProject(o.fs.opt.UserProject)
}
newObject, err = insertObject.Do()
newObject, err = insertObject.Context(ctx).Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1427,11 +1240,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
deleteBucket := o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx)
if o.fs.opt.UserProject != "" {
deleteBucket = deleteBucket.UserProject(o.fs.opt.UserProject)
}
err = deleteBucket.Do()
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
return shouldRetry(ctx, err)
})
return err

View File

@@ -6,7 +6,6 @@ import (
"testing"
"github.com/rclone/rclone/backend/googlecloudstorage"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
@@ -17,17 +16,3 @@ func TestIntegration(t *testing.T) {
NilObject: (*googlecloudstorage.Object)(nil),
})
}
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestGoogleCloudStorage"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*googlecloudstorage.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
})
}

View File

@@ -1,4 +1,3 @@
// Package api provides types used by the Google Photos API.
package api
import (

View File

@@ -29,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
@@ -72,14 +71,6 @@ var (
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
// Configure the batcher
defaultBatcherOptions = batcher.Options{
MaxBatchSize: 50,
DefaultTimeoutSync: 1000 * time.Millisecond,
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 50,
}
)
// Register with Fs
@@ -120,7 +111,7 @@ will count towards storage in your Google Account.`)
}
return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: append(append(oauthutil.SharedOptions, []fs.Option{{
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "read_only",
Default: false,
Help: `Set to make the Google Photos backend read only.
@@ -167,7 +158,7 @@ listings and won't be transferred.`,
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}}...), defaultBatcherOptions.FsOptions("")...),
}}...),
})
}
@@ -178,9 +169,6 @@ type Options struct {
StartYear int `config:"start_year"`
IncludeArchived bool `config:"include_archived"`
Enc encoder.MultiEncoder `config:"encoding"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
}
// Fs represents a remote storage server
@@ -190,7 +178,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
unAuth *rest.Client // unauthenticated http client
srv *rest.Client // the connection to the server
srv *rest.Client // the connection to the server
ts *oauthutil.TokenSource // token source for oauth2
pacer *fs.Pacer // To pace the API calls
startTime time.Time // time Fs was started - used for datestamps
@@ -199,7 +187,6 @@ type Fs struct {
uploadedMu sync.Mutex // to protect the below
uploaded dirtree.DirTree // record of uploaded items
createMu sync.Mutex // held when creating albums to prevent dupes
batcher *batcher.Batcher[uploadedItem, *api.MediaItem]
}
// Object describes a storage object
@@ -325,14 +312,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
albums: map[bool]*albums{},
uploaded: dirtree.New(),
}
batcherOptions := defaultBatcherOptions
batcherOptions.Mode = f.opt.BatchMode
batcherOptions.Size = f.opt.BatchSize
batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
if err != nil {
return nil, err
}
f.features = (&fs.Features{
ReadMimeType: true,
}).Fill(ctx, f)
@@ -682,7 +661,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -802,13 +781,6 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
f.batcher.Shutdown()
return nil
}
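
// Taken together, the batcher changes above follow lib/batcher's lifecycle
// as it appears elsewhere in this diff (a recap of the calls shown there,
// not a new API): construct once in NewFs, commit per upload, flush on exit.
//
//	f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
//	info, err := f.batcher.Commit(ctx, o.remote, uploaded) // when Batching()
//	f.batcher.Shutdown()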
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -989,82 +961,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return resp.Body, err
}
// input to the batcher
type uploadedItem struct {
AlbumID string // desired album
UploadToken string // upload ID
}
// Commit a batch of items to albumID returning the errors in errors
func (f *Fs) commitBatchAlbumID(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error, albumID string) {
// Create the media item from an UploadToken, optionally adding to an album
opts := rest.Opts{
Method: "POST",
Path: "/mediaItems:batchCreate",
}
var request = api.BatchCreateRequest{
AlbumID: albumID,
}
itemsInBatch := 0
for i := range items {
if items[i].AlbumID == albumID {
request.NewMediaItems = append(request.NewMediaItems, api.NewMediaItem{
SimpleMediaItem: api.SimpleMediaItem{
UploadToken: items[i].UploadToken,
},
})
itemsInBatch++
}
}
var result api.BatchCreateResponse
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
err = fmt.Errorf("failed to create media item: %w", err)
}
if err == nil && len(result.NewMediaItemResults) != itemsInBatch {
err = fmt.Errorf("bad response to BatchCreate expecting %d items but got %d", itemsInBatch, len(result.NewMediaItemResults))
}
j := 0
for i := range items {
if items[i].AlbumID == albumID {
if err == nil {
media := &result.NewMediaItemResults[j]
if media.Status.Code != 0 {
errors[i] = fmt.Errorf("upload failed: %s (%d)", media.Status.Message, media.Status.Code)
} else {
results[i] = &media.MediaItem
}
} else {
errors[i] = err
}
j++
}
}
}
// Called by the batcher to commit a batch
func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error) (err error) {
// Discover all the AlbumIDs as we have to upload these separately
//
// Should maybe have one batcher per AlbumID
albumIDs := map[string]struct{}{}
for i := range items {
albumIDs[items[i].AlbumID] = struct{}{}
}
// batch the albums
for albumID := range albumIDs {
// errors returned in errors
f.commitBatchAlbumID(ctx, items, results, errors, albumID)
}
return nil
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
@@ -1125,26 +1021,37 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.New("empty upload token")
}
uploaded := uploadedItem{
AlbumID: albumID,
UploadToken: uploadToken,
// Create the media item from an UploadToken, optionally adding to an album
opts = rest.Opts{
Method: "POST",
Path: "/mediaItems:batchCreate",
}
// Save the upload into an album
var info *api.MediaItem
if o.fs.batcher.Batching() {
info, err = o.fs.batcher.Commit(ctx, o.remote, uploaded)
} else {
errors := make([]error, 1)
results := make([]*api.MediaItem, 1)
err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errors)
if err != nil {
err = errors[0]
info = results[0]
}
var request = api.BatchCreateRequest{
AlbumID: albumID,
NewMediaItems: []api.NewMediaItem{
{
SimpleMediaItem: api.SimpleMediaItem{
UploadToken: uploadToken,
},
},
},
}
o.setMetaData(info)
var result api.BatchCreateResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("failed to create media item: %w", err)
}
if len(result.NewMediaItemResults) != 1 {
return errors.New("bad response to BatchCreate wrong number of items")
}
mediaItemResult := result.NewMediaItemResults[0]
if mediaItemResult.Status.Code != 0 {
return fmt.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
}
o.setMetaData(&mediaItemResult.MediaItem)
// Add upload to internal storage
if pattern.isUpload {

View File

@@ -3,7 +3,7 @@ package googlephotos
import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"path"
"testing"
@@ -12,6 +12,7 @@ import (
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
@@ -55,7 +56,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
@@ -98,7 +99,7 @@ func TestIntegration(t *testing.T) {
t.Run("ObjectOpen", func(t *testing.T) {
in, err := dstObj.Open(ctx)
require.NoError(t, err)
buf, err := io.ReadAll(in)
buf, err := ioutil.ReadAll(in)
require.NoError(t, err)
require.NoError(t, in.Close())
assert.True(t, len(buf) > 1000)
@@ -220,7 +221,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()

View File

@@ -315,7 +315,7 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
// featureFilter creates a filter for the Feature enum
//
// The API only supports one feature, FAVORITES, so hardcode that feature.
// The API only supports one feature, FAVORITES, so hardcode that feature
//
// https://developers.google.com/photos/library/reference/rest/v1/mediaItems/search#FeatureFilter
func featureFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter) {

View File

@@ -161,7 +161,7 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
}
accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err)
accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
doneCount++
}
})

View File

@@ -166,7 +166,6 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)

View File

@@ -23,7 +23,6 @@ func TestIntegration(t *testing.T) {
NilObject: (*hasher.Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"OpenChunkWriter",
},
UnimplementableObjectMethods: []string{},
}

View File

@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"path"
"time"
@@ -117,7 +118,7 @@ func (o *Object) updateHashes(ctx context.Context) error {
defer func() {
_ = r.Close()
}()
if _, err = io.Copy(io.Discard, r); err != nil {
if _, err = io.Copy(ioutil.Discard, r); err != nil {
fs.Infof(o, "update failed (copy): %v", err)
return err
}

View File

@@ -21,7 +21,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
)
// Fs represents a HDFS server
@@ -32,15 +31,8 @@ type Fs struct {
opt Options // options for this backend
ci *fs.ConfigInfo // global config
client *hdfs.Client
pacer *fs.Pacer // pacer for API calls
}
const (
minSleep = 20 * time.Millisecond
maxSleep = 10 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
func getKerberosClient() (*krb.Client, error) {
configPath := os.Getenv("KRB5_CONFIG")
@@ -122,7 +114,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt: *opt,
ci: fs.GetConfig(ctx),
client: client,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
@@ -274,9 +265,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//

View File

@@ -1,7 +1,6 @@
//go:build !plan9
// +build !plan9
// Package hdfs provides an interface to the HDFS storage system.
package hdfs
import (
@@ -19,10 +18,9 @@ func init() {
Description: "Hadoop distributed file system",
NewFs: NewFs,
Options: []fs.Option{{
Name: "namenode",
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true,
Sensitive: true,
Name: "namenode",
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true,
}, {
Name: "username",
Help: "Hadoop user name.",
@@ -30,7 +28,6 @@ func init() {
Value: "root",
Help: "Connect to hdfs as root.",
}},
Sensitive: true,
}, {
Name: "service_principal_name",
Help: `Kerberos service principal name for the namenode.
@@ -38,16 +35,15 @@ func init() {
Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
Advanced: true,
Sensitive: true,
Advanced: true,
}, {
Name: "data_transfer_protection",
Help: `Kerberos data transfer protection: authentication|integrity|privacy.
Specifies whether or not authentication, data signature integrity
checks, and wire encryption are required when communicating with
the datanodes. Possible values are 'authentication', 'integrity'
and 'privacy'. Used only with KERBEROS enabled.`,
checks, and wire encryption are required when communicating with the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
Examples: []fs.OptionExample{{
Value: "privacy",
Help: "Ensure authentication, integrity and encryption enabled.",

View File

@@ -5,12 +5,10 @@ package hdfs
import (
"context"
"errors"
"io"
"path"
"time"
"github.com/colinmarc/hdfs/v2"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
@@ -108,7 +106,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
realpath := o.fs.realpath(o.remote)
realpath := o.fs.realpath(src.Remote())
dirname := path.Dir(realpath)
fs.Debugf(o.fs, "update [%s]", realpath)
@@ -143,23 +141,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// If the datanodes have acknowledged all writes but not yet
// to the namenode, FileWriter.Close can return ErrReplicating
// (wrapped in an os.PathError). This indicates that all data
// has been written, but the lease is still open for the file.
//
// It is safe in this case to either ignore the error (and let
// the lease expire on its own) or to call Close multiple
// times until it completes without an error. The Java client,
// for context, always chooses to retry, with exponential
// backoff.
err = o.fs.pacer.Call(func() (bool, error) {
err := out.Close()
if err == nil {
return false, nil
}
return errors.Is(err, hdfs.ErrReplicating), err
})
err = out.Close()
if err != nil {
cleanup()
return err

View File

@@ -294,6 +294,15 @@ func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType Cop
return &result, nil
}
// copyDirectory copies the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
}
// moveDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
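For context, the copy and move helpers in this backend are thin wrappers over a single copyOrMove implementation that differ only in their flags. A minimal sketch of that dispatch pattern, with invented names rather than the HiDrive backend's actual types:

package hidriveexample

import "context"

type onExistAction int

type apiObject struct{ path string }

// copyOrMove is the single shared implementation; the booleans select
// directory handling and modification-time preservation.
func copyOrMove(ctx context.Context, isDirectory, preserveModTime bool, source, destination string, onExist onExistAction) (*apiObject, error) {
	// ... perform the API request here ...
	return &apiObject{path: destination}, nil
}

func copyDirectory(ctx context.Context, source, destination string, onExist onExistAction) (*apiObject, error) {
	return copyOrMove(ctx, true, true, source, destination, onExist)
}

func moveDirectory(ctx context.Context, source, destination string, onExist onExistAction) (*apiObject, error) {
	return copyOrMove(ctx, true, false, source, destination, onExist)
}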

View File

@@ -2,7 +2,7 @@
package hidrive
// FIXME HiDrive only supports file or folder names of 255 characters or less.
// Operations that create files or folders with longer names will throw an HTTP error:
// Operations that create files or folders with longer names will throw an HTTP error:
// - 422 Unprocessable Entity
// A more graceful way for rclone to handle this may be desirable.
@@ -330,7 +330,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, transaction)
}
// Do not allow the root-prefix to be nonexistent nor a directory,
// Do not allow the root-prefix to be non-existent nor a directory,
// but it can be empty.
if f.opt.RootPrefix != "" {
item, err := f.fetchMetadataForPath(ctx, f.opt.RootPrefix, api.HiDriveObjectNoMetadataFields)
@@ -338,7 +338,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("could not access root-prefix: %w", err)
}
if item.Type != api.HiDriveObjectTypeDirectory {
return nil, errors.New("the root-prefix needs to point to a valid directory or be empty")
return nil, errors.New("The root-prefix needs to point to a valid directory or be empty")
}
}
@@ -623,7 +623,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// should be retried after the parent-directories of the destination have been created.
// If so, it will create the parent-directories.
//
// If any errors arise while finding the source or
// If any errors arise while finding the source or
// creating the parent-directory those will be returned.
// Otherwise returns the originalError.
func (f *Fs) shouldRetryAndCreateParents(ctx context.Context, destinationPath string, sourcePath string, originalError error) (bool, error) {
@@ -961,7 +961,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} else {
_, _, err = o.fs.uploadFileChunked(ctx, resolvedPath, in, modTime, int(o.fs.opt.UploadChunkSize), o.fs.opt.UploadConcurrency)
}
// Try to check if object was updated, either way.
// Try to check if object was updated, either way.
// Metadata should be updated even if the upload fails.
info, metaErr = o.fs.fetchMetadataForPath(ctx, resolvedPath, api.HiDriveObjectWithMetadataFields)
} else {

View File

@@ -138,7 +138,7 @@ var testTable = []struct {
// pattern describes how to use data to construct the hash-input.
// For every entry n at even indices this repeats the data n times.
// For every entry m at odd indices this repeats a null-byte m times.
// The input-data is constructed by concatenating the results in order.
// The input-data is constructed by concatenating the results in order.
pattern []int64
out []byte
name string

View File

@@ -1,4 +1,3 @@
// Package internal provides utilities for HiDrive.
package internal
import (

View File

@@ -13,6 +13,7 @@ import (
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"time"
@@ -304,7 +305,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fs: f,
remote: remote,
}
err := o.head(ctx)
err := o.stat(ctx)
if err != nil {
return nil, err
}
@@ -316,6 +317,15 @@ func (f *Fs) url(remote string) string {
return f.endpointURL + rest.URLPathEscape(remote)
}
// parse s into an int64, on failure return def
func parseInt64(s string, def int64) int64 {
n, e := strconv.ParseInt(s, 10, 64)
if e != nil {
return def
}
return n
}
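The helper above deliberately swallows parse errors so callers can treat a missing or malformed header value as "unknown". A small usage sketch; sizeFromHeader is an invented name:

package httpexample

import "strconv"

// parseInt64 parses s into an int64, returning def on failure.
func parseInt64(s string, def int64) int64 {
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return def
	}
	return n
}

// sizeFromHeader maps a missing or malformed Content-Length to -1,
// rclone's convention for "size unknown".
func sizeFromHeader(contentLength string) int64 {
	return parseInt64(contentLength, -1)
}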
// Errors returned by parseName
var (
errURLJoinFailed = errors.New("URLJoin failed")
@@ -490,12 +500,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fs: f,
remote: remote,
}
switch err := file.head(ctx); err {
switch err := file.stat(ctx); err {
case nil:
add(file)
case fs.ErrorNotAFile:
// ...found a directory not a file
add(fs.NewDir(remote, time.Time{}))
add(fs.NewDir(remote, timeUnset))
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
@@ -507,7 +517,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name = strings.TrimRight(name, "/")
remote := path.Join(dir, name)
if isDir {
add(fs.NewDir(remote, time.Time{}))
add(fs.NewDir(remote, timeUnset))
} else {
in <- remote
}
@@ -569,8 +579,8 @@ func (o *Object) url() string {
return o.fs.url(o.remote)
}
// head sends a HEAD request to update info fields in the Object
func (o *Object) head(ctx context.Context) error {
// stat updates the info field in the Object
func (o *Object) stat(ctx context.Context) error {
if o.fs.opt.NoHead {
o.size = -1
o.modTime = timeUnset
@@ -591,19 +601,13 @@ func (o *Object) head(ctx context.Context) error {
if err != nil {
return fmt.Errorf("failed to stat: %w", err)
}
return o.decodeMetadata(ctx, res)
}
// decodeMetadata updates info fields in the Object according to HTTP response headers
func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
t, err := http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
t = timeUnset
}
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
o.modTime = t
o.contentType = res.Header.Get("Content-Type")
o.size = rest.ParseSizeFromHeaders(res.Header)
// If NoSlash is set then check ContentType to see if it is a directory
if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType)
@@ -649,9 +653,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
if err = o.decodeMetadata(ctx, res); err != nil {
return nil, fmt.Errorf("decodeMetadata failed: %w", err)
}
return res.Body, nil
}
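The decodeMetadata helper being removed above reads the object's size, modification time and content type out of HTTP response headers, so both HEAD and GET responses can populate the object. A standalone sketch of that decoding, with invented type names:

package httpexample

import (
	"net/http"
	"time"
)

type objectMeta struct {
	size        int64
	modTime     time.Time
	contentType string
}

// decodeMeta extracts the fields decodeMetadata reads: Last-Modified,
// Content-Length (via ContentLength) and Content-Type.
func decodeMeta(res *http.Response) objectMeta {
	modTime, err := http.ParseTime(res.Header.Get("Last-Modified"))
	if err != nil {
		modTime = time.Unix(0, 0) // "unset" sentinel, like timeUnset above
	}
	size := int64(-1) // unknown
	if res.ContentLength >= 0 {
		size = res.ContentLength
	}
	return objectMeta{
		size:        size,
		modTime:     modTime,
		contentType: res.Header.Get("Content-Type"),
	}
}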

View File

@@ -3,7 +3,7 @@ package http
import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
@@ -33,21 +33,20 @@ var (
lineEndSize = 1
)
// prepareServer prepares the test server and shuts it down automatically
// when the test completes.
func prepareServer(t *testing.T) configmap.Simple {
// prepareServer prepares the test server and returns a function to tidy it up afterwards
func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))
// verify the file path is correct, and also check which line endings
// are used to get sizes right ("\n" except on Windows, but even there
// we may have "\n" or "\r\n" depending on git crlf setting)
fileList, err := os.ReadDir(filesPath)
fileList, err := ioutil.ReadDir(filesPath)
require.NoError(t, err)
require.Greater(t, len(fileList), 0)
for _, file := range fileList {
if !file.IsDir() {
data, _ := os.ReadFile(filepath.Join(filesPath, file.Name()))
data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
if strings.HasSuffix(string(data), "\r\n") {
lineEndSize = 2
}
@@ -79,21 +78,20 @@ func prepareServer(t *testing.T) configmap.Simple {
"url": ts.URL,
"headers": strings.Join(headers, ","),
}
t.Cleanup(ts.Close)
return m
// return a function to tidy up
return m, ts.Close
}
// prepare prepares the test server and shuts it down automatically
// when the test completes.
func prepare(t *testing.T) fs.Fs {
m := prepareServer(t)
// prepare prepares the test server and returns a function to tidy it up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
m, tidy := prepareServer(t)
// Instantiate it
f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
return f
return f, tidy
}
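The diff above trades a returned tidy function for t.Cleanup, which registers the shutdown with the test framework instead of relying on every caller to defer it. Both styles side by side, as a sketch:

package httpexample

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

// newServerCleanup shuts the server down automatically when the test ends.
func newServerCleanup(t *testing.T) *httptest.Server {
	ts := httptest.NewServer(http.NotFoundHandler())
	t.Cleanup(ts.Close)
	return ts
}

// newServerTidy returns a tidy function the caller must remember to defer.
func newServerTidy() (*httptest.Server, func()) {
	ts := httptest.NewServer(http.NotFoundHandler())
	return ts, ts.Close
}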
func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
@@ -136,19 +134,22 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
}
func TestListRoot(t *testing.T) {
f := prepare(t)
f, tidy := prepare(t)
defer tidy()
testListRoot(t, f, false)
}
func TestListRootNoSlash(t *testing.T) {
f := prepare(t)
f, tidy := prepare(t)
f.(*Fs).opt.NoSlash = true
defer tidy()
testListRoot(t, f, true)
}
func TestListSubDir(t *testing.T) {
f := prepare(t)
f, tidy := prepare(t)
defer tidy()
entries, err := f.List(context.Background(), "three")
require.NoError(t, err)
@@ -165,7 +166,8 @@ func TestListSubDir(t *testing.T) {
}
func TestNewObject(t *testing.T) {
f := prepare(t)
f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
@@ -192,69 +194,36 @@ func TestNewObject(t *testing.T) {
}
func TestOpen(t *testing.T) {
m := prepareServer(t)
f, tidy := prepare(t)
defer tidy()
for _, head := range []bool{false, true} {
if !head {
m.Set("no_head", "true")
}
f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
for _, rangeRead := range []bool{false, true} {
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
if !head {
// Test mod time is still indeterminate
tObj := o.ModTime(context.Background())
assert.Equal(t, time.Duration(0), time.Unix(0, 0).Sub(tObj))
// Test file size is still indeterminate
assert.Equal(t, int64(-1), o.Size())
}
var data []byte
if !rangeRead {
// Test normal read
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
}
} else {
// Test with range request
fd, err := o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
}
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
require.NoError(t, err)
tFile := fi.ModTime()
// Test the time is always correct on the object after file open
tObj := o.ModTime(context.Background())
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
if !rangeRead {
// Test the file size
assert.Equal(t, int64(len(data)), o.Size())
}
}
// Test normal read
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err := ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
}
// Test with range request
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
}
func TestMimeType(t *testing.T) {
f := prepare(t)
f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
@@ -265,7 +234,8 @@ func TestMimeType(t *testing.T) {
}
func TestIsAFileRoot(t *testing.T) {
m := prepareServer(t)
m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
@@ -274,7 +244,8 @@ func TestIsAFileRoot(t *testing.T) {
}
func TestIsAFileSubDir(t *testing.T) {
m := prepareServer(t)
m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)

62
backend/hubic/auth.go Normal file
View File

@@ -0,0 +1,62 @@
package hubic
import (
"context"
"net/http"
"time"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/fs"
)
// auth is an authenticator for swift
type auth struct {
f *Fs
}
// newAuth creates a swift authenticator
func newAuth(f *Fs) *auth {
return &auth{
f: f,
}
}
// Request constructs an http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO())
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
}
return nil, err
}
// Response parses the result of an http request
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
return nil
}
// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string { // nolint
return a.f.credentials.Endpoint
}
// The access token
func (a *auth) Token() string {
return a.f.credentials.Token
}
// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
return ""
}
// Check the interfaces are satisfied
var _ swift.Authenticator = (*auth)(nil)

200
backend/hubic/hubic.go Normal file
View File

@@ -0,0 +1,200 @@
// Package hubic provides an interface to the Hubic object storage
// system.
package hubic
// This uses the normal swift mechanism to update the credentials and
// ignores the expires field returned by the Hubic API. This may need
// to be revisited after some actual experience.
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
swiftLib "github.com/ncw/swift/v2"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)
const (
rcloneClientID = "api_hubic_svWP970PvSWbw5G3PzrAqZ6X2uHeZBPI"
rcloneEncryptedClientSecret = "leZKCcqy9movLhDWLVXX8cSLp_FzoiAPeEJOIOMRw1A5RuC4iLEPDYPWVF46adC_MVonnLdVEOTHVstfBOZ_lY4WNp8CK_YWlpRZ9diT5YI"
)
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: []string{
"credentials.r", // Read OpenStack credentials
},
Endpoint: oauth2.Endpoint{
AuthURL: "https://api.hubic.com/oauth/auth/",
TokenURL: "https://api.hubic.com/oauth/token/",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
},
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
})
}
// credentials is the JSON returned from the Hubic API to read the
// OpenStack credentials
type credentials struct {
Token string `json:"token"` // OpenStack token
Endpoint string `json:"endpoint"` // OpenStack endpoint
Expires string `json:"expires"` // Expires date - e.g. "2015-11-09T14:24:56+01:00"
}
// Fs represents a remote hubic
type Fs struct {
fs.Fs // wrapped Fs
features *fs.Features // optional features
client *http.Client // client for oauth api
credentials credentials // returned from the Hubic API
expires time.Time // time credentials expire
}
// Object describes a swift object
type Object struct {
*swift.Object
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Object.String()
}
// ------------------------------------------------------------
// String converts this Fs to a string
func (f *Fs) String() string {
if f.Fs == nil {
return "Hubic"
}
return fmt.Sprintf("Hubic %s", f.Fs.String())
}
// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
resp, err := f.client.Do(req)
if err != nil {
return err
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode < 200 || resp.StatusCode > 299 {
body, _ := ioutil.ReadAll(resp.Body)
bodyStr := strings.TrimSpace(strings.ReplaceAll(string(body), "\n", " "))
return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
}
decoder := json.NewDecoder(resp.Body)
var result credentials
err = decoder.Decode(&result)
if err != nil {
return err
}
// fs.Debugf(f, "Got credentials %+v", result)
if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
return errors.New("couldn't read token, result and expired from credentials")
}
f.credentials = result
expires, err := time.Parse(time.RFC3339, result.Expires)
if err != nil {
return err
}
f.expires = expires
fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, time.Until(f.expires))
return nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, fmt.Errorf("failed to configure Hubic: %w", err)
}
f := &Fs{
client: client,
}
// Make the swift Connection
ci := fs.GetConfig(ctx)
c := &swiftLib.Connection{
Auth: newAuth(f),
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(ctx),
}
err = c.Authenticate(ctx)
if err != nil {
return nil, fmt.Errorf("error authenticating swift connection: %w", err)
}
// Parse config into swift.Options struct
opt := new(swift.Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Make inner swift Fs from the connection
swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
f.Fs = swiftFs
f.features = f.Fs.Features().Wrap(f)
return f, err
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.Fs
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
)
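The credential handling above hinges on the "expires" timestamp returned by the Hubic API. A small sketch of parsing it and reporting the remaining validity, mirroring what getCredentials logs; expiryInfo is an invented name:

package hubicexample

import (
	"fmt"
	"time"
)

// expiryInfo parses an RFC 3339 "expires" field and reports how long
// the swift token remains valid.
func expiryInfo(expires string) (time.Time, error) {
	t, err := time.Parse(time.RFC3339, expires)
	if err != nil {
		return time.Time{}, err
	}
	fmt.Printf("token valid for %v\n", time.Until(t))
	return t, nil
}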

View File

@@ -0,0 +1,19 @@
// Test Hubic filesystem interface
package hubic_test
import (
"testing"
"github.com/rclone/rclone/backend/hubic"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil),
SkipFsCheckWrap: true,
SkipObjectCheckWrap: true,
})
}

View File

@@ -133,13 +133,11 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
},
Options: []fs.Option{{
Name: "access_key_id",
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
Sensitive: true,
Name: "access_key_id",
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
}, {
Name: "secret_access_key",
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
Sensitive: true,
Name: "secret_access_key",
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
}, {
// their official client (https://github.com/jjjake/internetarchive) hardcodes following the two
Name: "endpoint",
@@ -229,7 +227,7 @@ type Object struct {
rawData json.RawMessage
}
// IAFile represents a subset of object in MetadataResponse.Files
// IAFile represents a subset of object in MetadataResponse.Files
type IAFile struct {
Name string `json:"name"`
// Source string `json:"source"`
@@ -245,7 +243,7 @@ type IAFile struct {
rawData json.RawMessage
}
// MetadataResponse represents subset of the JSON object returned by (frontend)/metadata/
// MetadataResponse represents subset of the JSON object returned by (frontend)/metadata/
type MetadataResponse struct {
Files []IAFile `json:"files"`
ItemSize int64 `json:"item_size"`
@@ -579,9 +577,9 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1275,7 +1273,7 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
}
// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
func quotePath(s string) string {
seg := strings.Split(s, "/")
newValues := []string{}
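The function body is truncated in this view; the idea is to escape each path segment individually while leaving the "/" separators intact, matching Python's urllib.parse.quote with its default safe characters. A self-contained sketch under that assumption:

package iaexample

import (
	"net/url"
	"strings"
)

// quotePath escapes each path segment but keeps "/" separators literal,
// analogous to urllib.parse.quote with the default safe="/".
func quotePath(s string) string {
	segments := strings.Split(s, "/")
	escaped := make([]string, 0, len(segments))
	for _, seg := range segments {
		escaped = append(escaped, url.PathEscape(seg))
	}
	return strings.Join(escaped, "/")
}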

View File

@@ -1,4 +1,3 @@
// Package api provides types used by the Jottacloud API.
package api
import (
@@ -395,7 +394,7 @@ type JottaFile struct {
State string `xml:"currentRevision>state"`
CreatedAt JottaTime `xml:"currentRevision>created"`
ModifiedAt JottaTime `xml:"currentRevision>modified"`
UpdatedAt JottaTime `xml:"currentRevision>updated"`
Updated JottaTime `xml:"currentRevision>updated"`
Size int64 `xml:"currentRevision>size"`
MimeType string `xml:"currentRevision>mime"`
MD5 string `xml:"currentRevision>md5"`

View File

@@ -1,4 +1,3 @@
// Package jottacloud provides an interface to the Jottacloud storage system.
package jottacloud
import (
@@ -12,6 +11,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
@@ -67,21 +67,13 @@ const (
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
legacyConfigVersion = 0
teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaseCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaseCloudClientID = "desktop"
telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
telianoCloudAuthURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
telianoCloudClientID = "desktop"
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaCloudClientID = "desktop"
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
tele2CloudClientID = "desktop"
onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
onlimeCloudAuthURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
onlimeCloudClientID = "desktop"
)
// Register with Fs
@@ -92,34 +84,7 @@ func init() {
Description: "Jottacloud",
NewFs: NewFs,
Config: Config,
MetadataInfo: &fs.MetadataInfo{
Help: `Jottacloud has limited support for metadata, currently an extended set of timestamps.`,
System: map[string]fs.MetadataHelp{
"btime": {
Help: "Time of file birth (creation), read from rclone metadata",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
},
"mtime": {
Help: "Time of last modification, read from rclone metadata",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
},
"utime": {
Help: "Time of last upload, when current revision was created, generated by backend",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
ReadOnly: true,
},
"content-type": {
Help: "MIME type, also known as media type",
Type: "string",
Example: "text/plain",
ReadOnly: true,
},
},
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: "md5_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
@@ -154,7 +119,7 @@ func init() {
Default: (encoder.Display |
encoder.EncodeWin | // :?"*<>|
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
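The metadata help removed above documents two writable system keys (btime, mtime) and two read-only ones (utime, content-type). A sketch of how a caller might populate the writable keys using rclone's fs.Metadata map, as the Metadata method later in this diff does; the helper name is invented:

package jottaexample

import (
	"time"

	"github.com/rclone/rclone/fs"
)

// uploadMetadata fills in the two writable system keys; "utime" and
// "content-type" are generated by the backend and cannot be set.
func uploadMetadata(birth, mod time.Time) fs.Metadata {
	m := fs.Metadata{}
	m.Set("btime", birth.Format(time.RFC3339Nano))
	m.Set("mtime", mod.Format(time.RFC3339Nano))
	return m
}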
@@ -169,17 +134,11 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
Value: "legacy",
Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
}, {
Value: "telia_se",
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).",
}, {
Value: "telia_no",
Help: "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
Value: "telia",
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
}, {
Value: "tele2",
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
}, {
Value: "onlime",
Help: "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
}})
case "auth_type_done":
// Jump to next state according to config chosen
@@ -272,32 +231,17 @@ machines.`)
return nil, fmt.Errorf("error while saving token: %w", err)
}
return fs.ConfigGoto("choose_device")
case "telia_se": // telia_se cloud config
case "telia": // telia cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, teliaseCloudClientID)
m.Set(configTokenURL, teliaseCloudTokenURL)
m.Set(configClientID, teliaCloudClientID)
m.Set(configTokenURL, teliaCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: teliaseCloudAuthURL,
TokenURL: teliaseCloudTokenURL,
AuthURL: teliaCloudAuthURL,
TokenURL: teliaCloudTokenURL,
},
ClientID: teliaseCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "telia_no": // telia_no cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, telianoCloudClientID)
m.Set(configTokenURL, telianoCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: telianoCloudAuthURL,
TokenURL: telianoCloudTokenURL,
},
ClientID: telianoCloudClientID,
ClientID: teliaCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
@@ -317,21 +261,6 @@ machines.`)
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "onlime": // onlime cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, onlimeCloudClientID)
m.Set(configTokenURL, onlimeCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: onlimeCloudAuthURL,
TokenURL: onlimeCloudTokenURL,
},
ClientID: onlimeCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "choose_device":
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
Choosing no, the default, will let you access the storage used for the archive
@@ -524,9 +453,7 @@ type Object struct {
remote string
hasMetaData bool
size int64
createTime time.Time
modTime time.Time
updateTime time.Time
md5 string
mimeType string
}
@@ -894,7 +821,7 @@ func (f *Fs) allocatePathRaw(file string, absolute bool) string {
func grantTypeFilter(req *http.Request) {
if legacyTokenURL == req.URL.String() {
// read the entire body
refreshBody, err := io.ReadAll(req.Body)
refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil {
return
}
@@ -904,7 +831,7 @@ func grantTypeFilter(req *http.Request) {
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
// set the new ReadCloser (with a dummy Close())
req.Body = io.NopCloser(bytes.NewReader(refreshBody))
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
}
}
@@ -1001,9 +928,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
CanHaveEmptyDirectories: true,
ReadMimeType: true,
WriteMimeType: false,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: false,
}).Fill(ctx, f)
f.jfsSrv.SetErrorHandler(errorHandler)
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
@@ -1190,7 +1114,6 @@ func parseListRStream(ctx context.Context, r io.Reader, filesystem *Fs, callback
remote: filesystem.opt.Enc.ToStandardPath(path.Join(f.Path, f.Name)),
size: f.Size,
md5: f.Checksum,
createTime: time.Time(f.Created),
modTime: time.Time(f.Modified),
})
}
@@ -1324,7 +1247,7 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
// Put the object
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1420,7 +1343,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// is currently in trash, but can be made to match, it will be
// restored. Returns ErrorObjectNotFound if upload will be necessary
// to get a matching remote file.
func (f *Fs) createOrUpdate(ctx context.Context, file string, createTime time.Time, modTime time.Time, size int64, md5 string) (info *api.JottaFile, err error) {
func (f *Fs) createOrUpdate(ctx context.Context, file string, modTime time.Time, size int64, md5 string) (info *api.JottaFile, err error) {
opts := rest.Opts{
Method: "POST",
Path: f.filePath(file),
@@ -1430,10 +1353,11 @@ func (f *Fs) createOrUpdate(ctx context.Context, file string, createTime time.Ti
opts.Parameters.Set("cphash", "true")
fileDate := api.JottaTime(modTime).String()
opts.ExtraHeaders["JSize"] = strconv.FormatInt(size, 10)
opts.ExtraHeaders["JMd5"] = md5
opts.ExtraHeaders["JCreated"] = api.JottaTime(createTime).String()
opts.ExtraHeaders["JModified"] = api.JottaTime(modTime).String()
opts.ExtraHeaders["JCreated"] = fileDate
opts.ExtraHeaders["JModified"] = fileDate
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
@@ -1473,9 +1397,9 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1493,10 +1417,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
fs.Debugf(src, "Server-side copied to trashed destination, restoring")
info, err = f.createOrUpdate(ctx, remote, srcObj.createTime, srcObj.modTime, srcObj.size, srcObj.md5)
info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
}
if err != nil {
@@ -1509,9 +1433,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1759,9 +1683,7 @@ func (o *Object) setMetaData(info *api.JottaFile) (err error) {
o.size = info.Size
o.md5 = info.MD5
o.mimeType = info.MimeType
o.createTime = time.Time(info.CreatedAt)
o.modTime = time.Time(info.ModifiedAt)
o.updateTime = time.Time(info.UpdatedAt)
return nil
}
@@ -1806,7 +1728,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
// (note that if size/md5 does not match, the file content will
// also be modified if deduplication is possible, i.e. it is
// important to use correct/latest values)
_, err = o.fs.createOrUpdate(ctx, o.remote, o.createTime, modTime, o.size, o.md5)
_, err = o.fs.createOrUpdate(ctx, o.remote, modTime, o.size, o.md5)
if err != nil {
if err == fs.ErrorObjectNotFound {
// file was modified (size/md5 changed) between readMetaData and createOrUpdate?
@@ -1866,7 +1788,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
var tempFile *os.File
// create the cache file
tempFile, err = os.CreateTemp("", cachePrefix)
tempFile, err = ioutil.TempFile("", cachePrefix)
if err != nil {
return
}
@@ -1894,7 +1816,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
} else {
// that's a small file, just read it into memory
var inData []byte
inData, err = io.ReadAll(teeReader)
inData, err = ioutil.ReadAll(teeReader)
if err != nil {
return
}
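The readMD5 helper shown in fragments above hashes the stream while teeing it, so the content can be replayed after hashing: payloads over a size threshold spool to a temp file, smaller ones stay in memory. A self-contained sketch of that pattern, not the backend's exact code:

package jottaexample

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"io"
	"os"
)

// readMD5 returns the stream's MD5, a replayable reader over the same
// bytes, and a cleanup function the caller must invoke when done.
func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader, cleanup func(), err error) {
	h := md5.New()
	tee := io.TeeReader(in, h)
	cleanup = func() {}
	if size > threshold {
		// large payload: spool to a temp file on disk
		var f *os.File
		f, err = os.CreateTemp("", "md5spool")
		if err != nil {
			return "", nil, cleanup, err
		}
		cleanup = func() { _ = f.Close(); _ = os.Remove(f.Name()) }
		if _, err = io.Copy(f, tee); err != nil {
			return "", nil, cleanup, err
		}
		if _, err = f.Seek(0, io.SeekStart); err != nil {
			return "", nil, cleanup, err
		}
		out = f
	} else {
		// small payload: keep it in memory
		var data []byte
		data, err = io.ReadAll(tee)
		if err != nil {
			return "", nil, cleanup, err
		}
		out = bytes.NewReader(data)
	}
	return hex.EncodeToString(h.Sum(nil)), out, cleanup, nil
}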
@@ -1907,7 +1829,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
@@ -1916,12 +1838,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err == nil {
// if the object exists delete it
err = o.remove(ctx, true)
if err != nil && err != fs.ErrorObjectNotFound {
// if delete failed then report that, unless it was because the file did not exist after all
if err != nil {
return fmt.Errorf("failed to remove old object: %w", err)
}
} else if err != fs.ErrorObjectNotFound {
// if the object does not exist we can just continue but if the error is something different we should report that
}
// if the object does not exist we can just continue but if the error is something different we should report that
if err != fs.ErrorObjectNotFound {
return err
}
}
@@ -1943,37 +1865,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Wrap the accounting back onto the stream
in = wrap(in)
}
// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, src, options)
if err != nil {
return fmt.Errorf("failed to read metadata from source object: %w", err)
}
var createdTime string
var modTime string
if meta != nil {
if v, ok := meta["btime"]; ok {
t, err := time.Parse(time.RFC3339Nano, v) // metadata stores RFC3339Nano timestamps
if err != nil {
fs.Debugf(o, "failed to parse metadata btime: %q: %v", v, err)
} else {
createdTime = api.Rfc3339Time(t).String() // jottacloud api wants RFC3339 timestamps
}
}
if v, ok := meta["mtime"]; ok {
t, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
fs.Debugf(o, "failed to parse metadata mtime: %q: %v", v, err)
} else {
modTime = api.Rfc3339Time(t).String()
}
}
}
if modTime == "" { // prefer mtime in meta as Modified time, fallback to source ModTime
modTime = api.Rfc3339Time(src.ModTime(ctx)).String()
}
if createdTime == "" { // if no Created time set same as Modified
createdTime = modTime
}
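The block above prefers timestamps from rclone metadata over the source object's ModTime and defaults the creation time to the modification time. That precedence as a standalone sketch; resolveTimes is an invented name:

package jottaexample

import "time"

// resolveTimes applies the precedence above: "mtime" from metadata wins
// over the source ModTime, and "btime" defaults to the modification time.
func resolveTimes(meta map[string]string, srcModTime time.Time) (created, modified time.Time) {
	modified = srcModTime
	if v, ok := meta["mtime"]; ok {
		if t, err := time.Parse(time.RFC3339Nano, v); err == nil {
			modified = t
		}
	}
	created = modified
	if v, ok := meta["btime"]; ok {
		if t, err := time.Parse(time.RFC3339Nano, v); err == nil {
			created = t
		}
	}
	return created, modified
}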
// use the api to allocate the file first and get resume / deduplication info
var resp *http.Response
@@ -1983,12 +1874,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Options: options,
ExtraHeaders: make(map[string]string),
}
fileDate := api.Rfc3339Time(src.ModTime(ctx)).String()
// the allocate request
var request = api.AllocateFileRequest{
Bytes: size,
Created: createdTime,
Modified: modTime,
Created: fileDate,
Modified: fileDate,
Md5: md5String,
Path: o.fs.allocatePathRaw(o.remote, true),
}
@@ -2003,10 +1895,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// If the file state is INCOMPLETE and CORRUPT, we must upload it.
// Else, if the file state is COMPLETE, we don't need to upload it because
// the content is already there, possibly it was created with deduplication,
// and also any metadata changes are already performed by the allocate request.
// If the file state is INCOMPLETE and CORRUPT, try to upload it then
if response.State != "COMPLETED" {
// how much do we still have to upload?
remainingBytes := size - response.ResumePos
@@ -2024,24 +1913,28 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// copy the already uploaded bytes into the trash :)
var result api.UploadResponse
_, err = io.CopyN(io.Discard, in, response.ResumePos)
_, err = io.CopyN(ioutil.Discard, in, response.ResumePos)
if err != nil {
return err
}
// send the remaining bytes
_, err = o.fs.apiSrv.CallJSON(ctx, &opts, nil, &result)
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, nil, &result)
if err != nil {
return err
}
// Upload response contains main metadata properties (size, md5 and modTime)
// which could be set back to the object, but it does not contain the
// necessary information to set the createTime and updateTime properties,
// so must therefore perform a read instead.
// finally update the meta data
o.hasMetaData = true
o.size = result.Bytes
o.md5 = result.Md5
o.modTime = time.Unix(result.Modified/1000, 0)
} else {
// If the file state is COMPLETE we don't need to upload it because the file was already found, but we still need to update our metadata
return o.readMetaData(ctx, true)
}
// in any case we must update the object meta data
return o.readMetaData(ctx, true)
return nil
}
func (o *Object) remove(ctx context.Context, hard bool) error {
@@ -2058,17 +1951,10 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
opts.Parameters.Set("dl", "true")
}
err := o.fs.pacer.Call(func() (bool, error) {
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil)
return shouldRetry(ctx, resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
// attempting to hard delete will fail if path does not exist, but standard delete will succeed
if apiErr.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}
// Remove an object
@@ -2076,22 +1962,6 @@ func (o *Object) Remove(ctx context.Context) error {
return o.remove(ctx, o.fs.opt.HardDelete)
}
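The removed lines above translated an HTTP 404 from a hard delete into fs.ErrorObjectNotFound. A sketch of that translation using errors.As, with stand-in types (apiError mimics the backend's *api.Error):

package jottaexample

import (
	"errors"
	"fmt"
	"net/http"
)

// errObjectNotFound stands in for fs.ErrorObjectNotFound in this sketch.
var errObjectNotFound = errors.New("object not found")

type apiError struct{ StatusCode int }

func (e *apiError) Error() string { return fmt.Sprintf("api error %d", e.StatusCode) }

// translateRemoveError maps a 404 from a hard delete (the path did not
// exist) onto the generic not-found error callers expect.
func translateRemoveError(err error) error {
	var apiErr *apiError
	if errors.As(err, &apiErr) && apiErr.StatusCode == http.StatusNotFound {
		return errObjectNotFound
	}
	return err
}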
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
err = o.readMetaData(ctx, false)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return nil, err
}
metadata.Set("btime", o.createTime.Format(time.RFC3339Nano)) // metadata timestamps should be RFC3339Nano
metadata.Set("mtime", o.modTime.Format(time.RFC3339Nano))
metadata.Set("utime", o.updateTime.Format(time.RFC3339Nano))
metadata.Set("content-type", o.mimeType)
return metadata, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -2106,5 +1976,4 @@ var (
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.Metadataer = (*Object)(nil)
)

View File

@@ -1,17 +1,11 @@
package jottacloud
import (
"context"
"crypto/md5"
"fmt"
"io"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -46,48 +40,3 @@ func TestReadMD5(t *testing.T) {
})
}
}
func (f *Fs) InternalTestMetadata(t *testing.T) {
ctx := context.Background()
contents := random.String(1000)
item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
utime := time.Now()
metadata := fs.Metadata{
"btime": "2009-05-06T04:05:06.499999999Z",
"mtime": "2010-06-07T08:09:07.599999999Z",
//"utime" - read-only
//"content-type" - read-only
}
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, "text/html", metadata)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
o := obj.(*Object)
gotMetadata, err := o.Metadata(ctx)
require.NoError(t, err)
for k, v := range metadata {
got := gotMetadata[k]
switch k {
case "btime":
assert.True(t, fstest.Time(v).Truncate(f.Precision()).Equal(fstest.Time(got)), fmt.Sprintf("btime not equal want %v got %v", v, got))
case "mtime":
assert.True(t, fstest.Time(v).Truncate(f.Precision()).Equal(fstest.Time(got)), fmt.Sprintf("mtime not equal want %v got %v", v, got))
case "utime":
gotUtime := fstest.Time(got)
dt := gotUtime.Sub(utime)
assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("utime more than 1 minute out want %v got %v delta %v", utime, gotUtime, dt))
assert.True(t, fstest.Time(v).Equal(fstest.Time(got)))
case "content-type":
assert.True(t, o.MimeType(ctx) == got)
default:
assert.Equal(t, v, got, k)
}
}
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Metadata", f.InternalTestMetadata)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -1,4 +1,3 @@
// Package koofr provides an interface to the Koofr storage system.
package koofr
import (
@@ -61,10 +60,9 @@ func init() {
Default: true,
Advanced: true,
}, {
Name: "user",
Help: "Your user name.",
Required: true,
Sensitive: true,
Name: "user",
Help: "Your user name.",
Required: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
@@ -377,7 +375,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for i, file := range files {
remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
if file.Type == "dir" {
entries[i] = fs.NewDir(remote, time.Time{})
entries[i] = fs.NewDir(remote, time.Unix(0, 0))
} else {
entries[i] = &Object{
fs: f,
@@ -669,7 +667,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
//
// https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
//
// I am not sure about meaning of "path" parameter; in my experiments
// I am not sure about meaning of "path" parameter; in my experiments
// it is always "%2F", and omitting it or putting any other value
// results in 404.
//

View File

@@ -7,13 +7,13 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"unicode/utf8"
@@ -22,7 +22,6 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
@@ -124,8 +123,8 @@ routine so this flag shouldn't normally be used.`,
Help: `Don't check to see if the files change during upload.
Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy -
source file is being updated" if the file changes during upload.
are being uploaded and aborts with a message which starts "can't copy
- source file is being updated" if the file changes during upload.
However on some file systems this modification time check may fail (e.g.
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
@@ -235,16 +234,15 @@ type Options struct {
// Fs represents a local filesystem rooted at root
type Fs struct {
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
xattrSupported atomic.Int32 // whether xattrs are supported
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
@@ -267,10 +265,7 @@ type Object struct {
// ------------------------------------------------------------
var (
errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
errLinksNeedsSuffix = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
)
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -291,9 +286,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
dev: devUnset,
lstat: os.Lstat,
}
if xattrSupported {
f.xattrSupported.Store(1)
}
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),
@@ -303,8 +295,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
FilterAware: true,
PartialUploads: true,
}).Fill(ctx, f)
if opt.FollowSymlinks {
f.lstat = os.Stat
@@ -315,16 +305,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err == nil {
f.dev = readDevice(fi, f.opt.OneFileSystem)
}
// Check to see if this is a .rclonelink if not found
hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
}
if err == nil && f.isRegular(fi.Mode()) {
// Handle the odd case, that a symlink was specified by name without the link suffix
if !hasLinkSuffix && opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
return nil, errLinksNeedsSuffix
}
// It is a file, so use the parent as the root
f.root = filepath.Dir(f.root)
// return an error with an fs which points to the parent
@@ -458,8 +439,6 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
filter, useFilter := filter.GetConfig(ctx), filter.GetUseFilter(ctx)
fsDirPath := f.localPath(dir)
_, err = os.Stat(fsDirPath)
if err != nil {
@@ -510,14 +489,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
if fierr != nil {
// Don't report errors on any file names that are excluded
if useFilter {
newRemote := f.cleanRemote(dir, name)
if !filter.IncludeRemote(newRemote) {
continue
}
}
fierr = fmt.Errorf("failed to get info about directory entry %q: %w", namepath, fierr)
err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
@@ -538,10 +510,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath)
// Quietly skip errors on excluded files and directories
if err != nil && useFilter && !filter.IncludeRemote(newRemote) {
continue
}
if os.IsNotExist(err) || isCircularSymlinkError(err) {
// Skip bad symlinks and circular symlinks
err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
@@ -566,11 +534,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += linkSuffix
}
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !filter.IncludeRemote(newRemote) {
continue
}
fso, err := f.newObjectWithInfo(newRemote, fi)
if err != nil {
return nil, err
@@ -642,13 +605,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
//
// If it isn't empty it will return an error
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
localPath := f.localPath(dir)
if fi, err := os.Stat(localPath); err != nil {
return err
} else if !fi.IsDir() {
return fs.ErrorIsFile
}
return os.Remove(localPath)
return os.Remove(f.localPath(dir))
}
// Precision of the file system
@@ -669,7 +626,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
precision = time.Second
// Create temporary file and test it
fd, err := os.CreateTemp("", "rclone")
fd, err := ioutil.TempFile("", "rclone")
if err != nil {
// If failed return 1s
// fmt.Println("Failed to create temp file", err)
@@ -738,9 +695,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1096,7 +1053,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
if err != nil {
return nil, err
}
return readers.NewLimitedReadCloser(io.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
}
// Open an object for read
@@ -1423,27 +1380,30 @@ func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
}
func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") {
if !filepath.IsAbs(s) {
if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
} else {
s = filepath.Clean(s)
}
}
if runtime.GOOS == "windows" {
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)
if !noUNC {
// Convert to UNC
s = file.UNCPath(s)
}
return s
}
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
s = enc.FromStandardPath(s)
return s
}

View File

@@ -4,22 +4,18 @@ import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
@@ -35,6 +31,7 @@ func TestMain(m *testing.M) {
// Test copy with source file that's updating
func TestUpdatingCheck(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
filePath := "sub dir/local test"
r.WriteFile(filePath, "content", time.Now())
@@ -79,6 +76,7 @@ func TestUpdatingCheck(t *testing.T) {
func TestSymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
f := r.Flocal.(*Fs)
dir := f.root
@@ -147,24 +145,10 @@ func TestSymlink(t *testing.T) {
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
require.Equal(t, fs.ErrorObjectNotFound, err)
// Check that NewFs works with the suffixed version and --links
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
"links": "true",
})
require.Equal(t, fs.ErrorIsFile, err)
require.Equal(t, dir, f2.(*Fs).root)
// Check that NewFs doesn't see the non suffixed version with --links
f2, err = NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"), configmap.Simple{
"links": "true",
})
require.Equal(t, errLinksNeedsSuffix, err)
require.Nil(t, f2)
// Check reading the object
in, err := o.Open(ctx)
require.NoError(t, err)
contents, err := io.ReadAll(in)
contents, err := ioutil.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt", string(contents))
require.NoError(t, in.Close())
@@ -172,7 +156,7 @@ func TestSymlink(t *testing.T) {
// Check reading the object with range
in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5})
require.NoError(t, err)
contents, err = io.ReadAll(in)
contents, err = ioutil.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt"[2:5+1], string(contents))
require.NoError(t, in.Close())
@@ -191,6 +175,7 @@ func TestSymlinkError(t *testing.T) {
func TestHashOnUpdate(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
@@ -205,7 +190,7 @@ func TestHashOnUpdate(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
// Reupload it with different contents but same size and timestamp
// Reupload it with different contents but same size and timestamp
var b = bytes.NewBufferString("CONTENT")
src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
err = o.Update(ctx, b, src)
@@ -221,6 +206,7 @@ func TestHashOnUpdate(t *testing.T) {
func TestHashOnDelete(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
@@ -249,6 +235,7 @@ func TestHashOnDelete(t *testing.T) {
func TestMetadata(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "metafile.txt"
when := time.Now()
const dayLength = len("2001-01-01")
@@ -379,179 +366,3 @@ func TestMetadata(t *testing.T) {
})
}
func TestFilter(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
when := time.Now()
r.WriteFile("included", "included file", when)
r.WriteFile("excluded", "excluded file", when)
f := r.Flocal.(*Fs)
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
// Add a filter
ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included"))
require.NoError(t, fi.AddRule("- *"))
// Check listing without use filter flag
entries, err := f.List(ctx, "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, "[excluded included]", fmt.Sprint(entries))
// Add user filter flag
ctx = filter.SetUseFilter(ctx, true)
// Check listing with use filter flag
entries, err = f.List(ctx, "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, "[included]", fmt.Sprint(entries))
}
func testFilterSymlink(t *testing.T, copyLinks bool) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
f := r.Flocal.(*Fs)
// Create a file, a directory, a symlink to a file, a symlink to a directory and a dangling symlink
r.WriteFile("included.file", "included file", when)
r.WriteFile("included.dir/included.sub.file", "included sub file", when)
require.NoError(t, os.Symlink("included.file", filepath.Join(r.LocalName, "included.file.link")))
require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))
defer func() {
// Reset -L/-l mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = false
f.lstat = os.Lstat
}()
if copyLinks {
// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
} else {
// Set fs into "-l" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
}
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
// Reset global error count
accounting.Stats(ctx).ResetErrors()
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
// Add a filter
ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included.file"))
require.NoError(t, fi.AddRule("+ included.dir/**"))
if copyLinks {
require.NoError(t, fi.AddRule("+ included.file.link"))
require.NoError(t, fi.AddRule("+ included.dir.link/**"))
} else {
require.NoError(t, fi.AddRule("+ included.file.link.rclonelink"))
require.NoError(t, fi.AddRule("+ included.dir.link.rclonelink"))
}
require.NoError(t, fi.AddRule("- *"))
// Check listing without the use-filter flag
entries, err := f.List(ctx, "")
require.NoError(t, err)
if copyLinks {
// Check 1 global error - one for the dangling symlink
assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
} else {
// Check 0 global errors as the dangling symlink is handled properly
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
}
accounting.Stats(ctx).ResetErrors()
sort.Sort(entries)
if copyLinks {
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
} else {
require.Equal(t, "[dangling.link.rclonelink included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
}
// Add the use-filter flag to the context
ctx = filter.SetUseFilter(ctx, true)
// Check listing with the use-filter flag
entries, err = f.List(ctx, "")
require.NoError(t, err)
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
sort.Sort(entries)
if copyLinks {
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
} else {
require.Equal(t, "[included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
}
// Check listing through a symlink still works
entries, err = f.List(ctx, "included.dir")
require.NoError(t, err)
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
sort.Sort(entries)
require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
}
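The .rclonelink names these tests expect come from the -l translation mode, which amounts to a suffix mapping; a rough sketch under that assumption (the helper is illustrative, though the suffix matches the expected listings above):

package local

const linkSuffix = ".rclonelink" // suffix seen in the listings above

// toRemote is an illustrative helper: in -l mode a symlink "name" is
// presented as a regular file "name.rclonelink" whose content is the
// link target, which is why dangling links list cleanly instead of
// raising a global error.
func toRemote(name string, isSymlink, translateSymlinks bool) string {
	if translateSymlinks && isSymlink {
		return name + linkSuffix
	}
	return name
}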
func TestFilterSymlinkCopyLinks(t *testing.T) {
testFilterSymlink(t, true)
}
func TestFilterSymlinkLinks(t *testing.T) {
testFilterSymlink(t, false)
}
func TestCopySymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
f := r.Flocal.(*Fs)
// Create a file and a symlink to it
r.WriteFile("src/file.txt", "hello world", when)
require.NoError(t, os.Symlink("file.txt", filepath.Join(r.LocalName, "src", "link.txt")))
defer func() {
// Reset -L/-l mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = false
f.lstat = os.Lstat
}()
// Set fs into "-l/--links" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
// Create dst
require.NoError(t, f.Mkdir(ctx, "dst"))
// Do copy from src into dst
src, err := f.NewObject(ctx, "src/link.txt.rclonelink")
require.NoError(t, err)
require.NotNil(t, src)
dst, err := operations.Copy(ctx, f, nil, "dst/link.txt.rclonelink", src)
require.NoError(t, err)
require.NotNil(t, dst)
// Test that we made a symlink and it has the right contents
dstPath := filepath.Join(r.LocalName, "dst", "link.txt")
linkContents, err := os.Readlink(dstPath)
require.NoError(t, err)
assert.Equal(t, "file.txt", linkContents)
}

View File

@@ -5,42 +5,19 @@ package local
import (
"fmt"
"runtime"
"sync"
"time"
"github.com/rclone/rclone/fs"
"golang.org/x/sys/unix"
)
var (
statxCheckOnce sync.Once
readMetadataFromFileFn func(o *Object, m *fs.Metadata) (err error)
)
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
statxCheckOnce.Do(func() {
// Check statx() is available as it was only introduced in kernel 4.11
// If not, fall back to fstatat(), which was introduced in kernel 2.6.16 and is therefore available on every kernel Go supports
var stat unix.Statx_t
if runtime.GOOS != "android" && unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
readMetadataFromFileFn = readMetadataFromFileStatx
} else {
readMetadataFromFileFn = readMetadataFromFileFstatat
}
})
return readMetadataFromFileFn(o, m)
}
// Read the metadata from the file into metadata where possible
func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
flags := unix.AT_SYMLINK_NOFOLLOW
if o.fs.opt.FollowSymlinks {
flags = 0
}
var stat unix.Statx_t
// statx() was added to Linux in kernel 4.11
err = unix.Statx(unix.AT_FDCWD, o.path, flags, (0 |
unix.STATX_TYPE | // Want stx_mode & S_IFMT
unix.STATX_MODE | // Want stx_mode & ~S_IFMT
@@ -68,36 +45,3 @@ func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
setTime("btime", stat.Btime)
return nil
}
// Read the metadata from the file into metadata where possible
func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
flags := unix.AT_SYMLINK_NOFOLLOW
if o.fs.opt.FollowSymlinks {
flags = 0
}
var stat unix.Stat_t
// fstatat() was added to Linux in kernel 2.6.16
// Go only supports 2.6.32 or later
err = unix.Fstatat(unix.AT_FDCWD, o.path, &stat, flags)
if err != nil {
return err
}
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
if stat.Rdev != 0 {
m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
}
setTime := func(key string, t unix.Timespec) {
// The types of t.Sec and t.Nsec vary from int32 to int64 on
// different Linux architectures so we need to cast them to
// int64 here and hence need to quiet the linter about
// unnecessary casts.
//
// nolint: unconvert
m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))
}
setTime("atime", stat.Atim)
setTime("mtime", stat.Mtim)
return nil
}
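The int64 casts in setTime are load-bearing: unix.Timespec fields are int32 on some 32-bit Linux targets and int64 elsewhere. A standalone illustration (the helper name is ours):

package local

import (
	"time"

	"golang.org/x/sys/unix"
)

// timespecToTime is an illustrative helper: the explicit int64
// conversions compile cleanly on both 32-bit (int32 fields) and
// 64-bit (int64 fields) Linux architectures, at the cost of an
// "unnecessary conversion" lint warning on the latter.
//
// nolint: unconvert
func timespecToTime(ts unix.Timespec) time.Time {
	return time.Unix(int64(ts.Sec), int64(ts.Nsec))
}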

View File

@@ -1,6 +1,7 @@
package local
import (
"io/ioutil"
"os"
"sync"
"testing"
@@ -12,7 +13,7 @@ import (
// Check we can remove an open file
func TestRemove(t *testing.T) {
fd, err := os.CreateTemp("", "rclone-remove-test")
fd, err := ioutil.TempFile("", "rclone-remove-test")
require.NoError(t, err)
name := fd.Name()
defer func() {

View File

@@ -9,7 +9,7 @@ import (
const haveSetBTime = false
// setBTime changes the birth time of the file passed in
// setBTime changes the the birth time of the file passed in
func setBTime(name string, btime time.Time) error {
// Does nothing
return nil

View File

@@ -5,13 +5,13 @@ package local
import (
"os"
"syscall"
"time"
"syscall"
)
const haveSetBTime = true
// setBTime sets the birth time of the file passed in
// setBTime sets the the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
h, err := syscall.Open(name, os.O_RDWR, 0755)
if err != nil {
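The viewer truncates this hunk here; for orientation, a plausible completion of the Windows implementation, assuming it converts the time with syscall.NsecToFiletime and passes it as the creation-time argument of syscall.SetFileTime. This is a sketch, not necessarily the code hidden by the truncation:

package local

import (
	"os"
	"syscall"
	"time"
)

// setBTimeSketch is a hypothetical completion of setBTime: open the
// file, set only its creation (birth) time, then close it.
func setBTimeSketch(name string, btime time.Time) (err error) {
	h, err := syscall.Open(name, os.O_RDWR, 0755)
	if err != nil {
		return err
	}
	defer func() {
		closeErr := syscall.Close(h)
		if err == nil {
			err = closeErr
		}
	}()
	bFileTime := syscall.NsecToFiletime(btime.UnixNano())
	// The first time argument to SetFileTime is the creation time;
	// passing nil for the other two leaves atime/mtime untouched.
	return syscall.SetFileTime(h, &bFileTime, nil, nil)
}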

View File

@@ -6,7 +6,6 @@ package local
import (
"fmt"
"strings"
"syscall"
"github.com/pkg/xattr"
"github.com/rclone/rclone/fs"
@@ -17,30 +16,12 @@ const (
xattrSupported = xattr.XATTR_SUPPORTED
)
// Check to see if the supplied error means xattrs are not supported,
// and if so, disable them
func (f *Fs) xattrIsNotSupported(err error) bool {
xattrErr, ok := err.(*xattr.Error)
if !ok {
return false
}
// Xattrs not being supported can be reported as ENOTSUP, ENOATTR or EINVAL (on Solaris)
if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR {
// Log once that xattrs are not supported
if f.xattrSupported.CompareAndSwap(1, 0) {
fs.Errorf(f, "xattrs not supported - disabling: %v", err)
}
return true
}
return false
}
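The Load and CompareAndSwap calls imply an atomic flag on Fs; a sketch of the assumed declaration (illustrative - the real field is defined elsewhere in the backend):

package local

import "sync/atomic"

// Assumed shape of the field behind the calls above: 1 means xattrs
// are believed supported, and the first "not supported" error flips
// it to 0. atomic.Int32 gives a lock-free read on every getXattr and
// setXattr call, while CompareAndSwap(1, 0) guarantees the disabling
// message is logged only once even under concurrent access.
type Fs struct {
	// ...other fields elided...
	xattrSupported atomic.Int32
}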
// getXattr returns the extended attributes for an object
//
// It doesn't return any attributes owned by this backend in
// metadataKeys
func (o *Object) getXattr() (metadata fs.Metadata, err error) {
if !xattrSupported || o.fs.xattrSupported.Load() == 0 {
if !xattrSupported {
return nil, nil
}
var list []string
@@ -50,9 +31,6 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
list, err = xattr.LList(o.path)
}
if err != nil {
if o.fs.xattrIsNotSupported(err) {
return nil, nil
}
return nil, fmt.Errorf("failed to read xattr: %w", err)
}
if len(list) == 0 {
@@ -67,9 +45,6 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
v, err = xattr.LGet(o.path, k)
}
if err != nil {
if o.fs.xattrIsNotSupported(err) {
return nil, nil
}
return nil, fmt.Errorf("failed to read xattr key %q: %w", k, err)
}
k = strings.ToLower(k)
@@ -89,7 +64,7 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
//
// It doesn't set any attributes owned by this backend in metadataKeys
func (o *Object) setXattr(metadata fs.Metadata) (err error) {
if !xattrSupported || o.fs.xattrSupported.Load() == 0 {
if !xattrSupported {
return nil
}
for k, value := range metadata {
@@ -105,9 +80,6 @@ func (o *Object) setXattr(metadata fs.Metadata) (err error) {
err = xattr.LSet(o.path, k, v)
}
if err != nil {
if o.fs.xattrIsNotSupported(err) {
return nil
}
return fmt.Errorf("failed to set xattr key %q: %w", k, err)
}
}

View File

@@ -69,11 +69,6 @@ func (w *BinWriter) WritePu64(val int64) {
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}
// WriteP64 writes a signed long as an unsigned varint
func (w *BinWriter) WriteP64(val int64) {
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}
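Having a signed variant matters for the modTime line in addFileMetaData below, which switches between WritePu64 and WriteP64 for o.modTime.Unix(): pre-1970 times are negative, and the uint64 cast must round-trip them. A quick standard-library check (illustrative, not part of the diff):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	var a [binary.MaxVarintLen64]byte
	val := int64(-1) // e.g. a pre-1970 timestamp
	n := binary.PutUvarint(a[:], uint64(val))
	fmt.Println(n) // 10: the all-ones two's-complement pattern is a huge unsigned value
	got, _ := binary.ReadUvarint(bytes.NewReader(a[:n]))
	fmt.Println(int64(got) == val) // true - the cast round-trips on decode
}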
// WriteString writes a zero-terminated string
func (w *BinWriter) WriteString(str string) {
buf := []byte(str)

View File

@@ -1,4 +1,3 @@
// Package api provides types used by the Mail.ru API.
package api
import (

View File

@@ -1,4 +1,3 @@
// Package mailru provides an interface to the Mail.ru Cloud storage system.
package mailru
import (
@@ -18,6 +17,7 @@ import (
"encoding/hex"
"encoding/json"
"io/ioutil"
"net/http"
"net/url"
@@ -85,19 +85,13 @@ func init() {
Name: "mailru",
Description: "Mail.ru Cloud",
NewFs: NewFs,
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "user",
Help: "User name (usually email).",
Required: true,
Sensitive: true,
Options: []fs.Option{{
Name: "user",
Help: "User name (usually email).",
Required: true,
}, {
Name: "pass",
Help: `Password.
This must be an app password - rclone will not work with your normal
password. See the Configuration section in the docs for how to make an
app password.
`,
Name: "pass",
Help: "Password.",
Required: true,
IsPassword: true,
}, {
@@ -214,7 +208,7 @@ Supported quirks: atomicmkdir binlist unknowndirs`,
encoder.EncodeWin | // :?"*<>|
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
@@ -636,17 +630,21 @@ func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEnt
// itemToEntry converts API item to rclone directory entry
// The dirSize return value is:
//
// <0 - for a file or in case of error
// =0 - for an empty directory
// >0 - for a non-empty directory
// <0 - for a file or in case of error
// =0 - for an empty directory
// >0 - for a non-empty directory
func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.DirEntry, dirSize int, err error) {
remote, err := f.relPath(f.opt.Enc.ToStandardPath(item.Home))
if err != nil {
return nil, -1, err
}
modTime := time.Unix(int64(item.Mtime), 0)
mTime := int64(item.Mtime)
if mTime < 0 {
fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
mTime = 0
}
modTime := time.Unix(mTime, 0)
isDir, err := f.isDir(item.Kind, remote)
if err != nil {
@@ -1660,7 +1658,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Attempt to put by calculating hash in memory
if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
fileBuf, err = io.ReadAll(in)
fileBuf, err = ioutil.ReadAll(in)
if err != nil {
return err
}
@@ -1703,7 +1701,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if size <= mrhash.Size {
// Optimize upload: skip extra request if data fits in the hash buffer.
if fileBuf == nil {
fileBuf, err = io.ReadAll(wrapIn)
fileBuf, err = ioutil.ReadAll(wrapIn)
}
if fileHash == nil && err == nil {
fileHash = mrhash.Sum(fileBuf)
@@ -2058,7 +2056,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
req.WritePu16(0) // revision
req.WriteString(o.fs.opt.Enc.FromStandardPath(o.absPath()))
req.WritePu64(o.size)
req.WriteP64(o.modTime.Unix())
req.WritePu64(o.modTime.Unix())
req.WritePu32(0)
req.Write(o.mrHash)
@@ -2214,7 +2212,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
fs.Debugf(o, "Server returned full content instead of range")
if start > 0 {
// Discard the beginning of the data
_, err = io.CopyN(io.Discard, wrapStream, start)
_, err = io.CopyN(ioutil.Discard, wrapStream, start)
if err != nil {
closeBody(res)
return nil, err

View File

@@ -58,10 +58,9 @@ func init() {
Description: "Mega",
NewFs: NewFs,
Options: []fs.Option{{
Name: "user",
Help: "User name.",
Required: true,
Sensitive: true,
Name: "user",
Help: "User name.",
Required: true,
}, {
Name: "pass",
Help: "Password.",
@@ -84,17 +83,6 @@ than permanently deleting them. If you specify this then rclone will
permanently delete objects instead.`,
Default: false,
Advanced: true,
}, {
Name: "use_https",
Help: `Use HTTPS for transfers.
MEGA uses plain text HTTP connections by default.
Some ISPs throttle HTTP connections, which causes transfers to become very slow.
Enabling this will force MEGA to use HTTPS for all transfers.
HTTPS is normally not necessary since all data is already encrypted anyway.
Enabling it will increase CPU usage and add network overhead.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -112,7 +100,6 @@ type Options struct {
Pass string `config:"pass"`
Debug bool `config:"debug"`
HardDelete bool `config:"hard_delete"`
UseHTTPS bool `config:"use_https"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -131,7 +118,7 @@ type Fs struct {
// Object describes a mega object
//
// Will definitely have info but maybe not meta.
// Will definitely have info but maybe not meta
//
// Normally rclone would just store an ID here but go-mega and mega.nz
// expect you to build an entire tree of all the objects in memory.
@@ -206,7 +193,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
ci := fs.GetConfig(ctx)
// cache *mega.Mega on username so we can reuse and share
// cache *mega.Mega on username so we can re-use and share
// them between remotes. They are expensive to make as they
// contain all the objects and sharing the objects makes the
// move code easier as we don't have to worry about mixing
@@ -217,7 +204,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if srv == nil {
srv = mega.New().SetClient(fshttp.NewClient(ctx))
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
srv.SetHTTPS(opt.UseHTTPS)
srv.SetLogger(func(format string, v ...interface{}) {
fs.Infof("*go-mega*", format, v...)
})
@@ -361,7 +347,7 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
}
}
if err != nil {
return nil, fmt.Errorf("internal error: mkdir called with nonexistent root node: %w", err)
return nil, fmt.Errorf("internal error: mkdir called with non-existent root node: %w", err)
}
// i is number of directories to create (may be 0)
// node is directory to create them from
@@ -401,7 +387,7 @@ func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
return f._rootNode, nil
}
// Check for preexisting root
// Check for pre-existing root
absRoot := f.srv.FS.GetRoot()
node, err := f.findDir(absRoot, f.root)
//log.Printf("findRoot findDir %p %v", node, err)
@@ -550,7 +536,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the dirNode, object, leaf and error.
// Returns the dirNode, object, leaf and error
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
@@ -568,7 +554,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
// Put the object
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
// PutUnchecked uploads the object
@@ -590,7 +576,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// PutUnchecked the object
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
// PutUnchecked uploads the object
@@ -763,9 +749,9 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -993,6 +979,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1128,7 +1115,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

View File

@@ -8,6 +8,7 @@ import (
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"path"
"strings"
"sync"
@@ -417,7 +418,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -462,9 +463,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -574,7 +575,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
data = data[:limit]
}
return io.NopCloser(bytes.NewBuffer(data)), nil
return ioutil.NopCloser(bytes.NewBuffer(data)), nil
}
// Update the object with the contents of the io.Reader, modTime and size
@@ -582,7 +583,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucket, bucketPath := o.split()
data, err := io.ReadAll(in)
data, err := ioutil.ReadAll(in)
if err != nil {
return fmt.Errorf("failed to update memory object: %w", err)
}

Some files were not shown because too many files have changed in this diff.