1
0
mirror of https://github.com/rclone/rclone.git synced 2026-01-09 12:03:20 +00:00

Compare commits

..

2 Commits

Author SHA1 Message Date
Nick Craig-Wood
45032a2122 build: fix homebrew build failures
See: https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6351357
2023-07-05 16:45:29 +01:00
Nick Craig-Wood
41296a972a docs: update contact page on website 2023-07-05 12:19:04 +01:00
624 changed files with 64543 additions and 106914 deletions

View File

@@ -27,12 +27,12 @@ jobs:
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20'] job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.18', 'go1.19']
include: include:
- job_name: linux - job_name: linux
os: ubuntu-latest os: ubuntu-latest
go: '1.21' go: '1.20'
gotags: cmount gotags: cmount
build_flags: '-include "^linux/"' build_flags: '-include "^linux/"'
check: true check: true
@@ -43,14 +43,14 @@ jobs:
- job_name: linux_386 - job_name: linux_386
os: ubuntu-latest os: ubuntu-latest
go: '1.21' go: '1.20'
goarch: 386 goarch: 386
gotags: cmount gotags: cmount
quicktest: true quicktest: true
- job_name: mac_amd64 - job_name: mac_amd64
os: macos-11 os: macos-11
go: '1.21' go: '1.20'
gotags: 'cmount' gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo' build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true quicktest: true
@@ -59,14 +59,14 @@ jobs:
- job_name: mac_arm64 - job_name: mac_arm64
os: macos-11 os: macos-11
go: '1.21' go: '1.20'
gotags: 'cmount' gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib' build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true deploy: true
- job_name: windows - job_name: windows
os: windows-latest os: windows-latest
go: '1.21' go: '1.20'
gotags: cmount gotags: cmount
cgo: '0' cgo: '0'
build_flags: '-include "^windows/"' build_flags: '-include "^windows/"'
@@ -76,20 +76,20 @@ jobs:
- job_name: other_os - job_name: other_os
os: ubuntu-latest os: ubuntu-latest
go: '1.21' go: '1.20'
build_flags: '-exclude "^(windows/|darwin/|linux/)"' build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true compile_all: true
deploy: true deploy: true
- job_name: go1.19 - job_name: go1.18
os: ubuntu-latest os: ubuntu-latest
go: '1.19' go: '1.18'
quicktest: true quicktest: true
racequicktest: true racequicktest: true
- job_name: go1.20 - job_name: go1.19
os: ubuntu-latest os: ubuntu-latest
go: '1.20' go: '1.19'
quicktest: true quicktest: true
racequicktest: true racequicktest: true
@@ -99,7 +99,7 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -130,13 +130,12 @@ jobs:
- name: Install Libraries on macOS - name: Install Libraries on macOS
shell: bash shell: bash
run: | run: |
# https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788 brew untap homebrew/core homebrew/cask
# https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008 brew config
unset HOMEBREW_NO_INSTALL_FROM_API
brew untap --force homebrew/core
brew untap --force homebrew/cask
brew update brew update
brew install --cask macfuse brew install --cask macfuse
env:
HOMEBREW_NO_INSTALL_FROM_API: # No value, so brew doesn't rely on homebrew-core / homebrew-cask
if: matrix.os == 'macos-11' if: matrix.os == 'macos-11'
- name: Install Libraries on Windows - name: Install Libraries on Windows
@@ -216,6 +215,7 @@ jobs:
shell: bash shell: bash
run: | run: |
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
make ci_beta make ci_beta
env: env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }} RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
@@ -231,7 +231,7 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
- name: Code quality test - name: Code quality test
uses: golangci/golangci-lint-action@v3 uses: golangci/golangci-lint-action@v3
@@ -243,7 +243,7 @@ jobs:
- name: Install Go - name: Install Go
uses: actions/setup-go@v4 uses: actions/setup-go@v4
with: with:
go-version: '1.21' go-version: '1.20'
check-latest: true check-latest: true
- name: Install govulncheck - name: Install govulncheck
@@ -260,7 +260,7 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -268,7 +268,7 @@ jobs:
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v4 uses: actions/setup-go@v4
with: with:
go-version: '1.21' go-version: '1.20'
- name: Go module cache - name: Go module cache
uses: actions/cache@v3 uses: actions/cache@v3

View File

@@ -10,35 +10,26 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
name: Build image job name: Build image job
steps: steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master - name: Checkout master
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Login to Docker Hub - name: Login to Docker Hub
uses: docker/login-action@v3 uses: docker/login-action@v2
with: with:
username: ${{ secrets.DOCKERHUB_USERNAME }} username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }} password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker - name: Extract metadata (tags, labels) for Docker
id: meta id: meta
uses: docker/metadata-action@v5 uses: docker/metadata-action@v4
with: with:
images: ghcr.io/${{ github.repository }} images: ghcr.io/${{ github.repository }}
- name: Set up QEMU - name: Set up QEMU
uses: docker/setup-qemu-action@v3 uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3 uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry - name: Login to GitHub Container Registry
uses: docker/login-action@v3 uses: docker/login-action@v2
with: with:
registry: ghcr.io registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will # This is the user that triggered the Workflow. In this case, it will
@@ -51,12 +42,9 @@ jobs:
# See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
# for more detailed information. # for more detailed information.
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.GITHUB_TOKEN }}
- name: Show disk usage
shell: bash
run: |
df -h .
- name: Build and publish image - name: Build and publish image
uses: docker/build-push-action@v5 uses: docker/build-push-action@v4
with: with:
file: Dockerfile file: Dockerfile
context: . context: .
@@ -66,12 +54,8 @@ jobs:
rclone/rclone:beta rclone/rclone:beta
labels: ${{ steps.meta.outputs.labels }} labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
cache-from: type=gha, scope=${{ github.workflow }} cache-from: type=gha
cache-to: type=gha, mode=max, scope=${{ github.workflow }} cache-to: type=gha,mode=max
provenance: false provenance: false
# Eventually cache will need to be cleared if builds more frequent than once a week # Eventually cache will need to be cleared if builds more frequent than once a week
# https://github.com/docker/build-push-action/issues/252 # https://github.com/docker/build-push-action/issues/252
- name: Show disk usage
shell: bash
run: |
df -h .

View File

@@ -10,17 +10,8 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
name: Build image job name: Build image job
steps: steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master - name: Checkout master
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Get actual patch version - name: Get actual patch version
@@ -48,17 +39,8 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
name: Build docker plugin job name: Build docker plugin job
steps: steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master - name: Checkout master
uses: actions/checkout@v4 uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Build and publish docker plugin - name: Build and publish docker plugin

View File

@@ -5,7 +5,7 @@ on:
jobs: jobs:
publish: publish:
runs-on: ubuntu-latest runs-on: windows-latest # Action can only run on Windows
steps: steps:
- uses: vedantmgoyal2009/winget-releaser@v2 - uses: vedantmgoyal2009/winget-releaser@v2
with: with:

5
.gitignore vendored
View File

@@ -8,13 +8,10 @@ rclone.iml
.idea .idea
.history .history
*.test *.test
*.log
*.iml *.iml
fuzz-build.zip fuzz-build.zip
*.orig *.orig
*.rej *.rej
Thumbs.db Thumbs.db
__pycache__ __pycache__
.DS_Store
/docs/static/img/logos/
resource_windows_*.syso
.devcontainer

View File

@@ -33,67 +33,23 @@ issues:
- staticcheck - staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated' text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
# don't disable the revive messages about comments on exported functions
include:
- EXC0012
- EXC0013
- EXC0014
- EXC0015
run: run:
# timeout for analysis, e.g. 30s, 5m, default is 1m # timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m timeout: 10m
linters-settings: linters-settings:
revive: revive:
# setting rules seems to disable all the rules, so re-enable them here
rules: rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
- name: empty-block
disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
- name: increment-decrement
disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
- name: unreachable-code - name: unreachable-code
disabled: true disabled: true
- name: unused-parameter - name: unused-parameter
disabled: true disabled: true
- name: var-declaration - name: empty-block
disabled: false disabled: true
- name: var-naming - name: redefines-builtin-id
disabled: false disabled: true
- name: superfluous-else
disabled: true
stylecheck: stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool, # Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks # as documented here: https://staticcheck.io/docs/configuration/options/#checks

View File

@@ -1,8 +1,8 @@
# Contributing to rclone # Contributing to rclone #
This is a short guide on how to contribute things to rclone. This is a short guide on how to contribute things to rclone.
## Reporting a bug ## Reporting a bug ##
If you've just got a question or aren't sure if you've found a bug If you've just got a question or aren't sure if you've found a bug
then please use the [rclone forum](https://forum.rclone.org/) instead then please use the [rclone forum](https://forum.rclone.org/) instead
@@ -12,13 +12,13 @@ When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/): with the [latest beta of rclone](https://beta.rclone.org/):
- Rclone version (e.g. output from `rclone version`) * Rclone version (e.g. output from `rclone version`)
- Which OS you are using and how many bits (e.g. Windows 10, 64 bit) * Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`) * The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
- A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`) * A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
- if the log contains secrets then edit the file with a text editor first to obscure them * if the log contains secrets then edit the file with a text editor first to obscure them
## Submitting a new feature or bug fix ## Submitting a new feature or bug fix ##
If you find a bug that you'd like to fix, or a new feature that you'd If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub. like to implement then please submit a pull request via GitHub.
@@ -73,9 +73,9 @@ This is typically enough if you made a simple bug fix, otherwise please read the
Make sure you Make sure you
- Add [unit tests](#testing) for a new feature. * Add [unit tests](#testing) for a new feature.
- Add [documentation](#writing-documentation) for a new feature. * Add [documentation](#writing-documentation) for a new feature.
- [Commit your changes](#committing-your-changes) using the [commit message guidelines](#commit-messages). * [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
When you are done with that push your changes to GitHub: When you are done with that push your changes to GitHub:
@@ -88,9 +88,9 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits). You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
## Using Git and GitHub ## Using Git and GitHub ##
### Committing your changes ### Committing your changes ###
Follow the guideline for [commit messages](#commit-messages) and then: Follow the guideline for [commit messages](#commit-messages) and then:
@@ -107,7 +107,7 @@ You can modify the message or changes in the latest commit using:
If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits). If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Replacing your previously pushed commits ### Replacing your previously pushed commits ###
Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub. Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
@@ -115,7 +115,7 @@ Your previously pushed commits are replaced by:
git push --force origin my-new-feature git push --force origin my-new-feature
### Basing your changes on the latest master ### Basing your changes on the latest master ###
To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream): To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
@@ -149,21 +149,13 @@ If you squash commits that have been pushed to GitHub, then you will have to [re
Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation. Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
### GitHub Continuous Integration ### GitHub Continuous Integration ###
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository. rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
## Testing ## Testing ##
### Code quality tests ### Quick testing ###
If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same tests as get run in the CI which can be very helpful.
You can run them with `make check` or with `golangci-lint run ./...`.
Using these tests ensures that the rclone codebase all uses the same coding standards. These tests also check for easy mistakes to make (like forgetting to check an error return).
### Quick testing
rclone's tests are run from the go testing framework, so at the top rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests. level you can run this to run all the tests.
@@ -176,7 +168,7 @@ You can also use `make`, if supported by your platform
The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub. The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
### Backend testing ### Backend testing ###
rclone contains a mixture of unit tests and integration tests. rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud Because it is difficult (and in some respects pointless) to test cloud
@@ -211,7 +203,7 @@ project root:
go install github.com/rclone/rclone/fstest/test_all go install github.com/rclone/rclone/fstest/test_all
test_all -backend drive test_all -backend drive
### Full integration testing ### Full integration testing ###
If you want to run all the integration tests against all the remotes, If you want to run all the integration tests against all the remotes,
then change into the project root and run then change into the project root and run
@@ -226,56 +218,55 @@ The commands may require some extra go packages which you can install with
The full integration tests are run daily on the integration test server. You can The full integration tests are run daily on the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/ find the results at https://pub.rclone.org/integration-tests/
## Code Organisation ## Code Organisation ##
Rclone code is organised into a small number of top level directories Rclone code is organised into a small number of top level directories
with modules beneath. with modules beneath.
- backend - the rclone backends for interfacing to cloud providers - * backend - the rclone backends for interfacing to cloud providers -
- all - import this to load all the cloud providers * all - import this to load all the cloud providers
- ...providers * ...providers
- bin - scripts for use while building or maintaining rclone * bin - scripts for use while building or maintaining rclone
- cmd - the rclone commands * cmd - the rclone commands
- all - import this to load all the commands * all - import this to load all the commands
- ...commands * ...commands
- cmdtest - end-to-end tests of commands, flags, environment variables,... * cmdtest - end-to-end tests of commands, flags, environment variables,...
- docs - the documentation and website * docs - the documentation and website
- content - adjust these docs only - everything else is autogenerated * content - adjust these docs only - everything else is autogenerated
- command - these are auto-generated - edit the corresponding .go file * command - these are auto-generated - edit the corresponding .go file
- fs - main rclone definitions - minimal amount of code * fs - main rclone definitions - minimal amount of code
- accounting - bandwidth limiting and statistics * accounting - bandwidth limiting and statistics
- asyncreader - an io.Reader which reads ahead * asyncreader - an io.Reader which reads ahead
- config - manage the config file and flags * config - manage the config file and flags
- driveletter - detect if a name is a drive letter * driveletter - detect if a name is a drive letter
- filter - implements include/exclude filtering * filter - implements include/exclude filtering
- fserrors - rclone specific error handling * fserrors - rclone specific error handling
- fshttp - http handling for rclone * fshttp - http handling for rclone
- fspath - path handling for rclone * fspath - path handling for rclone
- hash - defines rclone's hash types and functions * hash - defines rclone's hash types and functions
- list - list a remote * list - list a remote
- log - logging facilities * log - logging facilities
- march - iterates directories in lock step * march - iterates directories in lock step
- object - in memory Fs objects * object - in memory Fs objects
- operations - primitives for sync, e.g. Copy, Move * operations - primitives for sync, e.g. Copy, Move
- sync - sync directories * sync - sync directories
- walk - walk a directory * walk - walk a directory
- fstest - provides integration test framework * fstest - provides integration test framework
- fstests - integration tests for the backends * fstests - integration tests for the backends
- mockdir - mocks an fs.Directory * mockdir - mocks an fs.Directory
- mockobject - mocks an fs.Object * mockobject - mocks an fs.Object
- test_all - Runs integration tests for everything * test_all - Runs integration tests for everything
- graphics - the images used in the website, etc. * graphics - the images used in the website, etc.
- lib - libraries used by the backend * lib - libraries used by the backend
- atexit - register functions to run when rclone exits * atexit - register functions to run when rclone exits
- dircache - directory ID to name caching * dircache - directory ID to name caching
- oauthutil - helpers for using oauth * oauthutil - helpers for using oauth
- pacer - retries with backoff and paces operations * pacer - retries with backoff and paces operations
- readers - a selection of useful io.Readers * readers - a selection of useful io.Readers
- rest - a thin abstraction over net/http for REST * rest - a thin abstraction over net/http for REST
- librclone - in memory interface to rclone's API for embedding rclone * vfs - Virtual FileSystem layer for implementing rclone mount and similar
- vfs - Virtual FileSystem layer for implementing rclone mount and similar
## Writing Documentation ## Writing Documentation ##
If you are adding a new feature then please update the documentation. If you are adding a new feature then please update the documentation.
@@ -286,22 +277,22 @@ alphabetical order.
If you add a new backend option/flag, then it should be documented in If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field. the source file in the `Help:` field.
- Start with the most important information about the option, * Start with the most important information about the option,
as a single sentence on a single line. as a single sentence on a single line.
- This text will be used for the command-line flag help. * This text will be used for the command-line flag help.
- It will be combined with other information, such as any default value, * It will be combined with other information, such as any default value,
and the result will look odd if not written as a single sentence. and the result will look odd if not written as a single sentence.
- It should end with a period/full stop character, which will be shown * It should end with a period/full stop character, which will be shown
in docs but automatically removed when producing the flag help. in docs but automatically removed when producing the flag help.
- Try to keep it below 80 characters, to reduce text wrapping in the terminal. * Try to keep it below 80 characters, to reduce text wrapping in the terminal.
- More details can be added in a new paragraph, after an empty line (`"\n\n"`). * More details can be added in a new paragraph, after an empty line (`"\n\n"`).
- Like with docs generated from Markdown, a single line break is ignored * Like with docs generated from Markdown, a single line break is ignored
and two line breaks creates a new paragraph. and two line breaks creates a new paragraph.
- This text will be shown to the user in `rclone config` * This text will be shown to the user in `rclone config`
and in the docs (where it will be added by `make backenddocs`, and in the docs (where it will be added by `make backenddocs`,
normally run some time before next release). normally run some time before next release).
- To create options of enumeration type use the `Examples:` field. * To create options of enumeration type use the `Examples:` field.
- Each example value have their own `Help:` field, but they are treated * Each example value have their own `Help:` field, but they are treated
a bit different than the main option help text. They will be shown a bit different than the main option help text. They will be shown
as an unordered list, therefore a single line break is enough to as an unordered list, therefore a single line break is enough to
create a new list item. Also, for enumeration texts like name of create a new list item. Also, for enumeration texts like name of
@@ -321,12 +312,12 @@ combined unmodified with other information (such as any default value).
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository) Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy. for small changes in the docs which makes it very easy.
## Making a release ## Making a release ##
There are separate instructions for making a release in the RELEASE.md There are separate instructions for making a release in the RELEASE.md
file. file.
## Commit messages ## Commit messages ##
Please make the first line of your commit message a summary of the Please make the first line of your commit message a summary of the
change that a user (not a developer) of rclone would like to read, and change that a user (not a developer) of rclone would like to read, and
@@ -367,7 +358,7 @@ error fixing the hang.
Fixes #1498 Fixes #1498
``` ```
## Adding a dependency ## Adding a dependency ##
rclone uses the [go rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more) modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
@@ -379,7 +370,7 @@ To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`. `go.mod` and `go.sum`.
go get github.com/ncw/new_dependency GO111MODULE=on go get github.com/ncw/new_dependency
You can add constraints on that package when doing `go get` (see the You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to. go docs linked above), but don't unless you really need to.
@@ -387,15 +378,15 @@ go docs linked above), but don't unless you really need to.
Please check in the changes generated by `go mod` including `go.mod` Please check in the changes generated by `go mod` including `go.mod`
and `go.sum` in the same commit as your other changes. and `go.sum` in the same commit as your other changes.
## Updating a dependency ## Updating a dependency ##
If you need to update a dependency then run If you need to update a dependency then run
go get golang.org/x/crypto GO111MODULE=on go get -u golang.org/x/crypto
Check in a single commit as above. Check in a single commit as above.
## Updating all the dependencies ## Updating all the dependencies ##
In order to update all the dependencies then run `make update`. This In order to update all the dependencies then run `make update`. This
just uses the go modules to update all the modules to their latest just uses the go modules to update all the modules to their latest
@@ -404,7 +395,7 @@ stable release. Check in the changes in a single commit as above.
This should be done early in the release cycle to pick up new versions This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing. of packages in time for them to get some testing.
## Updating a backend ## Updating a backend ##
If you update a backend then please run the unit tests and the If you update a backend then please run the unit tests and the
integration tests for that backend. integration tests for that backend.
@@ -419,133 +410,82 @@ integration tests.
The next section goes into more detail about the tests. The next section goes into more detail about the tests.
## Writing a new backend ## Writing a new backend ##
Choose a name. The docs here will use `remote` as an example. Choose a name. The docs here will use `remote` as an example.
Note that in rclone terminology a file system backend is called a Note that in rclone terminology a file system backend is called a
remote or an fs. remote or an fs.
### Research Research
- Look at the interfaces defined in `fs/types.go` * Look at the interfaces defined in `fs/fs.go`
- Study one or more of the existing remotes * Study one or more of the existing remotes
### Getting going Getting going
- Create `backend/remote/remote.go` (copy this from a similar remote) * Create `backend/remote/remote.go` (copy this from a similar remote)
- box is a good one to start from if you have a directory-based remote (and shows how to use the directory cache) * box is a good one to start from if you have a directory-based remote
- b2 is a good one to start from if you have a bucket-based remote * b2 is a good one to start from if you have a bucket-based remote
- Add your remote to the imports in `backend/all/all.go` * Add your remote to the imports in `backend/all/all.go`
- HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good Go SDK from the provider then use that instead. * HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
- Try to implement as many optional methods as possible as it makes the remote more usable. * Try to implement as many optional methods as possible as it makes the remote more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed * Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
- `rclone purge -v TestRemote:rclone-info` * `rclone purge -v TestRemote:rclone-info`
- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info` * `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
- `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json` * `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
- open `remote.csv` in a spreadsheet and examine * open `remote.csv` in a spreadsheet and examine
### Guidelines for a speedy merge Unit tests
- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend. * Create a config entry called `TestRemote` for the unit tests to use
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) if your backend is HTTP based - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything! * Create a `backend/remote/remote_test.go` - copy and adjust your example remote
- **Do** follow your example backend exactly - use the same code order, function names, layout, structure. **Don't** move stuff around and **Don't** delete the comments. * Make sure all tests pass with `go test -v`
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few backends like that - don't follow them!)
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
- **Remember** we have >50 backends to maintain so keeping them as similar as possible to each other is a high priority!
### Unit tests Integration tests
- Create a config entry called `TestRemote` for the unit tests to use * Add your backend to `fstest/test_all/config.yaml`
- Create a `backend/remote/remote_test.go` - copy and adjust your example remote * Once you've done that then you can use the integration test framework from the project root:
- Make sure all tests pass with `go test -v` * go install ./...
* test_all -backends remote
### Integration tests
- Add your backend to `fstest/test_all/config.yaml`
- Once you've done that then you can use the integration test framework from the project root:
- go install ./...
- test_all -backends remote
Or if you want to run the integration tests manually: Or if you want to run the integration tests manually:
- Make sure integration tests pass with * Make sure integration tests pass with
- `cd fs/operations` * `cd fs/operations`
- `go test -v -remote TestRemote:` * `go test -v -remote TestRemote:`
- `cd fs/sync` * `cd fs/sync`
- `go test -v -remote TestRemote:` * `go test -v -remote TestRemote:`
- If your remote defines `ListR` check with this also * If your remote defines `ListR` check with this also
- `go test -v -remote TestRemote: -fast-list` * `go test -v -remote TestRemote: -fast-list`
See the [testing](#testing) section for more information on integration tests. See the [testing](#testing) section for more information on integration tests.
### Backend documentation Add your fs to the docs - you'll need to pick an icon for it from
Add your backend to the docs - you'll need to pick an icon for it from
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
alphabetical order of full name of remote (e.g. `drive` is ordered as alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last. `Google Drive`) but with the local file system last.
- `README.md` - main GitHub page * `README.md` - main GitHub page
- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`) * `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
- make sure this has the `autogenerated options` comments in (see your reference backend docs) * make sure this has the `autogenerated options` comments in (see your reference backend docs)
- update them in your backend with `bin/make_backend_docs.py remote` * update them with `make backenddocs` - revert any changes in other backends
- `docs/content/overview.md` - overview docs * `docs/content/overview.md` - overview docs
- `docs/content/docs.md` - list of remotes in config section * `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org * `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation * `docs/layouts/chrome/navbar.html` - add it to the website navigation
- `bin/make_manual.py` - add the page to the `docs` constant * `bin/make_manual.py` - add the page to the `docs` constant
Once you've written the docs, run `make serve` and check they look OK Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work. in the web browser and the links (internal and external) all work.
## Adding a new s3 provider ## Writing a plugin ##
It is quite easy to add a new S3 provider to rclone.
You'll need to modify the following files
- `backend/s3/s3.go`
- Add the provider to `providerOption` at the top of the file
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
- Exclude your provider from genric config questions (eg `region` and `endpoint).
- Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
- Add the provider at the top of the page.
- Add a section about the provider linked from there.
- Add a transcript of a trial `rclone config` session
- Edit the transcript to remove things which might change in subsequent versions
- **Do not** alter or add to the autogenerated parts of `s3.md`
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- `README.md` - this is the home page in github
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
When adding the provider, endpoints, quirks, docs etc keep them in
alphabetical order by `Provider` name, but with `AWS` first and
`Other` last.
Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
Once you've written the code, test `rclone config` works to your
satisfaction, and check the integration tests work `go test -v -remote
NewS3Provider:`. You may need to adjust the quirks to get them to
pass. Some providers just can't pass the tests with control characters
in the names so if these fail and the provider doesn't support
`urlEncodeListings` in the quirks then ignore them. Note that the
`SetTier` test may also fail on non AWS providers.
For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).
## Writing a plugin
New features (backends, commands) can also be added "out-of-tree", through Go plugins. New features (backends, commands) can also be added "out-of-tree", through Go plugins.
Changes will be kept in a dynamically loaded file instead of being compiled into the main binary. Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone. This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
### Usage Usage
- Naming - Naming
- Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`. - Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
@@ -560,7 +500,7 @@ This is useful if you can't merge your changes upstream or don't want to maintai
- Plugins must be compiled against the exact version of rclone to work. - Plugins must be compiled against the exact version of rclone to work.
(The rclone used during building the plugin must be the same as the source of rclone) (The rclone used during building the plugin must be the same as the source of rclone)
### Building Building
To turn your existing additions into a Go plugin, move them to an external repository To turn your existing additions into a Go plugin, move them to an external repository
and change the top-level package name to `main`. and change the top-level package name to `main`.

View File

@@ -18,9 +18,6 @@ Current active maintainers of rclone are:
| Caleb Case | @calebcase | storj backend | | Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend | | wiserain | @wiserain | pikpak backend |
| albertony | @albertony | | | albertony | @albertony | |
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
| Hideo Aoyama | @boukendesho | snap packaging |
| nielash | @nielash | bisync |
**This is a work in progress Draft** **This is a work in progress Draft**

34605
MANUAL.html generated

File diff suppressed because it is too large Load Diff

10860
MANUAL.md generated

File diff suppressed because it is too large Load Diff

34411
MANUAL.txt generated

File diff suppressed because it is too large Load Diff

View File

@@ -30,7 +30,6 @@ ifdef RELEASE_TAG
TAG := $(RELEASE_TAG) TAG := $(RELEASE_TAG)
endif endif
GO_VERSION := $(shell go version) GO_VERSION := $(shell go version)
GO_OS := $(shell go env GOOS)
ifdef BETA_SUBDIR ifdef BETA_SUBDIR
BETA_SUBDIR := /$(BETA_SUBDIR) BETA_SUBDIR := /$(BETA_SUBDIR)
endif endif
@@ -47,13 +46,7 @@ endif
.PHONY: rclone test_all vars version .PHONY: rclone test_all vars version
rclone: rclone:
ifeq ($(GO_OS),windows)
go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso
endif
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
ifeq ($(GO_OS),windows)
rm resource_windows_`go env GOARCH`.syso
endif
mkdir -p `go env GOPATH`/bin/ mkdir -p `go env GOPATH`/bin/
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE` mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
@@ -73,10 +66,6 @@ btest:
@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip @echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
@echo "Copied markdown of beta release to clip board" @echo "Copied markdown of beta release to clip board"
btesth:
@echo "<a href="$(BETA_URL)">$(TAG)</a> on branch <a href="https://github.com/rclone/rclone/tree/$(BRANCH)">$(BRANCH)</a> (uploaded in 15-30 mins)" | xclip -r -sel clip -t text/html
@echo "Copied beta release in HTML to clip board"
version: version:
@echo '$(TAG)' @echo '$(TAG)'
@@ -109,6 +98,10 @@ build_dep:
release_dep_linux: release_dep_linux:
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
# Get the release dependencies we only install on Windows
release_dep_windows:
GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest
# Update dependencies # Update dependencies
showupdates: showupdates:
@echo "*** Direct dependencies that could be updated ***" @echo "*** Direct dependencies that could be updated ***"
@@ -153,7 +146,7 @@ rcdocs: rclone
install: rclone install: rclone
install -d ${DESTDIR}/usr/bin install -d ${DESTDIR}/usr/bin
install ${GOPATH}/bin/rclone ${DESTDIR}/usr/bin install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
clean: clean:
go clean ./... go clean ./...

View File

@@ -51,16 +51,12 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3) * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos) * IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/) * Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage) * Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Linkbox [:page_facing_up:](https://rclone.org/linkbox)
* Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/) * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/) * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/) * Mega [:page_facing_up:](https://rclone.org/mega/)
* Memory [:page_facing_up:](https://rclone.org/memory/) * Memory [:page_facing_up:](https://rclone.org/memory/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/) * Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/) * Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio) * Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud) * Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
@@ -76,10 +72,8 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* PikPak [:page_facing_up:](https://rclone.org/pikpak/) * PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/) * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/) * put.io [:page_facing_up:](https://rclone.org/putio/)
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/) * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu) * Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/) * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp) * RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway) * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
@@ -90,7 +84,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath) * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/) * Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/) * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos) * Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi) * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/) * WebDAV [:page_facing_up:](https://rclone.org/webdav/)

View File

@@ -41,15 +41,12 @@ Early in the next release cycle update the dependencies
* Review any pinned packages in go.mod and remove if possible * Review any pinned packages in go.mod and remove if possible
* make updatedirect * make updatedirect
* make GOTAGS=cmount * make
* make compiletest
* git commit -a -v * git commit -a -v
* make update * make update
* make GOTAGS=cmount * make
* make compiletest
* roll back any updates which didn't compile * roll back any updates which didn't compile
* git commit -a -v --amend * git commit -a -v --amend
* **NB** watch out for this changing the default go version in `go.mod`
Note that `make update` updates all direct and indirect dependencies Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with and there can occasionally be forwards compatibility problems with
@@ -93,35 +90,6 @@ Now
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}" * git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push * git push
## Sponsor logos
If updating the website note that the sponsor logos have been moved out of the main repository.
You will need to checkout `/docs/static/img/logos` from https://github.com/rclone/third-party-logos
which is a private repo containing artwork from sponsors.
## Update the website between releases
Create an update website branch based off the last release
git co -b update-website
If the branch already exists, double check there are no commits that need saving.
Now reset the branch to the last release
git reset --hard v1.64.0
Create the changes, check them in, test with `make serve` then
make upload_test_website
Check out https://test.rclone.org and when happy
make upload_website
Cherry pick any changes back to master and the stable branch if it is active.
## Making a manual build of docker ## Making a manual build of docker
The rclone docker image should autobuild on via GitHub actions. If it doesn't The rclone docker image should autobuild on via GitHub actions. If it doesn't

View File

@@ -1 +1 @@
v1.65.0 v1.64.0

View File

@@ -6,7 +6,6 @@ import (
_ "github.com/rclone/rclone/backend/alias" _ "github.com/rclone/rclone/backend/alias"
_ "github.com/rclone/rclone/backend/amazonclouddrive" _ "github.com/rclone/rclone/backend/amazonclouddrive"
_ "github.com/rclone/rclone/backend/azureblob" _ "github.com/rclone/rclone/backend/azureblob"
_ "github.com/rclone/rclone/backend/azurefiles"
_ "github.com/rclone/rclone/backend/b2" _ "github.com/rclone/rclone/backend/b2"
_ "github.com/rclone/rclone/backend/box" _ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache" _ "github.com/rclone/rclone/backend/cache"
@@ -25,11 +24,9 @@ import (
_ "github.com/rclone/rclone/backend/hdfs" _ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive" _ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http" _ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/imagekit"
_ "github.com/rclone/rclone/backend/internetarchive" _ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud" _ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr" _ "github.com/rclone/rclone/backend/koofr"
_ "github.com/rclone/rclone/backend/linkbox"
_ "github.com/rclone/rclone/backend/local" _ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/mailru" _ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega" _ "github.com/rclone/rclone/backend/mega"
@@ -41,10 +38,8 @@ import (
_ "github.com/rclone/rclone/backend/pcloud" _ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak" _ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/premiumizeme" _ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio" _ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor" _ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/quatrix"
_ "github.com/rclone/rclone/backend/s3" _ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile" _ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp" _ "github.com/rclone/rclone/backend/sftp"

View File

@@ -1,10 +1,11 @@
//go:build !plan9 && !solaris && !js //go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js // +build !plan9,!solaris,!js,go1.18
// Package azureblob provides an interface to the Microsoft Azure blob object storage system // Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob package azureblob
import ( import (
"bytes"
"context" "context"
"crypto/md5" "crypto/md5"
"encoding/base64" "encoding/base64"
@@ -17,7 +18,6 @@ import (
"net/url" "net/url"
"os" "os"
"path" "path"
"sort"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@@ -33,6 +33,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunksize" "github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configmap"
@@ -45,8 +46,10 @@ import (
"github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/sync/errgroup"
) )
const ( const (
@@ -67,16 +70,12 @@ const (
emulatorAccount = "devstoreaccount1" emulatorAccount = "devstoreaccount1"
emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1" emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
) )
var ( var (
errCantUpdateArchiveTierBlobs = fserrors.NoRetryError(errors.New("can't update archive tier blob without --azureblob-archive-tier-delete")) errCantUpdateArchiveTierBlobs = fserrors.NoRetryError(errors.New("can't update archive tier blob without --azureblob-archive-tier-delete"))
// Take this when changing or reading metadata.
//
// It acts as global metadata lock so we don't bloat Object
// with an extra lock that will only very rarely be contended.
metadataMu sync.Mutex
) )
// Register with Fs // Register with Fs
@@ -96,7 +95,6 @@ Leave blank to use SAS URL or Emulator, otherwise it needs to be set.
If this is blank and if env_auth is set it will be read from the If this is blank and if env_auth is set it will be read from the
environment variable ` + "`AZURE_STORAGE_ACCOUNT_NAME`" + ` if possible. environment variable ` + "`AZURE_STORAGE_ACCOUNT_NAME`" + ` if possible.
`, `,
Sensitive: true,
}, { }, {
Name: "env_auth", Name: "env_auth",
Help: `Read credentials from runtime (environment variables, CLI or MSI). Help: `Read credentials from runtime (environment variables, CLI or MSI).
@@ -108,13 +106,11 @@ See the [authentication docs](/azureblob#authentication) for full info.`,
Help: `Storage Account Shared Key. Help: `Storage Account Shared Key.
Leave blank to use SAS URL or Emulator.`, Leave blank to use SAS URL or Emulator.`,
Sensitive: true,
}, { }, {
Name: "sas_url", Name: "sas_url",
Help: `SAS URL for container level access only. Help: `SAS URL for container level access only.
Leave blank if using account/key or Emulator.`, Leave blank if using account/key or Emulator.`,
Sensitive: true,
}, { }, {
Name: "tenant", Name: "tenant",
Help: `ID of the service principal's tenant. Also called its directory ID. Help: `ID of the service principal's tenant. Also called its directory ID.
@@ -124,7 +120,6 @@ Set this if using
- Service principal with certificate - Service principal with certificate
- User with username and password - User with username and password
`, `,
Sensitive: true,
}, { }, {
Name: "client_id", Name: "client_id",
Help: `The ID of the client in use. Help: `The ID of the client in use.
@@ -134,7 +129,6 @@ Set this if using
- Service principal with certificate - Service principal with certificate
- User with username and password - User with username and password
`, `,
Sensitive: true,
}, { }, {
Name: "client_secret", Name: "client_secret",
Help: `One of the service principal's client secrets Help: `One of the service principal's client secrets
@@ -142,7 +136,6 @@ Set this if using
Set this if using Set this if using
- Service principal with client secret - Service principal with client secret
`, `,
Sensitive: true,
}, { }, {
Name: "client_certificate_path", Name: "client_certificate_path",
Help: `Path to a PEM or PKCS12 certificate file including the private key. Help: `Path to a PEM or PKCS12 certificate file including the private key.
@@ -180,8 +173,7 @@ Optionally set this if using
Set this if using Set this if using
- User with username and password - User with username and password
`, `,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "password", Name: "password",
Help: `The user's password Help: `The user's password
@@ -224,20 +216,17 @@ msi_client_id, or msi_mi_res_id parameters.`,
Default: false, Default: false,
Advanced: true, Advanced: true,
}, { }, {
Name: "msi_object_id", Name: "msi_object_id",
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.", Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "msi_client_id", Name: "msi_client_id",
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.", Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "msi_mi_res_id", Name: "msi_mi_res_id",
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.", Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "use_emulator", Name: "use_emulator",
Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.", Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
@@ -295,10 +284,10 @@ avoid the time out.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "access_tier", Name: "access_tier",
Help: `Access tier of blob: hot, cool, cold or archive. Help: `Access tier of blob: hot, cool or archive.
Archived blobs can be restored by setting access tier to hot, cool or Archived blobs can be restored by setting access tier to hot or
cold. Leave blank if you intend to use default access tier, which is cool. Leave blank if you intend to use default access tier, which is
set at account level set at account level
If there is no "access tier" specified, rclone doesn't apply any tier. If there is no "access tier" specified, rclone doesn't apply any tier.
@@ -306,7 +295,7 @@ rclone performs "Set Tier" operation on blobs while uploading, if objects
are not modified, specifying "access tier" to new one will have no effect. are not modified, specifying "access tier" to new one will have no effect.
If blobs are in "archive tier" at remote, trying to perform data transfer If blobs are in "archive tier" at remote, trying to perform data transfer
operations from remote will not be allowed. User should first restore by operations from remote will not be allowed. User should first restore by
tiering blob to "Hot", "Cool" or "Cold".`, tiering blob to "Hot" or "Cool".`,
Advanced: true, Advanced: true,
}, { }, {
Name: "archive_tier_delete", Name: "archive_tier_delete",
@@ -338,16 +327,17 @@ to start uploading.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "memory_pool_flush_time", Name: "memory_pool_flush_time",
Default: fs.Duration(time.Minute), Default: memoryPoolFlushTime,
Advanced: true, Advanced: true,
Hide: fs.OptionHideBoth, Help: `How often internal memory buffer pools will be flushed.
Help: `How often internal memory buffer pools will be flushed. (no longer used)`,
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, { }, {
Name: "memory_pool_use_mmap", Name: "memory_pool_use_mmap",
Default: false, Default: memoryPoolUseMmap,
Advanced: true, Advanced: true,
Hide: fs.OptionHideBoth, Help: `Whether to use mmap buffers in internal memory pool.`,
Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -432,6 +422,8 @@ type Options struct {
ArchiveTierDelete bool `config:"archive_tier_delete"` ArchiveTierDelete bool `config:"archive_tier_delete"`
UseEmulator bool `config:"use_emulator"` UseEmulator bool `config:"use_emulator"`
DisableCheckSum bool `config:"disable_checksum"` DisableCheckSum bool `config:"disable_checksum"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
PublicAccess string `config:"public_access"` PublicAccess string `config:"public_access"`
DirectoryMarkers bool `config:"directory_markers"` DirectoryMarkers bool `config:"directory_markers"`
@@ -455,6 +447,8 @@ type Fs struct {
cache *bucket.Cache // cache for container creation status cache *bucket.Cache // cache for container creation status
pacer *fs.Pacer // To pace and retry the API calls pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
poolSize int64 // size of pages in memory pool
publicAccess container.PublicAccessType // Container Public Access Level publicAccess container.PublicAccessType // Container Public Access Level
} }
@@ -467,7 +461,7 @@ type Object struct {
size int64 // Size of the object size int64 // Size of the object
mimeType string // Content-Type of the object mimeType string // Content-Type of the object
accessTier blob.AccessTier // Blob Access Tier accessTier blob.AccessTier // Blob Access Tier
meta map[string]string // blob metadata - take metadataMu when accessing meta map[string]string // blob metadata
} }
// ------------------------------------------------------------ // ------------------------------------------------------------
@@ -520,7 +514,6 @@ func (o *Object) split() (container, containerPath string) {
func validateAccessTier(tier string) bool { func validateAccessTier(tier string) bool {
return strings.EqualFold(tier, string(blob.AccessTierHot)) || return strings.EqualFold(tier, string(blob.AccessTierHot)) ||
strings.EqualFold(tier, string(blob.AccessTierCool)) || strings.EqualFold(tier, string(blob.AccessTierCool)) ||
strings.EqualFold(tier, string(blob.AccessTierCold)) ||
strings.EqualFold(tier, string(blob.AccessTierArchive)) strings.EqualFold(tier, string(blob.AccessTierArchive))
} }
@@ -650,8 +643,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.AccessTier == "" { if opt.AccessTier == "" {
opt.AccessTier = string(defaultAccessTier) opt.AccessTier = string(defaultAccessTier)
} else if !validateAccessTier(opt.AccessTier) { } else if !validateAccessTier(opt.AccessTier) {
return nil, fmt.Errorf("supported access tiers are %s, %s, %s and %s", return nil, fmt.Errorf("supported access tiers are %s, %s and %s",
string(blob.AccessTierHot), string(blob.AccessTierCool), string(blob.AccessTierCold), string(blob.AccessTierArchive)) string(blob.AccessTierHot), string(blob.AccessTierCool), string(blob.AccessTierArchive))
} }
if !validatePublicAccess((opt.PublicAccess)) { if !validatePublicAccess((opt.PublicAccess)) {
@@ -668,6 +661,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
uploadToken: pacer.NewTokenDispenser(ci.Transfers), uploadToken: pacer.NewTokenDispenser(ci.Transfers),
cache: bucket.NewCache(), cache: bucket.NewCache(),
cntSVCcache: make(map[string]*container.Client, 1), cntSVCcache: make(map[string]*container.Client, 1),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
ci.Transfers,
opt.MemoryPoolUseMmap,
),
poolSize: int64(opt.ChunkSize),
} }
f.publicAccess = container.PublicAccessType(opt.PublicAccess) f.publicAccess = container.PublicAccessType(opt.PublicAccess)
f.setRoot(root) f.setRoot(root)
@@ -962,9 +962,6 @@ func (f *Fs) getBlockBlobSVC(container, containerPath string) *blockblob.Client
// updateMetadataWithModTime adds the modTime passed in to o.meta. // updateMetadataWithModTime adds the modTime passed in to o.meta.
func (o *Object) updateMetadataWithModTime(modTime time.Time) { func (o *Object) updateMetadataWithModTime(modTime time.Time) {
metadataMu.Lock()
defer metadataMu.Unlock()
// Make sure o.meta is not nil // Make sure o.meta is not nil
if o.meta == nil { if o.meta == nil {
o.meta = make(map[string]string, 1) o.meta = make(map[string]string, 1)
@@ -1496,7 +1493,7 @@ func (f *Fs) deleteContainer(ctx context.Context, containerName string) error {
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(ctx context.Context, dir string) error {
container, directory := f.split(dir) container, directory := f.split(dir)
// Remove directory marker file // Remove directory marker file
if f.opt.DirectoryMarkers && container != "" && directory != "" { if f.opt.DirectoryMarkers && container != "" && dir != "" {
o := &Object{ o := &Object{
fs: f, fs: f,
remote: dir + "/", remote: dir + "/",
@@ -1530,10 +1527,7 @@ func (f *Fs) Hashes() hash.Set {
// Purge deletes all the files and directories including the old versions. // Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context, dir string) error { func (f *Fs) Purge(ctx context.Context, dir string) error {
container, directory := f.split(dir) container, directory := f.split(dir)
if container == "" { if container == "" || directory != "" {
return errors.New("can't purge from root")
}
if directory != "" {
// Delegate to caller if not root of a container // Delegate to caller if not root of a container
return fs.ErrorCantPurge return fs.ErrorCantPurge
} }
@@ -1590,6 +1584,19 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return f.NewObject(ctx, remote) return f.NewObject(ctx, remote)
} }
func (f *Fs) getMemoryPool(size int64) *pool.Pool {
if size == int64(f.opt.ChunkSize) {
return f.pool
}
return pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(size),
f.ci.Transfers,
f.opt.MemoryPoolUseMmap,
)
}
// ------------------------------------------------------------ // ------------------------------------------------------------
// Fs returns the parent Fs // Fs returns the parent Fs
@@ -1633,9 +1640,6 @@ func (o *Object) Size() int64 {
// Set o.metadata from metadata // Set o.metadata from metadata
func (o *Object) setMetadata(metadata map[string]*string) { func (o *Object) setMetadata(metadata map[string]*string) {
metadataMu.Lock()
defer metadataMu.Unlock()
if len(metadata) > 0 { if len(metadata) > 0 {
// Lower case the metadata // Lower case the metadata
o.meta = make(map[string]string, len(metadata)) o.meta = make(map[string]string, len(metadata))
@@ -1660,9 +1664,6 @@ func (o *Object) setMetadata(metadata map[string]*string) {
// Get metadata from o.meta // Get metadata from o.meta
func (o *Object) getMetadata() (metadata map[string]*string) { func (o *Object) getMetadata() (metadata map[string]*string) {
metadataMu.Lock()
defer metadataMu.Unlock()
if len(o.meta) == 0 { if len(o.meta) == 0 {
return nil return nil
} }
@@ -1874,7 +1875,12 @@ func (o *Object) ModTime(ctx context.Context) (result time.Time) {
// SetModTime sets the modification time of the local fs object // SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
o.updateMetadataWithModTime(modTime) // Make sure o.meta is not nil
if o.meta == nil {
o.meta = make(map[string]string, 1)
}
// Set modTimeKey in it
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
blb := o.getBlobSVC() blb := o.getBlobSVC()
opt := blob.SetMetadataOptions{} opt := blob.SetMetadataOptions{}
@@ -1900,7 +1906,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var offset int64 var offset int64
var count int64 var count int64
if o.AccessTier() == blob.AccessTierArchive { if o.AccessTier() == blob.AccessTierArchive {
return nil, fmt.Errorf("blob in archive tier, you need to set tier to hot, cool, cold first") return nil, fmt.Errorf("blob in archive tier, you need to set tier to hot or cool first")
} }
fs.FixRangeOption(options, o.size) fs.FixRangeOption(options, o.size)
for _, option := range options { for _, option := range options {
@@ -1966,8 +1972,8 @@ func (rs *readSeekCloser) Close() error {
return nil return nil
} }
// increment the array as LSB binary // increment the slice passed in as LSB binary
func increment(xs *[8]byte) { func increment(xs []byte) {
for i, digit := range xs { for i, digit := range xs {
newDigit := digit + 1 newDigit := digit + 1
xs[i] = newDigit xs[i] = newDigit
@@ -1978,43 +1984,22 @@ func increment(xs *[8]byte) {
} }
} }
// record chunk number and id for Close var warnStreamUpload sync.Once
type azBlock struct {
chunkNumber int
id string
}
// Implements the fs.ChunkWriter interface // uploadMultipart uploads a file using multipart upload
type azChunkWriter struct {
chunkSize int64
size int64
f *Fs
ui uploadInfo
blocksMu sync.Mutex // protects the below
blocks []azBlock // list of blocks for finalize
binaryBlockID [8]byte // block counter as LSB first 8 bytes
o *Object
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
// //
// Pass in the remote and the src object // Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
// You can also use options to hint at the desired chunk size func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, blb *blockblob.Client, httpHeaders *blob.HTTPHeaders) (err error) {
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
ui, err := o.prepareUpload(ctx, src, options)
if err != nil {
return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
}
// Calculate correct partSize // Calculate correct partSize
partSize := f.opt.ChunkSize partSize := o.fs.opt.ChunkSize
totalParts := -1 totalParts := -1
size := src.Size()
// make concurrency machinery
concurrency := o.fs.opt.UploadConcurrency
if concurrency < 1 {
concurrency = 1
}
tokens := pacer.NewTokenDispenser(concurrency)
// Note that the max size of file is 4.75 TB (100 MB X 50,000 // Note that the max size of file is 4.75 TB (100 MB X 50,000
// blocks) and this is bigger than the max uncommitted block // blocks) and this is bigger than the max uncommitted block
@@ -2028,13 +2013,13 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
// 195GB which seems like a not too unreasonable limit. // 195GB which seems like a not too unreasonable limit.
if size == -1 { if size == -1 {
warnStreamUpload.Do(func() { warnStreamUpload.Do(func() {
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v", fs.Logf(o, "Streaming uploads using chunk size %v will have maximum file size of %v",
f.opt.ChunkSize, partSize*fs.SizeSuffix(blockblob.MaxBlocks)) o.fs.opt.ChunkSize, partSize*fs.SizeSuffix(blockblob.MaxBlocks))
}) })
} else { } else {
partSize = chunksize.Calculator(remote, size, blockblob.MaxBlocks, f.opt.ChunkSize) partSize = chunksize.Calculator(o, size, blockblob.MaxBlocks, o.fs.opt.ChunkSize)
if partSize > fs.SizeSuffix(blockblob.MaxStageBlockBytes) { if partSize > fs.SizeSuffix(blockblob.MaxStageBlockBytes) {
return info, nil, fmt.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), fs.SizeSuffix(blockblob.MaxBlocks), fs.SizeSuffix(blockblob.MaxStageBlockBytes)) return fmt.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), fs.SizeSuffix(blockblob.MaxBlocks), fs.SizeSuffix(blockblob.MaxStageBlockBytes))
} }
totalParts = int(fs.SizeSuffix(size) / partSize) totalParts = int(fs.SizeSuffix(size) / partSize)
if fs.SizeSuffix(size)%partSize != 0 { if fs.SizeSuffix(size)%partSize != 0 {
@@ -2044,264 +2029,173 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, partSize) fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, partSize)
chunkWriter := &azChunkWriter{ // unwrap the accounting from the input, we use wrap to put it
chunkSize: int64(partSize), // back on after the buffering
size: size, in, wrap := accounting.UnWrap(in)
f: f,
ui: ui,
o: o,
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(partSize),
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
fs.Debugf(o, "open chunk writer: started multipart upload")
return info, chunkWriter, nil
}
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0 // FIXME it would be nice to delete uncommitted blocks
func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (int64, error) { // See: https://github.com/rclone/rclone/issues/5583
if chunkNumber < 0 { //
err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber) // However there doesn't seem to be an easy way of doing this other than
return -1, err // by deleting the target.
} //
// This means that a failed upload deletes the target which isn't ideal.
//
// Uploading a zero length blob and deleting it will remove the
// uncommitted blocks I think.
//
// Could check to see if a file exists already and if it
// doesn't then create a 0 length file and delete it to flush
// the uncommitted blocks.
//
// This is what azcopy does
// https://github.com/MicrosoftDocs/azure-docs/issues/36347#issuecomment-541457962
// defer atexit.OnError(&err, func() {
// fs.Debugf(o, "Cancelling multipart upload")
// // Code goes here!
// })()
// Upload the block, with MD5 for check // Upload the chunks
m := md5.New() var (
currentChunkSize, err := io.Copy(m, reader) g, gCtx = errgroup.WithContext(ctx)
if err != nil { remaining = fs.SizeSuffix(size) // remaining size in file for logging only, -1 if size < 0
return -1, err position = fs.SizeSuffix(0) // position in file
} memPool = o.fs.getMemoryPool(int64(partSize)) // pool to get memory from
// If no data read, don't write the chunk finished = false // set when we have read EOF
if currentChunkSize == 0 { blocks []string // list of blocks for finalize
return 0, nil binaryBlockID = make([]byte, 8) // block counter as LSB first 8 bytes
} )
md5sum := m.Sum(nil) for part := 0; !finished; part++ {
transactionalMD5 := md5sum[:] // Get a block of memory from the pool and a token which limits concurrency
tokens.Get()
buf := memPool.Get()
// increment the blockID and save the blocks for finalize free := func() {
increment(&w.binaryBlockID) memPool.Put(buf) // return the buf
blockID := base64.StdEncoding.EncodeToString(w.binaryBlockID[:]) tokens.Put() // return the token
// Save the blockID for the commit
w.blocksMu.Lock()
w.blocks = append(w.blocks, azBlock{
chunkNumber: chunkNumber,
id: blockID,
})
w.blocksMu.Unlock()
err = w.f.pacer.Call(func() (bool, error) {
// rewind the reader on retry and after reading md5
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
} }
options := blockblob.StageBlockOptions{
// Specify the transactional md5 for the body, to be validated by the service. // Fail fast, in case an errgroup managed function returns an error
TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5), // gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
free()
break
} }
_, err = w.ui.blb.StageBlock(ctx, blockID, &readSeekCloser{Reader: reader, Seeker: reader}, &options)
if err != nil { // Read the chunk
if chunkNumber <= 8 { n, err := readers.ReadFill(in, buf) // this can never return 0, nil
return w.f.shouldRetry(ctx, err) if err == io.EOF {
if n == 0 { // end if no data
free()
break
} }
// retry all chunks once have done the first few finished = true
return true, err } else if err != nil {
free()
return fmt.Errorf("multipart upload failed to read source: %w", err)
} }
return false, nil buf = buf[:n]
})
if err != nil { // increment the blockID and save the blocks for finalize
return -1, fmt.Errorf("failed to upload chunk %d with %v bytes: %w", chunkNumber+1, currentChunkSize, err) increment(binaryBlockID)
blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
blocks = append(blocks, blockID)
// Transfer the chunk
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %d", part+1, totalParts, position, fs.SizeSuffix(size), len(buf))
g.Go(func() (err error) {
defer free()
// Upload the block, with MD5 for check
md5sum := md5.Sum(buf)
transactionalMD5 := md5sum[:]
err = o.fs.pacer.Call(func() (bool, error) {
bufferReader := bytes.NewReader(buf)
wrappedReader := wrap(bufferReader)
rs := readSeekCloser{wrappedReader, bufferReader}
options := blockblob.StageBlockOptions{
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
}
_, err = blb.StageBlock(ctx, blockID, &rs, &options)
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("multipart upload failed to upload part: %w", err)
}
return nil
})
// ready for next block
if size >= 0 {
remaining -= partSize
}
position += partSize
} }
err = g.Wait()
fs.Debugf(w.o, "multipart upload wrote chunk %d with %v bytes", chunkNumber+1, currentChunkSize) if err != nil {
return currentChunkSize, err return err
}
// Abort the multipart upload.
//
// FIXME it would be nice to delete uncommitted blocks.
//
// See: https://github.com/rclone/rclone/issues/5583
//
// However there doesn't seem to be an easy way of doing this other than
// by deleting the target.
//
// This means that a failed upload deletes the target which isn't ideal.
//
// Uploading a zero length blob and deleting it will remove the
// uncommitted blocks I think.
//
// Could check to see if a file exists already and if it doesn't then
// create a 0 length file and delete it to flush the uncommitted
// blocks.
//
// This is what azcopy does
// https://github.com/MicrosoftDocs/azure-docs/issues/36347#issuecomment-541457962
func (w *azChunkWriter) Abort(ctx context.Context) error {
fs.Debugf(w.o, "multipart upload aborted (did nothing - see issue #5583)")
return nil
}
// Close and finalise the multipart upload
func (w *azChunkWriter) Close(ctx context.Context) (err error) {
// sort the completed parts by part number
sort.Slice(w.blocks, func(i, j int) bool {
return w.blocks[i].chunkNumber < w.blocks[j].chunkNumber
})
// Create a list of block IDs
blockIDs := make([]string, len(w.blocks))
for i := range w.blocks {
blockIDs[i] = w.blocks[i].id
} }
options := blockblob.CommitBlockListOptions{ options := blockblob.CommitBlockListOptions{
Metadata: w.o.getMetadata(), Metadata: o.getMetadata(),
Tier: parseTier(w.f.opt.AccessTier), Tier: parseTier(o.fs.opt.AccessTier),
HTTPHeaders: &w.ui.httpHeaders, HTTPHeaders: httpHeaders,
} }
// Finalise the upload session // Finalise the upload session
err = w.f.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
_, err := w.ui.blb.CommitBlockList(ctx, blockIDs, &options) _, err := blb.CommitBlockList(ctx, blocks, &options)
return w.f.shouldRetry(ctx, err) return o.fs.shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return fmt.Errorf("failed to complete multipart upload: %w", err) return fmt.Errorf("multipart upload failed to finalize: %w", err)
} }
fs.Debugf(w.o, "multipart upload finished") return nil
return err
}
var warnStreamUpload sync.Once
// uploadMultipart uploads a file using multipart upload
//
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (ui uploadInfo, err error) {
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
Open: o.fs,
OpenOptions: options,
})
if err != nil {
return ui, err
}
return chunkWriter.(*azChunkWriter).ui, nil
} }
// uploadSinglepart uploads a short blob using a single part upload // uploadSinglepart uploads a short blob using a single part upload
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, ui uploadInfo) (err error) { func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, blb *blockblob.Client, httpHeaders *blob.HTTPHeaders) (err error) {
chunkSize := int64(o.fs.opt.ChunkSize)
// fs.Debugf(o, "Single part upload starting of object %d bytes", size) // fs.Debugf(o, "Single part upload starting of object %d bytes", size)
if size > chunkSize || size < 0 { if size > o.fs.poolSize || size < 0 {
return fmt.Errorf("internal error: single part upload size too big %d > %d", size, chunkSize) return fmt.Errorf("internal error: single part upload size too big %d > %d", size, o.fs.opt.ChunkSize)
} }
rw := multipart.NewRW() buf := o.fs.pool.Get()
defer fs.CheckClose(rw, &err) defer o.fs.pool.Put(buf)
n, err := io.CopyN(rw, in, size+1) n, err := readers.ReadFill(in, buf)
if err == nil {
// Check to see whether in is exactly len(buf) or bigger
var buf2 = []byte{0}
n2, err2 := readers.ReadFill(in, buf2)
if n2 != 0 || err2 != io.EOF {
return fmt.Errorf("single part upload read failed: object longer than expected (expecting %d but got > %d)", size, len(buf))
}
}
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return fmt.Errorf("single part upload read failed: %w", err) return fmt.Errorf("single part upload read failed: %w", err)
} }
if n != size { if int64(n) != size {
return fmt.Errorf("single part upload: expecting to read %d bytes but read %d", size, n) return fmt.Errorf("single part upload: expecting to read %d bytes but read %d", size, n)
} }
rs := &readSeekCloser{Reader: rw, Seeker: rw} b := bytes.NewReader(buf[:n])
rs := &readSeekCloser{Reader: b, Seeker: b}
options := blockblob.UploadOptions{ options := blockblob.UploadOptions{
Metadata: o.getMetadata(), Metadata: o.getMetadata(),
Tier: parseTier(o.fs.opt.AccessTier), Tier: parseTier(o.fs.opt.AccessTier),
HTTPHeaders: &ui.httpHeaders, HTTPHeaders: httpHeaders,
} }
return o.fs.pacer.Call(func() (bool, error) { // Don't retry, return a retry error instead
// rewind the reader on retry return o.fs.pacer.CallNoRetry(func() (bool, error) {
_, err = rs.Seek(0, io.SeekStart) _, err = blb.Upload(ctx, rs, &options)
if err != nil {
return false, err
}
_, err = ui.blb.Upload(ctx, rs, &options)
return o.fs.shouldRetry(ctx, err) return o.fs.shouldRetry(ctx, err)
}) })
} }
// Info needed for an upload
type uploadInfo struct {
blb *blockblob.Client
httpHeaders blob.HTTPHeaders
isDirMarker bool
}
// Prepare the object for upload
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
container, containerPath := o.split()
if container == "" || containerPath == "" {
return ui, fmt.Errorf("can't upload to root - need a container")
}
// Create parent dir/bucket if not saving directory marker
metadataMu.Lock()
_, ui.isDirMarker = o.meta[dirMetaKey]
metadataMu.Unlock()
if !ui.isDirMarker {
err = o.fs.mkdirParent(ctx, o.remote)
if err != nil {
return ui, err
}
}
// Update Mod time
o.updateMetadataWithModTime(src.ModTime(ctx))
if err != nil {
return ui, err
}
// Create the HTTP headers for the upload
ui.httpHeaders = blob.HTTPHeaders{
BlobContentType: pString(fs.MimeType(ctx, src)),
}
// Compute the Content-MD5 of the file. As we stream all uploads it
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
if !o.fs.opt.DisableCheckSum {
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
ui.httpHeaders.BlobContentMD5 = sourceMD5bytes
} else {
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
}
}
}
// Apply upload options (also allows one to overwrite content-type)
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
ui.httpHeaders.BlobCacheControl = pString(value)
case "content-disposition":
ui.httpHeaders.BlobContentDisposition = pString(value)
case "content-encoding":
ui.httpHeaders.BlobContentEncoding = pString(value)
case "content-language":
ui.httpHeaders.BlobContentLanguage = pString(value)
case "content-type":
ui.httpHeaders.BlobContentType = pString(value)
}
}
ui.blb = o.fs.getBlockBlobSVC(container, containerPath)
return ui, nil
}
// Update the object with the contents of the io.Reader, modTime and size // Update the object with the contents of the io.Reader, modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
@@ -2317,26 +2211,80 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errCantUpdateArchiveTierBlobs return errCantUpdateArchiveTierBlobs
} }
} }
container, containerPath := o.split()
size := src.Size() if container == "" || containerPath == "" {
multipartUpload := size < 0 || size > int64(o.fs.opt.ChunkSize) return fmt.Errorf("can't upload to root - need a container")
var ui uploadInfo }
// Create parent dir/bucket if not saving directory marker
if multipartUpload { _, isDirMarker := o.meta[dirMetaKey]
ui, err = o.uploadMultipart(ctx, in, src, options...) if !isDirMarker {
} else { err = o.fs.mkdirParent(ctx, o.remote)
ui, err = o.prepareUpload(ctx, src, options)
if err != nil { if err != nil {
return fmt.Errorf("failed to prepare upload: %w", err) return err
} }
err = o.uploadSinglepart(ctx, in, size, ui) }
// Update Mod time
fs.Debugf(nil, "o.meta = %+v", o.meta)
o.updateMetadataWithModTime(src.ModTime(ctx))
if err != nil {
return err
}
// Create the HTTP headers for the upload
httpHeaders := blob.HTTPHeaders{
BlobContentType: pString(fs.MimeType(ctx, src)),
}
// Compute the Content-MD5 of the file. As we stream all uploads it
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
if !o.fs.opt.DisableCheckSum {
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
httpHeaders.BlobContentMD5 = sourceMD5bytes
} else {
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
}
}
}
// Apply upload options (also allows one to overwrite content-type)
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
httpHeaders.BlobCacheControl = pString(value)
case "content-disposition":
httpHeaders.BlobContentDisposition = pString(value)
case "content-encoding":
httpHeaders.BlobContentEncoding = pString(value)
case "content-language":
httpHeaders.BlobContentLanguage = pString(value)
case "content-type":
httpHeaders.BlobContentType = pString(value)
}
}
blb := o.fs.getBlockBlobSVC(container, containerPath)
size := src.Size()
multipartUpload := size < 0 || size > o.fs.poolSize
fs.Debugf(nil, "o.meta = %+v", o.meta)
if multipartUpload {
err = o.uploadMultipart(ctx, in, size, blb, &httpHeaders)
} else {
err = o.uploadSinglepart(ctx, in, size, blb, &httpHeaders)
} }
if err != nil { if err != nil {
return err return err
} }
// Refresh metadata on object // Refresh metadata on object
if !ui.isDirMarker { if !isDirMarker {
o.clearMetaData() o.clearMetaData()
err = o.readMetaData(ctx) err = o.readMetaData(ctx)
if err != nil { if err != nil {
@@ -2425,14 +2373,13 @@ func parseTier(tier string) *blob.AccessTier {
// Check the interfaces are satisfied // Check the interfaces are satisfied
var ( var (
_ fs.Fs = &Fs{} _ fs.Fs = &Fs{}
_ fs.Copier = &Fs{} _ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{} _ fs.PutStreamer = &Fs{}
_ fs.Purger = &Fs{} _ fs.Purger = &Fs{}
_ fs.ListRer = &Fs{} _ fs.ListRer = &Fs{}
_ fs.OpenChunkWriter = &Fs{} _ fs.Object = &Object{}
_ fs.Object = &Object{} _ fs.MimeTyper = &Object{}
_ fs.MimeTyper = &Object{} _ fs.GetTierer = &Object{}
_ fs.GetTierer = &Object{} _ fs.SetTierer = &Object{}
_ fs.SetTierer = &Object{}
) )

View File

@@ -1,5 +1,5 @@
//go:build !plan9 && !solaris && !js //go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js // +build !plan9,!solaris,!js,go1.18
package azureblob package azureblob
@@ -20,18 +20,17 @@ func (f *Fs) InternalTest(t *testing.T) {
func TestIncrement(t *testing.T) { func TestIncrement(t *testing.T) {
for _, test := range []struct { for _, test := range []struct {
in [8]byte in []byte
want [8]byte want []byte
}{ }{
{[8]byte{0, 0, 0, 0}, [8]byte{1, 0, 0, 0}}, {[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
{[8]byte{0xFE, 0, 0, 0}, [8]byte{0xFF, 0, 0, 0}}, {[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
{[8]byte{0xFF, 0, 0, 0}, [8]byte{0, 1, 0, 0}}, {[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
{[8]byte{0, 1, 0, 0}, [8]byte{1, 1, 0, 0}}, {[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
{[8]byte{0xFF, 0xFF, 0xFF, 0xFE}, [8]byte{0, 0, 0, 0xFF}}, {[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
{[8]byte{0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 1}}, {[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
{[8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 0, 0, 0}},
} { } {
increment(&test.in) increment(test.in)
assert.Equal(t, test.want, test.in) assert.Equal(t, test.want, test.in)
} }
} }

View File

@@ -1,7 +1,7 @@
// Test AzureBlob filesystem interface // Test AzureBlob filesystem interface
//go:build !plan9 && !solaris && !js //go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js // +build !plan9,!solaris,!js,go1.18
package azureblob package azureblob
@@ -19,7 +19,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:", RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil), NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool", "Cold"}, TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{ ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize, MinChunkSize: defaultChunkSize,
}, },
@@ -31,11 +31,11 @@ func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" { if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set") t.Skip("Skipping as -remote set")
} }
name := "TestAzureBlob" name := "TestAzureBlob:"
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: name + ":", RemoteName: name,
NilObject: (*Object)(nil), NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool", "Cold"}, TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{ ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize, MinChunkSize: defaultChunkSize,
}, },
@@ -62,7 +62,6 @@ func TestValidateAccessTier(t *testing.T) {
"HOT": {"HOT", true}, "HOT": {"HOT", true},
"Hot": {"Hot", true}, "Hot": {"Hot", true},
"cool": {"cool", true}, "cool": {"cool", true},
"cold": {"cold", true},
"archive": {"archive", true}, "archive": {"archive", true},
"empty": {"", false}, "empty": {"", false},
"unknown": {"unknown", false}, "unknown": {"unknown", false},

View File

@@ -1,7 +1,7 @@
// Build for azureblob for unsupported platforms to stop go complaining // Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files " // about "no buildable Go source files "
//go:build plan9 || solaris || js //go:build plan9 || solaris || js || !go1.18
// +build plan9 solaris js // +build plan9 solaris js !go1.18
package azureblob package azureblob

File diff suppressed because it is too large Load Diff

View File

@@ -1,70 +0,0 @@
//go:build !plan9 && !js
// +build !plan9,!js
package azurefiles
import (
"context"
"math/rand"
"strings"
"testing"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Authentication", f.InternalTestAuth)
}
var _ fstests.InternalTester = (*Fs)(nil)
func (f *Fs) InternalTestAuth(t *testing.T) {
t.Skip("skipping since this requires authentication credentials which are not part of repo")
shareName := "test-rclone-oct-2023"
testCases := []struct {
name string
options *Options
}{
{
name: "ConnectionString",
options: &Options{
ShareName: shareName,
ConnectionString: "",
},
},
{
name: "AccountAndKey",
options: &Options{
ShareName: shareName,
Account: "",
Key: "",
}},
{
name: "SASUrl",
options: &Options{
ShareName: shareName,
SASURL: "",
}},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fs, err := newFsFromOptions(context.TODO(), "TestAzureFiles", "", tc.options)
assert.NoError(t, err)
dirName := randomString(10)
assert.NoError(t, fs.Mkdir(context.TODO(), dirName))
})
}
}
const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
func randomString(charCount int) string {
strBldr := strings.Builder{}
for i := 0; i < charCount; i++ {
randPos := rand.Int63n(52)
strBldr.WriteByte(chars[randPos])
}
return strBldr.String()
}

View File

@@ -1,18 +0,0 @@
//go:build !plan9 && !js
// +build !plan9,!js
package azurefiles
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
func TestIntegration(t *testing.T) {
var objPtr *Object
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureFiles:",
NilObject: objPtr,
})
}

View File

@@ -1,7 +0,0 @@
// Build for azurefiles for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js
// +build plan9 js
package azurefiles

View File

@@ -33,18 +33,10 @@ var _ fserrors.Fataler = (*Error)(nil)
// Bucket describes a B2 bucket // Bucket describes a B2 bucket
type Bucket struct { type Bucket struct {
ID string `json:"bucketId"` ID string `json:"bucketId"`
AccountID string `json:"accountId"` AccountID string `json:"accountId"`
Name string `json:"bucketName"` Name string `json:"bucketName"`
Type string `json:"bucketType"` Type string `json:"bucketType"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}
// LifecycleRule is a single lifecycle rule
type LifecycleRule struct {
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
FileNamePrefix string `json:"fileNamePrefix"`
} }
// Timestamp is a UTC time when this file was uploaded. It is a base // Timestamp is a UTC time when this file was uploaded. It is a base
@@ -214,10 +206,9 @@ type FileInfo struct {
// CreateBucketRequest is used to create a bucket // CreateBucketRequest is used to create a bucket
type CreateBucketRequest struct { type CreateBucketRequest struct {
AccountID string `json:"accountId"` AccountID string `json:"accountId"`
Name string `json:"bucketName"` Name string `json:"bucketName"`
Type string `json:"bucketType"` Type string `json:"bucketType"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
} }
// DeleteBucketRequest is used to create a bucket // DeleteBucketRequest is used to create a bucket
@@ -340,11 +331,3 @@ type CopyPartRequest struct {
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1) PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied. Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
} }
// UpdateBucketRequest describes a request to modify a B2 bucket
type UpdateBucketRequest struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Type string `json:"bucketType,omitempty"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}

View File

@@ -9,7 +9,6 @@ import (
"bytes" "bytes"
"context" "context"
"crypto/sha1" "crypto/sha1"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
gohash "hash" gohash "hash"
@@ -33,7 +32,6 @@ import (
"github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool" "github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
@@ -59,7 +57,9 @@ const (
minChunkSize = 5 * fs.Mebi minChunkSize = 5 * fs.Mebi
defaultChunkSize = 96 * fs.Mebi defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
) )
// Globals // Globals
@@ -74,17 +74,14 @@ func init() {
Name: "b2", Name: "b2",
Description: "Backblaze B2", Description: "Backblaze B2",
NewFs: NewFs, NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "account", Name: "account",
Help: "Account ID or Application Key ID.", Help: "Account ID or Application Key ID.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "key", Name: "key",
Help: "Application Key.", Help: "Application Key.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.", Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -150,18 +147,6 @@ might a maximum of "--transfers" chunks in progress at once.
5,000,000 Bytes is the minimum size.`, 5,000,000 Bytes is the minimum size.`,
Default: defaultChunkSize, Default: defaultChunkSize,
Advanced: true, Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
Note that chunks are stored in memory and there may be up to
"--transfers" * "--b2-upload-concurrency" chunks stored at once
in memory.`,
Default: 4,
Advanced: true,
}, { }, {
Name: "disable_checksum", Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files. Help: `Disable checksums for large (> upload cutoff) files.
@@ -201,41 +186,16 @@ The minimum value is 1 second. The maximum value is one week.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "memory_pool_flush_time", Name: "memory_pool_flush_time",
Default: fs.Duration(time.Minute), Default: memoryPoolFlushTime,
Advanced: true, Advanced: true,
Hide: fs.OptionHideBoth, Help: `How often internal memory buffer pools will be flushed.
Help: `How often internal memory buffer pools will be flushed. (no longer used)`, Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, { }, {
Name: "memory_pool_use_mmap", Name: "memory_pool_use_mmap",
Default: false, Default: memoryPoolUseMmap,
Advanced: true,
Hide: fs.OptionHideBoth,
Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
}, {
Name: "lifecycle",
Help: `Set the number of days deleted files should be kept when creating a bucket.
On bucket creation, this parameter is used to create a lifecycle rule
for the entire bucket.
If lifecycle is 0 (the default) it does not create a lifecycle rule so
the default B2 behaviour applies. This is to create versions of files
on delete and overwrite and to keep them indefinitely.
If lifecycle is >0 then it creates a single rule setting the number of
days before a file that is deleted or overwritten is deleted
permanently. This is known as daysFromHidingToDeleting in the b2 docs.
The minimum value for this parameter is 1 day.
You can also enable hard_delete in the config also which will mean
deletions won't cause versions but overwrites will still cause
versions to be made.
See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket creation.
`,
Default: 0,
Advanced: true, Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -262,11 +222,11 @@ type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"` CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"` ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
DisableCheckSum bool `config:"disable_checksum"` DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"` DownloadURL string `config:"download_url"`
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"` DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
Lifecycle int `config:"lifecycle"` MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
} }
@@ -291,6 +251,7 @@ type Fs struct {
authMu sync.Mutex // lock for authorizing the account authMu sync.Mutex // lock for authorizing the account
pacer *fs.Pacer // To pace and retry the API calls pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
} }
// Object describes a b2 object // Object describes a b2 object
@@ -400,18 +361,11 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
// errorHandler parses a non 2xx error response into an error // errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error { func errorHandler(resp *http.Response) error {
body, err := rest.ReadBody(resp) // Decode error response
if err != nil {
fs.Errorf(nil, "Couldn't read error out of body: %v", err)
body = nil
}
// Decode error response if there was one - they can be blank
errResponse := new(api.Error) errResponse := new(api.Error)
if len(body) > 0 { err := rest.DecodeJSON(resp, &errResponse)
err = json.Unmarshal(body, errResponse) if err != nil {
if err != nil { fs.Debugf(nil, "Couldn't decode error response: %v", err)
fs.Errorf(nil, "Couldn't decode error response: %v", err)
}
} }
if errResponse.Code == "" { if errResponse.Code == "" {
errResponse.Code = "unknown" errResponse.Code = "unknown"
@@ -455,14 +409,6 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return return
} }
func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
}
return
}
// setRoot changes the root of the Fs // setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) { func (f *Fs) setRoot(root string) {
f.root = parsePath(root) f.root = parsePath(root)
@@ -510,14 +456,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
uploads: make(map[string][]*api.GetUploadURLResponse), uploads: make(map[string][]*api.GetUploadURLResponse),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers), uploadToken: pacer.NewTokenDispenser(ci.Transfers),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
ci.Transfers,
opt.MemoryPoolUseMmap,
),
} }
f.setRoot(root) f.setRoot(root)
f.features = (&fs.Features{ f.features = (&fs.Features{
ReadMimeType: true, ReadMimeType: true,
WriteMimeType: true, WriteMimeType: true,
BucketBased: true, BucketBased: true,
BucketBasedRootOK: true, BucketBasedRootOK: true,
ChunkWriterDoesntSeek: true,
}).Fill(ctx, f) }).Fill(ctx, f)
// Set the test flag if required // Set the test flag if required
if opt.TestMode != "" { if opt.TestMode != "" {
@@ -644,24 +595,23 @@ func (f *Fs) clearUploadURL(bucketID string) {
f.uploadMu.Unlock() f.uploadMu.Unlock()
} }
// getRW gets a RW buffer and an upload token // getBuf gets a buffer of f.opt.ChunkSize and an upload token
// //
// If noBuf is set then it just gets an upload token // If noBuf is set then it just gets an upload token
func (f *Fs) getRW(noBuf bool) (rw *pool.RW) { func (f *Fs) getBuf(noBuf bool) (buf []byte) {
f.uploadToken.Get() f.uploadToken.Get()
if !noBuf { if !noBuf {
rw = multipart.NewRW() buf = f.pool.Get()
} }
return rw return buf
} }
// putRW returns a RW buffer to the memory pool and returns an upload // putBuf returns a buffer to the memory pool and an upload token
// token
// //
// If buf is nil then it just returns the upload token // If noBuf is set then it just returns the upload token
func (f *Fs) putRW(rw *pool.RW) { func (f *Fs) putBuf(buf []byte, noBuf bool) {
if rw != nil { if !noBuf {
_ = rw.Close() f.pool.Put(buf)
} }
f.uploadToken.Put() f.uploadToken.Put()
} }
@@ -868,7 +818,7 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
// listBuckets returns all the buckets to out // listBuckets returns all the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) { func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
err = f.listBucketsToFn(ctx, "", func(bucket *api.Bucket) error { err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
d := fs.NewDir(bucket.Name, time.Time{}) d := fs.NewDir(bucket.Name, time.Time{})
entries = append(entries, d) entries = append(entries, d)
return nil return nil
@@ -961,14 +911,11 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
type listBucketFn func(*api.Bucket) error type listBucketFn func(*api.Bucket) error
// listBucketsToFn lists the buckets to the function supplied // listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error { func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
var account = api.ListBucketsRequest{ var account = api.ListBucketsRequest{
AccountID: f.info.AccountID, AccountID: f.info.AccountID,
BucketID: f.info.Allowed.BucketID, BucketID: f.info.Allowed.BucketID,
} }
if bucketName != "" && account.BucketID == "" {
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
}
var response api.ListBucketsResponse var response api.ListBucketsResponse
opts := rest.Opts{ opts := rest.Opts{
@@ -1014,7 +961,7 @@ func (f *Fs) getbucketType(ctx context.Context, bucket string) (bucketType strin
if bucketType != "" { if bucketType != "" {
return bucketType, nil return bucketType, nil
} }
err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error { err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
// listBucketsToFn reads bucket Types // listBucketsToFn reads bucket Types
return nil return nil
}) })
@@ -1049,7 +996,7 @@ func (f *Fs) getBucketID(ctx context.Context, bucket string) (bucketID string, e
if bucketID != "" { if bucketID != "" {
return bucketID, nil return bucketID, nil
} }
err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error { err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
// listBucketsToFn sets IDs // listBucketsToFn sets IDs
return nil return nil
}) })
@@ -1113,11 +1060,6 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
Name: f.opt.Enc.FromStandardName(bucket), Name: f.opt.Enc.FromStandardName(bucket),
Type: "allPrivate", Type: "allPrivate",
} }
if f.opt.Lifecycle > 0 {
request.LifecycleRules = []api.LifecycleRule{{
DaysFromHidingToDeleting: &f.opt.Lifecycle,
}}
}
var response api.Bucket var response api.Bucket
err := f.pacer.Call(func() (bool, error) { err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response) resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
@@ -1338,7 +1280,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
// If newInfo is nil then the metadata will be copied otherwise it // If newInfo is nil then the metadata will be copied otherwise it
// will be replaced with newInfo // will be replaced with newInfo
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) { func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) {
if srcObj.size > int64(f.opt.CopyCutoff) { if srcObj.size >= int64(f.opt.CopyCutoff) {
if newInfo == nil { if newInfo == nil {
newInfo, err = srcObj.getMetaData(ctx) newInfo, err = srcObj.getMetaData(ctx)
if err != nil { if err != nil {
@@ -1349,11 +1291,7 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
if err != nil { if err != nil {
return err return err
} }
err = up.Copy(ctx) return up.Upload(ctx)
if err != nil {
return err
}
return dstObj.decodeMetaDataFileInfo(up.info)
} }
dstBucket, dstPath := dstObj.split() dstBucket, dstPath := dstObj.split()
@@ -1482,7 +1420,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
if err != nil { if err != nil {
return "", err return "", err
} }
absPath := "/" + urlEncode(bucketPath) absPath := "/" + bucketPath
link = RootURL + "/file/" + urlEncode(bucket) + absPath link = RootURL + "/file/" + urlEncode(bucket) + absPath
bucketType, err := f.getbucketType(ctx, bucket) bucketType, err := f.getbucketType(ctx, bucket)
if err != nil { if err != nil {
@@ -1921,11 +1859,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil { if err != nil {
return err return err
} }
if size < 0 { if size == -1 {
// Check if the file is large enough for a chunked upload (needs to be at least two chunks) // Check if the file is large enough for a chunked upload (needs to be at least two chunks)
rw := o.fs.getRW(false) buf := o.fs.getBuf(false)
n, err := io.CopyN(rw, in, int64(o.fs.opt.ChunkSize)) n, err := io.ReadFull(in, buf)
if err == nil { if err == nil {
bufReader := bufio.NewReader(in) bufReader := bufio.NewReader(in)
in = bufReader in = bufReader
@@ -1936,34 +1874,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "File is big enough for chunked streaming") fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil) up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
if err != nil { if err != nil {
o.fs.putRW(rw) o.fs.putBuf(buf, false)
return err return err
} }
// NB Stream returns the buffer and token // NB Stream returns the buffer and token
err = up.Stream(ctx, rw) return up.Stream(ctx, buf)
if err != nil { } else if err == io.EOF || err == io.ErrUnexpectedEOF {
return err
}
return o.decodeMetaDataFileInfo(up.info)
} else if err == io.EOF {
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n) fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
defer o.fs.putRW(rw) defer o.fs.putBuf(buf, false)
size = n size = int64(n)
in = rw in = bytes.NewReader(buf[:n])
} else { } else {
o.fs.putRW(rw) o.fs.putBuf(buf, false)
return err return err
} }
} else if size > int64(o.fs.opt.UploadCutoff) { } else if size > int64(o.fs.opt.UploadCutoff) {
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{ up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
Open: o.fs,
OpenOptions: options,
})
if err != nil { if err != nil {
return err return err
} }
up := chunkWriter.(*largeUpload) return up.Upload(ctx)
return o.decodeMetaDataFileInfo(up.info)
} }
modTime := src.ModTime(ctx) modTime := src.ModTime(ctx)
@@ -2071,41 +2001,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.decodeMetaDataFileInfo(&response) return o.decodeMetaDataFileInfo(&response)
} }
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// FIXME what if file is smaller than 1 chunk?
if f.opt.Versions {
return info, nil, errNotWithVersions
}
if f.opt.VersionAt.IsSet() {
return info, nil, errNotWithVersionAt
}
//size := src.Size()
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
bucket, _ := o.split()
err = f.makeBucket(ctx, bucket)
if err != nil {
return info, nil, err
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(f.opt.ChunkSize),
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
return info, up, err
}
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove(ctx context.Context) error {
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
@@ -2131,149 +2026,16 @@ func (o *Object) ID() string {
return o.id return o.id
} }
var lifecycleHelp = fs.CommandHelp{
Name: "lifecycle",
Short: "Read or set the lifecycle for a bucket",
Long: `This command can be used to read or set the lifecycle for a bucket.
Usage Examples:
To show the current lifecycle rules:
rclone backend lifecycle b2:bucket
This will dump something like this showing the lifecycle rules.
[
{
"daysFromHidingToDeleting": 1,
"daysFromUploadingToHiding": null,
"fileNamePrefix": ""
}
]
If there are no lifecycle rules (the default) then it will just return [].
To reset the current lifecycle rules:
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
This will run and then print the new lifecycle rules as above.
Rclone only lets you set lifecycles for the whole bucket with the
fileNamePrefix = "".
You can't disable versioning with B2. The best you can do is to set
the daysFromHidingToDeleting to 1 day. You can enable hard_delete in
the config also which will mean deletions won't cause versions but
overwrites will still cause versions to be made.
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
`,
Opts: map[string]string{
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
},
}
func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
var newRule api.LifecycleRule
if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromHidingToDeleting: %w", err)
}
newRule.DaysFromHidingToDeleting = &days
}
if daysStr := opt["daysFromUploadingToHiding"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromUploadingToHiding: %w", err)
}
newRule.DaysFromUploadingToHiding = &days
}
bucketName, _ := f.split("")
if bucketName == "" {
return nil, errors.New("bucket required")
}
var bucket *api.Bucket
if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
bucketID, err := f.getBucketID(ctx, bucketName)
if err != nil {
return nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_update_bucket",
}
var request = api.UpdateBucketRequest{
ID: bucketID,
AccountID: f.info.AccountID,
LifecycleRules: []api.LifecycleRule{newRule},
}
var response api.Bucket
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
bucket = &response
} else {
err = f.listBucketsToFn(ctx, bucketName, func(b *api.Bucket) error {
bucket = b
return nil
})
if err != nil {
return nil, err
}
}
if bucket == nil {
return nil, fs.ErrorDirNotFound
}
return bucket.LifecycleRules, nil
}
var commandHelp = []fs.CommandHelp{
lifecycleHelp,
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "lifecycle":
return f.lifecycleCommand(ctx, name, arg, opt)
default:
return nil, fs.ErrorCommandNotFound
}
}
// Check the interfaces are satisfied // Check the interfaces are satisfied
var ( var (
_ fs.Fs = &Fs{} _ fs.Fs = &Fs{}
_ fs.Purger = &Fs{} _ fs.Purger = &Fs{}
_ fs.Copier = &Fs{} _ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{} _ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{} _ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{} _ fs.ListRer = &Fs{}
_ fs.PublicLinker = &Fs{} _ fs.PublicLinker = &Fs{}
_ fs.OpenChunkWriter = &Fs{} _ fs.Object = &Object{}
_ fs.Commander = &Fs{} _ fs.MimeTyper = &Object{}
_ fs.Object = &Object{} _ fs.IDer = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
) )

View File

@@ -5,7 +5,6 @@ import (
"time" "time"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
) )
// Test b2 string encoding // Test b2 string encoding
@@ -169,10 +168,3 @@ func TestParseTimeString(t *testing.T) {
} }
} }
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
// Internal tests go here
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -28,12 +28,7 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs) return f.setUploadCutoff(cs)
} }
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setCopyCutoff(cs)
}
var ( var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil) _ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil) _ fstests.SetUploadCutoffer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
) )

View File

@@ -5,6 +5,7 @@
package b2 package b2
import ( import (
"bytes"
"context" "context"
"crypto/sha1" "crypto/sha1"
"encoding/hex" "encoding/hex"
@@ -13,6 +14,7 @@ import (
"io" "io"
"strings" "strings"
"sync" "sync"
"time"
"github.com/rclone/rclone/backend/b2/api" "github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
@@ -78,14 +80,12 @@ type largeUpload struct {
wrap accounting.WrapFn // account parts being transferred wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded id string // ID of the file being uploaded
size int64 // total size size int64 // total size
parts int // calculated number of parts, if known parts int64 // calculated number of parts, if known
sha1smu sync.Mutex // mutex to protect sha1s
sha1s []string // slice of SHA1s for each part sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
chunkSize int64 // chunk size to use chunkSize int64 // chunk size to use
src *Object // if copying, object we are reading from src *Object // if copying, object we are reading from
info *api.FileInfo // final response with info about the object
} }
// newLargeUpload starts an upload of object o from in with metadata in src // newLargeUpload starts an upload of object o from in with metadata in src
@@ -93,16 +93,18 @@ type largeUpload struct {
// If newInfo is set then metadata from that will be used instead of reading it from src // If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) { func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
size := src.Size() size := src.Size()
parts := 0 parts := int64(0)
sha1SliceSize := int64(maxParts)
chunkSize := defaultChunkSize chunkSize := defaultChunkSize
if size == -1 { if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize) fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else { } else {
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize) chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
parts = int(size / int64(chunkSize)) parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 { if size%int64(chunkSize) != 0 {
parts++ parts++
} }
sha1SliceSize = parts
} }
opts := rest.Opts{ opts := rest.Opts{
@@ -150,7 +152,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
id: response.ID, id: response.ID,
size: size, size: size,
parts: parts, parts: parts,
sha1s: make([]string, 0, 16), sha1s: make([]string, sha1SliceSize),
chunkSize: int64(chunkSize), chunkSize: int64(chunkSize),
} }
// unwrap the accounting from the input, we use wrap to put it // unwrap the accounting from the input, we use wrap to put it
@@ -169,26 +171,24 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// This should be returned with returnUploadURL when finished // This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) { func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock() up.uploadMu.Lock()
if len(up.uploads) > 0 { defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:] upload, up.uploads = up.uploads[0], up.uploads[1:]
up.uploadMu.Unlock()
return upload, nil
}
up.uploadMu.Unlock()
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err = up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
} }
return upload, nil return upload, nil
} }
@@ -203,39 +203,10 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
up.uploadMu.Unlock() up.uploadMu.Unlock()
} }
// Add an sha1 to the being built up sha1s // Transfer a chunk
func (up *largeUpload) addSha1(chunkNumber int, sha1 string) { func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
up.sha1smu.Lock() err := up.f.pacer.Call(func() (bool, error) {
defer up.sha1smu.Unlock() fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
if len(up.sha1s) < chunkNumber+1 {
up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
}
up.sha1s[chunkNumber] = sha1
}
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(1)
}
err = up.f.pacer.Call(func() (bool, error) {
// Discover the size by seeking to the end
size, err = reader.Seek(0, io.SeekEnd)
if err != nil {
return false, err
}
// rewind the reader on retry and after reading size
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
// Get upload URL // Get upload URL
upload, err := up.getUploadURL(ctx) upload, err := up.getUploadURL(ctx)
@@ -243,8 +214,8 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
return false, err return false, err
} }
in := newHashAppendingReader(reader, sha1.New()) in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
sizeWithHash := size + int64(in.AdditionalLength()) size := int64(len(body)) + int64(in.AdditionalLength())
// Authorization // Authorization
// //
@@ -274,10 +245,10 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
Body: up.wrap(in), Body: up.wrap(in),
ExtraHeaders: map[string]string{ ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken, "Authorization": upload.AuthorizationToken,
"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1), "X-Bz-Part-Number": fmt.Sprintf("%d", part),
sha1Header: "hex_digits_at_end", sha1Header: "hex_digits_at_end",
}, },
ContentLength: &sizeWithHash, ContentLength: &size,
} }
var response api.UploadPartResponse var response api.UploadPartResponse
@@ -285,7 +256,7 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response) resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
retry, err := up.f.shouldRetry(ctx, resp, err) retry, err := up.f.shouldRetry(ctx, resp, err)
if err != nil { if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err) fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
} }
// On retryable error clear PartUploadURL // On retryable error clear PartUploadURL
if retry { if retry {
@@ -293,30 +264,30 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
upload = nil upload = nil
} }
up.returnUploadURL(upload) up.returnUploadURL(upload)
up.addSha1(chunkNumber, in.HexSum()) up.sha1s[part-1] = in.HexSum()
return retry, err return retry, err
}) })
if err != nil { if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err) fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
} else { } else {
fs.Debugf(up.o, "Done sending chunk %d", chunkNumber) fs.Debugf(up.o, "Done sending chunk %d", part)
} }
return size, err return err
} }
// Copy a chunk // Copy a chunk
func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error { func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
err := up.f.pacer.Call(func() (bool, error) { err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize) fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/b2_copy_part", Path: "/b2_copy_part",
} }
offset := int64(part) * up.chunkSize // where we are in the source file offset := (part - 1) * up.chunkSize // where we are in the source file
var request = api.CopyPartRequest{ var request = api.CopyPartRequest{
SourceID: up.src.id, SourceID: up.src.id,
LargeFileID: up.id, LargeFileID: up.id,
PartNumber: int64(part + 1), PartNumber: part,
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1), Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
} }
var response api.UploadPartResponse var response api.UploadPartResponse
@@ -325,7 +296,7 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
if err != nil { if err != nil {
fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err) fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
} }
up.addSha1(part, response.SHA1) up.sha1s[part-1] = response.SHA1
return retry, err return retry, err
}) })
if err != nil { if err != nil {
@@ -336,8 +307,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
return err return err
} }
// Close closes off the large upload // finish closes off the large upload
func (up *largeUpload) Close(ctx context.Context) error { func (up *largeUpload) finish(ctx context.Context) error {
fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts) fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
@@ -355,12 +326,11 @@ func (up *largeUpload) Close(ctx context.Context) error {
if err != nil { if err != nil {
return err return err
} }
up.info = &response return up.o.decodeMetaDataFileInfo(&response)
return nil
} }
// Abort aborts the large upload // cancel aborts the large upload
func (up *largeUpload) Abort(ctx context.Context) error { func (up *largeUpload) cancel(ctx context.Context) error {
fs.Debugf(up.o, "Cancelling large file %s", up.what) fs.Debugf(up.o, "Cancelling large file %s", up.what)
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
@@ -385,105 +355,157 @@ func (up *largeUpload) Abort(ctx context.Context) error {
// reaches EOF. // reaches EOF.
// //
// Note that initialUploadBlock must be returned to f.putBuf() // Note that initialUploadBlock must be returned to f.putBuf()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) { func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })() defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id) fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
var ( var (
g, gCtx = errgroup.WithContext(ctx) g, gCtx = errgroup.WithContext(ctx)
hasMoreParts = true hasMoreParts = true
) )
up.size = initialUploadBlock.Size() up.size = int64(len(initialUploadBlock))
up.parts = 0 g.Go(func() error {
for part := 0; hasMoreParts; part++ { for part := int64(1); hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency. // Get a block of memory from the pool and token which limits concurrency.
var rw *pool.RW var buf []byte
if part == 0 { if part == 1 {
rw = initialUploadBlock buf = initialUploadBlock
} else { } else {
rw = up.f.getRW(false) buf = up.f.getBuf(false)
}
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putRW(rw)
break
}
// Read the chunk
var n int64
if part == 0 {
n = rw.Size()
} else {
n, err = io.CopyN(rw, up.in, up.chunkSize)
if err == io.EOF {
if n == 0 {
fs.Debugf(up.o, "Not sending empty chunk after EOF - ending.")
up.f.putRW(rw)
break
} else {
fs.Debugf(up.o, "Read less than a full chunk %d, making this the last one.", n)
}
hasMoreParts = false
} else if err != nil {
// other kinds of errors indicate failure
up.f.putRW(rw)
return err
} }
}
// Keep stats up to date // Fail fast, in case an errgroup managed function returns an error
up.parts += 1 // gCtx is cancelled. There is no point in uploading all the other parts.
up.size += n if gCtx.Err() != nil {
if part > maxParts { up.f.putBuf(buf, false)
up.f.putRW(rw) return nil
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts) }
}
part := part // for the closure // Read the chunk
g.Go(func() (err error) { var n int
defer up.f.putRW(rw) if part == 1 {
_, err = up.WriteChunk(gCtx, part, rw) n = len(buf)
return err } else {
}) n, err = io.ReadFull(up.in, buf)
} if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putBuf(buf, false)
return nil
} else if err != nil {
// other kinds of errors indicate failure
up.f.putBuf(buf, false)
return err
}
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, false)
return up.transferChunk(gCtx, part, buf)
})
}
return nil
})
err = g.Wait() err = g.Wait()
if err != nil { if err != nil {
return err return err
} }
return up.Close(ctx) up.sha1s = up.sha1s[:up.parts]
return up.finish(ctx)
} }
// Copy the chunks from the source to the destination // Upload uploads the chunks from the input
func (up *largeUpload) Copy(ctx context.Context) (err error) { func (up *largeUpload) Upload(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })() defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id) fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
var ( var (
g, gCtx = errgroup.WithContext(ctx) g, gCtx = errgroup.WithContext(ctx)
remaining = up.size remaining = up.size
uploadPool *pool.Pool
ci = fs.GetConfig(ctx)
) )
g.SetLimit(up.f.opt.UploadConcurrency) // If using large chunk size then make a temporary pool
for part := 0; part < up.parts; part++ { if up.chunkSize <= int64(up.f.opt.ChunkSize) {
// Fail fast, in case an errgroup managed function returns an error uploadPool = up.f.pool
// gCtx is cancelled. There is no point in copying all the other parts. } else {
if gCtx.Err() != nil { uploadPool = pool.New(
break time.Duration(up.f.opt.MemoryPoolFlushTime),
} int(up.chunkSize),
ci.Transfers,
reqSize := remaining up.f.opt.MemoryPoolUseMmap,
if reqSize >= up.chunkSize { )
reqSize = up.chunkSize defer uploadPool.Flush()
}
part := part // for the closure
g.Go(func() (err error) {
return up.copyChunk(gCtx, part, reqSize)
})
remaining -= reqSize
} }
// Get an upload token and a buffer
getBuf := func() (buf []byte) {
up.f.getBuf(true)
if !up.doCopy {
buf = uploadPool.Get()
}
return buf
}
// Put an upload token and a buffer
putBuf := func(buf []byte) {
if !up.doCopy {
uploadPool.Put(buf)
}
up.f.putBuf(nil, true)
}
g.Go(func() error {
for part := int64(1); part <= up.parts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
buf := getBuf()
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
putBuf(buf)
return nil
}
reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
if !up.doCopy {
// Read the chunk
buf = buf[:reqSize]
_, err = io.ReadFull(up.in, buf)
if err != nil {
putBuf(buf)
return err
}
}
part := part // for the closure
g.Go(func() (err error) {
defer putBuf(buf)
if !up.doCopy {
err = up.transferChunk(gCtx, part, buf)
} else {
err = up.copyChunk(gCtx, part, reqSize)
}
return err
})
remaining -= reqSize
}
return nil
})
err = g.Wait() err = g.Wait()
if err != nil { if err != nil {
return err return err
} }
return up.Close(ctx) return up.finish(ctx)
} }

View File

@@ -52,7 +52,7 @@ func (e *Error) Error() string {
out += ": " + e.Message out += ": " + e.Message
} }
if e.ContextInfo != nil { if e.ContextInfo != nil {
out += fmt.Sprintf(" (%s)", string(e.ContextInfo)) out += fmt.Sprintf(" (%+v)", e.ContextInfo)
} }
return out return out
} }
@@ -63,7 +63,7 @@ var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo // ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by" var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
// Types of things in Item/ItemMini // Types of things in Item
const ( const (
ItemTypeFolder = "folder" ItemTypeFolder = "folder"
ItemTypeFile = "file" ItemTypeFile = "file"
@@ -72,31 +72,20 @@ const (
ItemStatusDeleted = "deleted" ItemStatusDeleted = "deleted"
) )
// ItemMini is a subset of the elements in a full Item returned by some API calls
type ItemMini struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
}
// Item describes a folder or a file as returned by Get Folder Items and others // Item describes a folder or a file as returned by Get Folder Items and others
type Item struct { type Item struct {
Type string `json:"type"` Type string `json:"type"`
ID string `json:"id"` ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"` SequenceID string `json:"sequence_id"`
Etag string `json:"etag"` Etag string `json:"etag"`
SHA1 string `json:"sha1"` SHA1 string `json:"sha1"`
Name string `json:"name"` Name string `json:"name"`
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261 Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
CreatedAt Time `json:"created_at"` CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"` ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"` ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"` ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
Parent ItemMini `json:"parent"`
SharedLink struct { SharedLink struct {
URL string `json:"url,omitempty"` URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"` Access string `json:"access,omitempty"`
@@ -167,7 +156,19 @@ type PreUploadCheckResponse struct {
// PreUploadCheckConflict is returned in the ContextInfo error field // PreUploadCheckConflict is returned in the ContextInfo error field
// from PreUploadCheck when the error code is "item_name_in_use" // from PreUploadCheck when the error code is "item_name_in_use"
type PreUploadCheckConflict struct { type PreUploadCheckConflict struct {
Conflicts ItemMini `json:"conflicts"` Conflicts struct {
Type string `json:"type"`
ID string `json:"id"`
FileVersion struct {
Type string `json:"type"`
ID string `json:"id"`
Sha1 string `json:"sha1"`
} `json:"file_version"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
Sha1 string `json:"sha1"`
Name string `json:"name"`
} `json:"conflicts"`
} }
// UpdateFileModTime is used in Update File Info // UpdateFileModTime is used in Update File Info
@@ -280,30 +281,3 @@ type User struct {
Address string `json:"address"` Address string `json:"address"`
AvatarURL string `json:"avatar_url"` AvatarURL string `json:"avatar_url"`
} }
// FileTreeChangeEventTypes are the events that can require cache invalidation
var FileTreeChangeEventTypes = map[string]struct{}{
"ITEM_COPY": {},
"ITEM_CREATE": {},
"ITEM_MAKE_CURRENT_VERSION": {},
"ITEM_MODIFY": {},
"ITEM_MOVE": {},
"ITEM_RENAME": {},
"ITEM_TRASH": {},
"ITEM_UNDELETE_VIA_TRASH": {},
"ITEM_UPLOAD": {},
}
// Event is an array element in the response returned from /events
type Event struct {
EventType string `json:"event_type"`
EventID string `json:"event_id"`
Source Item `json:"source"`
}
// Events is returned from /events
type Events struct {
ChunkSize int64 `json:"chunk_size"`
Entries []Event `json:"entries"`
NextStreamPosition int64 `json:"next_stream_position"`
}

View File

@@ -77,7 +77,7 @@ var (
) )
type boxCustomClaims struct { type boxCustomClaims struct {
jwt.StandardClaims jwt.RegisteredClaims
BoxSubType string `json:"box_sub_type,omitempty"` BoxSubType string `json:"box_sub_type,omitempty"`
} }
@@ -107,18 +107,16 @@ func init() {
return nil, nil return nil, nil
}, },
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id", Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.", Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0", Default: "0",
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "box_config_file", Name: "box_config_file",
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp, Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
}, { }, {
Name: "access_token", Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.", Help: "Box App Primary Access Token\n\nLeave blank normally.",
Sensitive: true,
}, { }, {
Name: "box_sub_type", Name: "box_sub_type",
Default: "user", Default: "user",
@@ -149,23 +147,6 @@ func init() {
Default: "", Default: "",
Help: "Only show items owned by the login (email address) passed in.", Help: "Only show items owned by the login (email address) passed in.",
Advanced: true, Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: `Impersonate this user ID when using a service account.
Setting this flag allows rclone, when using a JWT service account, to
act on behalf of another user by setting the as-user header.
The user ID is the Box identifier for a user. User IDs can found for
any user via the GET /users endpoint, which is only available to
admins, or by calling the GET /users/me endpoint with an authenticated
user session.
See: https://developer.box.com/guides/authentication/jwt/as-user/
`,
Advanced: true,
Sensitive: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -225,14 +206,12 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
} }
claims = &boxCustomClaims{ claims = &boxCustomClaims{
//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely RegisteredClaims: jwt.RegisteredClaims{
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019 ID: val,
StandardClaims: jwt.StandardClaims{
Id: val,
Issuer: boxConfig.BoxAppSettings.ClientID, Issuer: boxConfig.BoxAppSettings.ClientID,
Subject: boxConfig.EnterpriseID, Subject: boxConfig.EnterpriseID,
Audience: tokenURL, Audience: jwt.ClaimStrings{tokenURL},
ExpiresAt: time.Now().Add(time.Second * 45).Unix(), ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Second * 45)),
}, },
BoxSubType: boxSubType, BoxSubType: boxSubType,
} }
@@ -279,29 +258,19 @@ type Options struct {
AccessToken string `config:"access_token"` AccessToken string `config:"access_token"`
ListChunk int `config:"list_chunk"` ListChunk int `config:"list_chunk"`
OwnedBy string `config:"owned_by"` OwnedBy string `config:"owned_by"`
Impersonate string `config:"impersonate"`
}
// ItemMeta defines metadata we cache for each Item ID
type ItemMeta struct {
SequenceID int64 // the most recent event processed for this item
ParentID string // ID of the parent directory of this item
Name string // leaf name of this item
} }
// Fs represents a remote box // Fs represents a remote box
type Fs struct { type Fs struct {
name string // name of this remote name string // name of this remote
root string // the path we are working on root string // the path we are working on
opt Options // parsed options opt Options // parsed options
features *fs.Features // optional features features *fs.Features // optional features
srv *rest.Client // the connection to the server srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency uploadToken *pacer.TokenDispenser // control concurrency
itemMetaCacheMu *sync.Mutex // protects itemMetaCache
itemMetaCache map[string]ItemMeta // map of Item ID to selected metadata
} }
// Object describes a box object // Object describes a box object
@@ -380,7 +349,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
// readMetaDataForPath reads the metadata from the path // readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) { func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
// defer log.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err) // defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false) leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
if err != nil { if err != nil {
if err == fs.ErrorDirNotFound { if err == fs.ErrorDirNotFound {
@@ -389,30 +358,20 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err return nil, err
} }
// Use preupload to find the ID found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
itemMini, err := f.preUploadCheck(ctx, leaf, directoryID, -1) if strings.EqualFold(item.Name, leaf) {
if err != nil { info = item
return nil, err return true
} }
if itemMini == nil { return false
return nil, fs.ErrorObjectNotFound
}
// Now we have the ID we can look up the object proper
opts := rest.Opts{
Method: "GET",
Path: "/files/" + itemMini.ID,
Parameters: fieldsValue(),
}
var item api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &item)
return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &item, nil if !found {
return nil, fs.ErrorObjectNotFound
}
return info, nil
} }
// errorHandler parses a non 2xx error response into an error // errorHandler parses a non 2xx error response into an error
@@ -459,14 +418,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
f := &Fs{ f := &Fs{
name: name, name: name,
root: root, root: root,
opt: *opt, opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL), srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers), uploadToken: pacer.NewTokenDispenser(ci.Transfers),
itemMetaCacheMu: new(sync.Mutex),
itemMetaCache: make(map[string]ItemMeta),
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
CaseInsensitive: true, CaseInsensitive: true,
@@ -479,11 +436,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken) f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
} }
// If using impersonate set an as-user header
if f.opt.Impersonate != "" {
f.srv.SetHeader("as-user", f.opt.Impersonate)
}
jsonFile, ok := m.Get("box_config_file") jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type") boxSubType, boxSubTypeOk := m.Get("box_sub_type")
@@ -726,17 +678,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
} }
entries = append(entries, o) entries = append(entries, o)
} }
// Cache some metadata for this Item to help us process events later
// on. In particular, the box event API does not provide the old path
// of the Item when it is renamed/deleted/moved/etc.
f.itemMetaCacheMu.Lock()
cachedItemMeta, found := f.itemMetaCache[info.ID]
if !found || cachedItemMeta.SequenceID < info.SequenceID {
f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name}
}
f.itemMetaCacheMu.Unlock()
return false return false
}) })
if err != nil { if err != nil {
@@ -772,7 +713,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
// //
// It returns "", nil if the file is good to go // It returns "", nil if the file is good to go
// It returns "ID", nil if the file must be updated // It returns "ID", nil if the file must be updated
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (item *api.ItemMini, err error) { func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
check := api.PreUploadCheck{ check := api.PreUploadCheck{
Name: f.opt.Enc.FromStandardName(leaf), Name: f.opt.Enc.FromStandardName(leaf),
Parent: api.Parent{ Parent: api.Parent{
@@ -797,16 +738,16 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
var conflict api.PreUploadCheckConflict var conflict api.PreUploadCheckConflict
err = json.Unmarshal(apiErr.ContextInfo, &conflict) err = json.Unmarshal(apiErr.ContextInfo, &conflict)
if err != nil { if err != nil {
return nil, fmt.Errorf("pre-upload check: JSON decode failed: %w", err) return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
} }
if conflict.Conflicts.Type != api.ItemTypeFile { if conflict.Conflicts.Type != api.ItemTypeFile {
return nil, fs.ErrorIsDir return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
} }
return &conflict.Conflicts, nil return conflict.Conflicts.ID, nil
} }
return nil, fmt.Errorf("pre-upload check: %w", err) return "", fmt.Errorf("pre-upload check: %w", err)
} }
return nil, nil return "", nil
} }
// Put the object // Put the object
@@ -827,11 +768,11 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// Preflight check the upload, which returns the ID if the // Preflight check the upload, which returns the ID if the
// object already exists // object already exists
item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size()) ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
if err != nil { if err != nil {
return nil, err return nil, err
} }
if item == nil { if ID == "" {
return f.PutUnchecked(ctx, in, src, options...) return f.PutUnchecked(ctx, in, src, options...)
} }
@@ -839,7 +780,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
o := &Object{ o := &Object{
fs: f, fs: f,
remote: remote, remote: remote,
id: item.ID, id: ID,
} }
return o, o.Update(ctx, in, src, options...) return o, o.Update(ctx, in, src, options...)
} }
@@ -1176,7 +1117,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
// CleanUp empties the trash // CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) { func (f *Fs) CleanUp(ctx context.Context) (err error) {
var ( var (
deleteErrors atomic.Uint64 deleteErrors = int64(0)
concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers) concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
wg sync.WaitGroup wg sync.WaitGroup
) )
@@ -1192,7 +1133,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
err := f.deletePermanently(ctx, item.Type, item.ID) err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil { if err != nil {
fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err) fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
deleteErrors.Add(1) atomic.AddInt64(&deleteErrors, 1)
} }
}() }()
} else { } else {
@@ -1201,277 +1142,12 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
return false return false
}) })
wg.Wait() wg.Wait()
if deleteErrors.Load() != 0 { if deleteErrors != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load()) return fmt.Errorf("failed to delete %d trash items", deleteErrors)
} }
return err return err
} }
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the `stream_position` early so all changes from now on get processed
streamPosition, err := f.changeNotifyStreamPosition(ctx)
if err != nil {
fs.Infof(f, "Failed to get StreamPosition: %s", err)
}
// box can send duplicate Event IDs. Use this map to track and filter
// the ones we've already processed.
processedEventIDs := make(map[string]time.Time)
var ticker *time.Ticker
var tickerC <-chan time.Time
for {
select {
case pollInterval, ok := <-pollIntervalChan:
if !ok {
if ticker != nil {
ticker.Stop()
}
return
}
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
if pollInterval != 0 {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
if streamPosition == "" {
streamPosition, err = f.changeNotifyStreamPosition(ctx)
if err != nil {
fs.Infof(f, "Failed to get StreamPosition: %s", err)
continue
}
}
// Garbage collect EventIDs older than 1 minute
for eventID, timestamp := range processedEventIDs {
if time.Since(timestamp) > time.Minute {
delete(processedEventIDs, eventID)
}
}
streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition, processedEventIDs)
if err != nil {
fs.Infof(f, "Change notify listener failure: %s", err)
}
}
}
}()
}
// changeNotifyStreamPosition asks the box /events endpoint for the
// current ("now") stream position, from which change polling can start.
//
// Returns the position as a decimal string, or an error if the API
// call fails after retries.
func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) {
	params := fieldsValue()
	params.Set("stream_position", "now")
	params.Set("stream_type", "changes")
	opts := rest.Opts{
		Method:     "GET",
		Path:       "/events",
		Parameters: params,
	}
	var (
		events api.Events
		resp   *http.Response
	)
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &events)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return "", err
	}
	return strconv.FormatInt(events.NextStreamPosition, 10), nil
}
// getFullPath builds the full remote path for an object from its
// parent directory ID and its (remote-encoded) name.
//
// Returns "" when parentID is non-empty but not present in the
// directory cache, since no path can be constructed in that case.
func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
	name := f.opt.Enc.ToStandardName(childName)
	if parentID == "" {
		// No parent means the object sits at the root.
		return name
	}
	parentDir, ok := f.dirCache.GetInv(parentID)
	if !ok {
		// Parent is unknown to the directory cache - no path available.
		return ""
	}
	if parentDir == "" {
		return name
	}
	return parentDir + "/" + name
}
// changeNotifyRunner fetches batches of events from the box /events
// endpoint starting at streamPosition, filters them down to genuine
// file-tree changes, and calls notifyFunc once per affected path.
//
// processedEventIDs is read and updated to de-duplicate events that box
// re-sends (the caller prunes entries older than a minute). Loops until
// box returns no events - or no new ones - and then returns the next
// stream position to poll from.
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string, processedEventIDs map[string]time.Time) (nextStreamPosition string, err error) {
	nextStreamPosition = streamPosition

	for {
		limit := f.opt.ListChunk

		// box only allows a max of 500 events
		if limit > 500 {
			limit = 500
		}

		opts := rest.Opts{
			Method:     "GET",
			Path:       "/events",
			Parameters: fieldsValue(),
		}
		opts.Parameters.Set("stream_position", nextStreamPosition)
		opts.Parameters.Set("stream_type", "changes")
		opts.Parameters.Set("limit", strconv.Itoa(limit))
		var result api.Events
		var resp *http.Response
		fs.Debugf(f, "Checking for changes on remote (next_stream_position: %q)", nextStreamPosition)
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
			return shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return "", err
		}

		// Sanity check: the server-reported chunk size must match the
		// number of entries actually received.
		if result.ChunkSize != int64(len(result.Entries)) {
			return "", fmt.Errorf("invalid response to event request, chunk_size (%v) not equal to number of entries (%v)", result.ChunkSize, len(result.Entries))
		}

		nextStreamPosition = strconv.FormatInt(result.NextStreamPosition, 10)
		// An empty chunk means we have caught up with the stream.
		if result.ChunkSize == 0 {
			return nextStreamPosition, nil
		}

		// pathToClear pairs a path with its entry type for notification.
		type pathToClear struct {
			path      string
			entryType fs.EntryType
		}
		var pathsToClear []pathToClear
		newEventIDs := 0
		for _, entry := range result.Entries {
			// Compact description of the event used in all debug logs below.
			eventDetails := fmt.Sprintf("[%q(%d)|%s|%s|%s|%s]", entry.Source.Name, entry.Source.SequenceID,
				entry.Source.Type, entry.EventType, entry.Source.ID, entry.EventID)

			if entry.EventID == "" {
				fs.Debugf(f, "%s ignored due to missing EventID", eventDetails)
				continue
			}
			if _, ok := processedEventIDs[entry.EventID]; ok {
				fs.Debugf(f, "%s ignored due to duplicate EventID", eventDetails)
				continue
			}
			processedEventIDs[entry.EventID] = time.Now()
			newEventIDs++

			if entry.Source.ID == "" { // missing File or Folder ID
				fs.Debugf(f, "%s ignored due to missing SourceID", eventDetails)
				continue
			}
			if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
				fs.Debugf(f, "%s ignored due to unsupported SourceType", eventDetails)
				continue
			}

			// Only interested in event types that result in a file tree change
			if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
				fs.Debugf(f, "%s ignored due to unsupported EventType", eventDetails)
				continue
			}

			f.itemMetaCacheMu.Lock()
			itemMeta, cachedItemMetaFound := f.itemMetaCache[entry.Source.ID]
			if cachedItemMetaFound {
				if itemMeta.SequenceID >= entry.Source.SequenceID {
					// Item in the cache has the same or newer SequenceID than
					// this event. Ignore this event, it must be old.
					f.itemMetaCacheMu.Unlock()
					fs.Debugf(f, "%s ignored due to old SequenceID (%q)", eventDetails, itemMeta.SequenceID)
					continue
				}

				// This event is newer. Delete its entry from the cache,
				// we'll notify about its change below, then it's up to a
				// future list operation to repopulate the cache.
				delete(f.itemMetaCache, entry.Source.ID)
			}
			f.itemMetaCacheMu.Unlock()

			entryType := fs.EntryDirectory
			if entry.Source.Type == api.ItemTypeFile {
				entryType = fs.EntryObject
			}

			// The box event only includes the new path for the object (e.g.
			// the path after the object was moved). If there was an old path
			// saved in our cache, it must be cleared.
			if cachedItemMetaFound {
				path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
				if path != "" {
					fs.Debugf(f, "%s added old path (%q) for notify", eventDetails, path)
					pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
				} else {
					fs.Debugf(f, "%s old parent not cached", eventDetails)
				}

				// If this is a directory, also delete it from the dir cache.
				// This will effectively invalidate the item metadata cache
				// entries for all descendents of this directory, since we
				// will no longer be able to construct a full path for them.
				// This is exactly what we want, since we don't want to notify
				// on the paths of these descendents if one of their ancestors
				// has been renamed/deleted.
				if entry.Source.Type == api.ItemTypeFolder {
					f.dirCache.FlushDir(path)
				}
			}

			// If the item is "active", then it is not trashed or deleted, so
			// it potentially has a valid parent.
			//
			// Construct the new path of the object, based on the Parent ID
			// and its name. If we get an empty result, it means we don't
			// currently know about this object so notification is unnecessary.
			if entry.Source.ItemStatus == api.ItemStatusActive {
				path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
				if path != "" {
					fs.Debugf(f, "%s added new path (%q) for notify", eventDetails, path)
					pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
				} else {
					fs.Debugf(f, "%s new parent not found", eventDetails)
				}
			}
		}

		// box can sometimes repeatedly return the same Event IDs within a
		// short period of time. If it stops giving us new ones, treat it
		// the same as if it returned us none at all.
		if newEventIDs == 0 {
			return nextStreamPosition, nil
		}

		// Notify each distinct path exactly once.
		notifiedPaths := make(map[string]bool)
		for _, p := range pathsToClear {
			if _, ok := notifiedPaths[p.path]; ok {
				continue
			}
			notifiedPaths[p.path] = true
			notifyFunc(p.path, p.entryType)
		}
		fs.Debugf(f, "Received %v events, resulting in %v paths and %v notifications", len(result.Entries), len(pathsToClear), len(notifiedPaths))
	}
}
// DirCacheFlush resets the directory cache - used in testing as an // DirCacheFlush resets the directory cache - used in testing as an
// optional interface // optional interface
func (f *Fs) DirCacheFlush() { func (f *Fs) DirCacheFlush() {

View File

@@ -76,19 +76,17 @@ func init() {
Name: "plex_url", Name: "plex_url",
Help: "The URL of the Plex server.", Help: "The URL of the Plex server.",
}, { }, {
Name: "plex_username", Name: "plex_username",
Help: "The username of the Plex user.", Help: "The username of the Plex user.",
Sensitive: true,
}, { }, {
Name: "plex_password", Name: "plex_password",
Help: "The password of the Plex user.", Help: "The password of the Plex user.",
IsPassword: true, IsPassword: true,
}, { }, {
Name: "plex_token", Name: "plex_token",
Help: "The plex token for authentication - auto set normally.", Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "plex_insecure", Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server.", Help: "Skip all certificate verification when connecting to the Plex server.",

View File

@@ -18,7 +18,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:", RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil), NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"}, UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
}) })

View File

@@ -160,11 +160,11 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
minSize := 5242880 minSize := 5242880
maxSize := 10485760 maxSize := 10485760
totalFiles := 10 totalFiles := 10
randInstance := rand.New(rand.NewSource(time.Now().Unix())) rand.Seed(time.Now().Unix())
lastFile := "" lastFile := ""
for i := 0; i < totalFiles; i++ { for i := 0; i < totalFiles; i++ {
size := int64(randInstance.Intn(maxSize-minSize) + minSize) size := int64(rand.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size) testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin" remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader) runInstance.writeRemoteReader(t, rootFs, remote, testReader)

View File

@@ -40,7 +40,6 @@ func TestIntegration(t *testing.T) {
UnimplementableFsMethods: []string{ UnimplementableFsMethods: []string{
"PublicLink", "PublicLink",
"OpenWriterAt", "OpenWriterAt",
"OpenChunkWriter",
"MergeDirs", "MergeDirs",
"DirCacheFlush", "DirCacheFlush",
"UserInfo", "UserInfo",

View File

@@ -914,7 +914,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return do(ctx, uRemote, expire, unlink) return do(ctx, uRemote, expire, unlink)
} }
// PutUnchecked in to the remote path with the modTime given of the given size // Put in to the remote path with the modTime given of the given size
// //
// May create the object even if it returns an error - if so // May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return // will return the object and the error, otherwise will return

View File

@@ -11,7 +11,7 @@ import (
) )
var ( var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"} unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect"}
unimplementableObjectMethods = []string{} unimplementableObjectMethods = []string{}
) )

View File

@@ -257,16 +257,6 @@ func isMetadataFile(filename string) bool {
return strings.HasSuffix(filename, metaFileExt) return strings.HasSuffix(filename, metaFileExt)
} }
// Checks whether a file is a metadata file and returns the original
// file name and a flag indicating whether it was a metadata file or
// not.
func unwrapMetadataFile(filename string) (string, bool) {
if !isMetadataFile(filename) {
return "", false
}
return filename[:len(filename)-len(metaFileExt)], true
}
// makeDataName generates the file name for a data file with specified compression mode // makeDataName generates the file name for a data file with specified compression mode
func makeDataName(remote string, size int64, mode int) (newRemote string) { func makeDataName(remote string, size int64, mode int) (newRemote string) {
if mode != Uncompressed { if mode != Uncompressed {
@@ -989,8 +979,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
wrappedNotifyFunc := func(path string, entryType fs.EntryType) { wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
fs.Logf(f, "path %q entryType %d", path, entryType) fs.Logf(f, "path %q entryType %d", path, entryType)
var ( var (
wrappedPath string wrappedPath string
isMetadataFile bool
) )
switch entryType { switch entryType {
case fs.EntryDirectory: case fs.EntryDirectory:
@@ -998,10 +987,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
case fs.EntryObject: case fs.EntryObject:
// Note: All we really need to do to monitor the object is to check whether the metadata changed, // Note: All we really need to do to monitor the object is to check whether the metadata changed,
// as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same. // as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
wrappedPath, isMetadataFile = unwrapMetadataFile(path) wrappedPath = makeMetadataName(path)
if !isMetadataFile {
return
}
default: default:
fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType) fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
return return

View File

@@ -14,26 +14,23 @@ import (
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
) )
var defaultOpt = fstests.Opt{
RemoteName: "TestCompress:",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"OpenChunkWriter",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{},
}
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
fstests.Run(t, &defaultOpt) opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{}}
fstests.Run(t, &opt)
} }
// TestRemoteGzip tests GZIP compression // TestRemoteGzip tests GZIP compression
@@ -43,13 +40,27 @@ func TestRemoteGzip(t *testing.T) {
} }
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip") tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
name := "TestCompressGzip" name := "TestCompressGzip"
opt := defaultOpt fstests.Run(t, &fstests.Opt{
opt.RemoteName = name + ":" RemoteName: name + ":",
opt.ExtraConfig = []fstests.ExtraConfigItem{ NilObject: (*Object)(nil),
{Name: name, Key: "type", Value: "compress"}, UnimplementableFsMethods: []string{
{Name: name, Key: "remote", Value: tempdir}, "OpenWriterAt",
{Name: name, Key: "compression_mode", Value: "gzip"}, "MergeDirs",
} "DirCacheFlush",
opt.QuickTestOK = true "PutUnchecked",
fstests.Run(t, &opt) "PutStream",
"UserInfo",
"Disconnect",
},
UnimplementableObjectMethods: []string{
"GetTier",
"SetTier",
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
QuickTestOK: true,
})
} }

View File

@@ -24,7 +24,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName, RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil), NilObject: (*crypt.Object)(nil),
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
}) })
} }
@@ -45,7 +45,7 @@ func TestStandardBase32(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato")}, {Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"}, {Name: name, Key: "filename_encryption", Value: "standard"},
}, },
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })
@@ -67,7 +67,7 @@ func TestStandardBase64(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "standard"}, {Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base64"}, {Name: name, Key: "filename_encoding", Value: "base64"},
}, },
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })
@@ -89,7 +89,7 @@ func TestStandardBase32768(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "standard"}, {Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base32768"}, {Name: name, Key: "filename_encoding", Value: "base32768"},
}, },
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })
@@ -111,7 +111,7 @@ func TestOff(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")}, {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "off"}, {Name: name, Key: "filename_encryption", Value: "off"},
}, },
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })
@@ -137,7 +137,7 @@ func TestObfuscate(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "obfuscate"}, {Name: name, Key: "filename_encryption", Value: "obfuscate"},
}, },
SkipBadWindowsCharacters: true, SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })
@@ -164,7 +164,7 @@ func TestNoDataObfuscate(t *testing.T) {
{Name: name, Key: "no_data_encryption", Value: "true"}, {Name: name, Key: "no_data_encryption", Value: "true"},
}, },
SkipBadWindowsCharacters: true, SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })

View File

@@ -71,7 +71,7 @@ const (
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum. // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = fs.SizeSuffix(googleapi.MinUploadChunkSize) minChunkSize = fs.SizeSuffix(googleapi.MinUploadChunkSize)
defaultChunkSize = 8 * fs.Mebi defaultChunkSize = 8 * fs.Mebi
partialFields = "id,name,size,md5Checksum,sha1Checksum,sha256Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey" partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey"
listRGrouping = 50 // number of IDs to search at once when using ListR listRGrouping = 50 // number of IDs to search at once when using ListR
listRInputBuffer = 1000 // size of input buffer when using ListR listRInputBuffer = 1000 // size of input buffer when using ListR
defaultXDGIcon = "text-html" defaultXDGIcon = "text-html"
@@ -143,41 +143,6 @@ var (
_linkTemplates map[string]*template.Template // available link types _linkTemplates map[string]*template.Template // available link types
) )
// rwChoices type for fs.Bits
type rwChoices struct{}
func (rwChoices) Choices() []fs.BitsChoicesInfo {
return []fs.BitsChoicesInfo{
{Bit: uint64(rwOff), Name: "off"},
{Bit: uint64(rwRead), Name: "read"},
{Bit: uint64(rwWrite), Name: "write"},
}
}
// rwChoice type alias
type rwChoice = fs.Bits[rwChoices]
const (
rwRead rwChoice = 1 << iota
rwWrite
rwOff rwChoice = 0
)
// Examples for the options
var rwExamples = fs.OptionExamples{{
Value: rwOff.String(),
Help: "Do not read or write the value",
}, {
Value: rwRead.String(),
Help: "Read the value only",
}, {
Value: rwWrite.String(),
Help: "Write the value only",
}, {
Value: (rwRead | rwWrite).String(),
Help: "Read and Write the value.",
}}
// Parse the scopes option returning a slice of scopes // Parse the scopes option returning a slice of scopes
func driveScopes(scopesString string) (scopes []string) { func driveScopes(scopesString string) (scopes []string) {
if scopesString == "" { if scopesString == "" {
@@ -285,13 +250,9 @@ func init() {
} }
return nil, fmt.Errorf("unknown state %q", config.State) return nil, fmt.Errorf("unknown state %q", config.State)
}, },
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
Help: `User metadata is stored in the properties field of the drive object.`,
},
Options: append(driveOAuthOptions(), []fs.Option{{ Options: append(driveOAuthOptions(), []fs.Option{{
Name: "scope", Name: "scope",
Help: "Comma separated list of scopes that rclone should use when requesting access from drive.", Help: "Scope that rclone should use when requesting access from drive.",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "drive", Value: "drive",
Help: "Full access all files, excluding Application Data Folder.", Help: "Full access all files, excluding Application Data Folder.",
@@ -316,23 +277,20 @@ Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point. a non root folder as its starting point.
`, `,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "service_account_file", Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp, Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, { }, {
Name: "service_account_credentials", Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator, Hide: fs.OptionHideConfigurator,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "team_drive", Name: "team_drive",
Help: "ID of the Shared Drive (Team Drive).", Help: "ID of the Shared Drive (Team Drive).",
Hide: fs.OptionHideConfigurator, Hide: fs.OptionHideConfigurator,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "auth_owner_only", Name: "auth_owner_only",
Default: false, Default: false,
@@ -359,35 +317,16 @@ rather than shortcuts themselves when doing server side copies.`,
Default: false, Default: false,
Help: "Skip google documents in all listings.\n\nIf given, gdocs practically become invisible to rclone.", Help: "Skip google documents in all listings.\n\nIf given, gdocs practically become invisible to rclone.",
Advanced: true, Advanced: true,
}, {
Name: "show_all_gdocs",
Default: false,
Help: `Show all Google Docs including non-exportable ones in listings.
If you try a server side copy on a Google Form without this flag, you
will get this error:
No export formats found for "application/vnd.google-apps.form"
However adding this flag will allow the form to be server side copied.
Note that rclone doesn't add extensions to the Google Docs file names
in this mode.
Do **not** use this flag when trying to download Google Docs - rclone
will fail to download them.
`,
Advanced: true,
}, { }, {
Name: "skip_checksum_gphotos", Name: "skip_checksum_gphotos",
Default: false, Default: false,
Help: `Skip checksums on Google photos and videos only. Help: `Skip MD5 checksum on Google photos and videos only.
Use this if you get checksum errors when transferring Google photos or Use this if you get checksum errors when transferring Google photos or
videos. videos.
Setting this flag will cause Google photos and videos to return a Setting this flag will cause Google photos and videos to return a
blank checksums. blank MD5 checksum.
Google photos are identified by being in the "photos" space. Google photos are identified by being in the "photos" space.
@@ -477,11 +416,10 @@ date is used.`,
Help: "Size of listing chunk 100-1000, 0 to disable.", Help: "Size of listing chunk 100-1000, 0 to disable.",
Advanced: true, Advanced: true,
}, { }, {
Name: "impersonate", Name: "impersonate",
Default: "", Default: "",
Help: `Impersonate this user when using a service account.`, Help: `Impersonate this user when using a service account.`,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "alternate_export", Name: "alternate_export",
Default: false, Default: false,
@@ -652,82 +590,9 @@ This resource key requirement only applies to a subset of old files.
Note also that opening the folder once in the web interface (with the Note also that opening the folder once in the web interface (with the
user you've authenticated rclone with) seems to be enough so that the user you've authenticated rclone with) seems to be enough so that the
resource key is not needed. resource key is no needed.
`,
Advanced: true,
Sensitive: true,
}, {
Name: "fast_list_bug_fix",
Help: `Work around a bug in Google Drive listing.
Normally rclone will work around a bug in Google Drive when using
--fast-list (ListR) where the search "(A in parents) or (B in
parents)" returns nothing sometimes. See #3114, #4289 and
https://issuetracker.google.com/issues/149522397
Rclone detects this by finding no items in more than one directory
when listing and retries them as lists of individual directories.
This means that if you have a lot of empty directories rclone will end
up listing them all individually and this can take many more API
calls.
This flag allows the work-around to be disabled. This is **not**
recommended in normal use - only if you have a particular case you are
having trouble with like many empty directories.
`, `,
Advanced: true, Advanced: true,
Default: true,
}, {
Name: "metadata_owner",
Help: `Control whether owner should be read or written in metadata.
Owner is a standard part of the file metadata so is easy to read. But it
isn't always desirable to set the owner from the metadata.
Note that you can't set the owner on Shared Drives, and that setting
ownership will generate an email to the new owner (this can't be
disabled), and you can't transfer ownership to someone outside your
organization.
`,
Advanced: true,
Default: rwRead,
Examples: rwExamples,
}, {
Name: "metadata_permissions",
Help: `Control whether permissions should be read or written in metadata.
Reading permissions metadata from files can be done quickly, but it
isn't always desirable to set the permissions from the metadata.
Note that rclone drops any inherited permissions on Shared Drives and
any owner permission on My Drives as these are duplicated in the owner
metadata.
`,
Advanced: true,
Default: rwOff,
Examples: rwExamples,
}, {
Name: "metadata_labels",
Help: `Control whether labels should be read or written in metadata.
Reading labels metadata from files takes an extra API transaction and
will slow down listings. It isn't always desirable to set the labels
from the metadata.
The format of labels is documented in the drive API documentation at
https://developers.google.com/drive/api/reference/rest/v3/Label -
rclone just provides a JSON dump of this format.
When setting labels, the label and fields must already exist - rclone
will not create them. This means that if you are transferring labels
from two different accounts you will have to create the labels in
advance and use the metadata mapper to translate the IDs between the
two accounts.
`,
Advanced: true,
Default: rwOff,
Examples: rwExamples,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -775,7 +640,6 @@ type Options struct {
UseTrash bool `config:"use_trash"` UseTrash bool `config:"use_trash"`
CopyShortcutContent bool `config:"copy_shortcut_content"` CopyShortcutContent bool `config:"copy_shortcut_content"`
SkipGdocs bool `config:"skip_gdocs"` SkipGdocs bool `config:"skip_gdocs"`
ShowAllGdocs bool `config:"show_all_gdocs"`
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"` SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
SharedWithMe bool `config:"shared_with_me"` SharedWithMe bool `config:"shared_with_me"`
TrashedOnly bool `config:"trashed_only"` TrashedOnly bool `config:"trashed_only"`
@@ -803,10 +667,6 @@ type Options struct {
SkipShortcuts bool `config:"skip_shortcuts"` SkipShortcuts bool `config:"skip_shortcuts"`
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"` SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
ResourceKey string `config:"resource_key"` ResourceKey string `config:"resource_key"`
FastListBugFix bool `config:"fast_list_bug_fix"`
MetadataOwner rwChoice `config:"metadata_owner"`
MetadataPermissions rwChoice `config:"metadata_permissions"`
MetadataLabels rwChoice `config:"metadata_labels"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"` EnvAuth bool `config:"env_auth"`
} }
@@ -828,25 +688,23 @@ type Fs struct {
exportExtensions []string // preferred extensions to download docs exportExtensions []string // preferred extensions to download docs
importMimeTypes []string // MIME types to convert to docs importMimeTypes []string // MIME types to convert to docs
isTeamDrive bool // true if this is a team drive isTeamDrive bool // true if this is a team drive
fileFields googleapi.Field // fields to fetch file info with
m configmap.Mapper m configmap.Mapper
grouping int32 // number of IDs to search at once in ListR - read with atomic grouping int32 // number of IDs to search at once in ListR - read with atomic
listRmu *sync.Mutex // protects listRempties listRmu *sync.Mutex // protects listRempties
listRempties map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable listRempties map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable
dirResourceKeys *sync.Map // map directory ID to resource key dirResourceKeys *sync.Map // map directory ID to resource key
permissionsMu *sync.Mutex // protect the below
permissions map[string]*drive.Permission // map permission IDs to Permissions
} }
type baseObject struct { type baseObject struct {
fs *Fs // what this object is part of fs *Fs // what this object is part of
remote string // The remote path remote string // The remote path
id string // Drive Id of this object id string // Drive Id of this object
modifiedDate string // RFC3339 time it was last modified modifiedDate string // RFC3339 time it was last modified
mimeType string // The object MIME type mimeType string // The object MIME type
bytes int64 // size of the object bytes int64 // size of the object
parents []string // IDs of the parent directories parents []string // IDs of the parent directories
resourceKey *string // resourceKey is needed for link shared objects resourceKey *string // resourceKey is needed for link shared objects
metadata *fs.Metadata // metadata if known
} }
type documentObject struct { type documentObject struct {
baseObject baseObject
@@ -865,8 +723,6 @@ type Object struct {
baseObject baseObject
url string // Download URL of this object url string // Download URL of this object
md5sum string // md5sum of the object md5sum string // md5sum of the object
sha1sum string // sha1sum of the object
sha256sum string // sha256sum of the object
v2Download bool // generate v2 download link ondemand v2Download bool // generate v2 download link ondemand
} }
@@ -1095,7 +951,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
list.Header().Add("X-Goog-Drive-Resource-Keys", resourceKeysHeader) list.Header().Add("X-Goog-Drive-Resource-Keys", resourceKeysHeader)
} }
fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.getFileFields(ctx)) fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields)
OUTER: OUTER:
for { for {
@@ -1369,10 +1225,9 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
listRmu: new(sync.Mutex), listRmu: new(sync.Mutex),
listRempties: make(map[string]struct{}), listRempties: make(map[string]struct{}),
dirResourceKeys: new(sync.Map), dirResourceKeys: new(sync.Map),
permissionsMu: new(sync.Mutex),
permissions: make(map[string]*drive.Permission),
} }
f.isTeamDrive = opt.TeamDriveID != "" f.isTeamDrive = opt.TeamDriveID != ""
f.fileFields = f.getFileFields()
f.features = (&fs.Features{ f.features = (&fs.Features{
DuplicateFiles: true, DuplicateFiles: true,
ReadMimeType: true, ReadMimeType: true,
@@ -1380,9 +1235,6 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs, ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
FilterAware: true, FilterAware: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}).Fill(ctx, f) }).Fill(ctx, f)
// Create a new authorized Drive client. // Create a new authorized Drive client.
@@ -1487,7 +1339,7 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
return f, nil return f, nil
} }
func (f *Fs) newBaseObject(ctx context.Context, remote string, info *drive.File) (o baseObject, err error) { func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
modifiedDate := info.ModifiedTime modifiedDate := info.ModifiedTime
if f.opt.UseCreatedDate { if f.opt.UseCreatedDate {
modifiedDate = info.CreatedTime modifiedDate = info.CreatedTime
@@ -1498,7 +1350,7 @@ func (f *Fs) newBaseObject(ctx context.Context, remote string, info *drive.File)
if f.opt.SizeAsQuota { if f.opt.SizeAsQuota {
size = info.QuotaBytesUsed size = info.QuotaBytesUsed
} }
o = baseObject{ return baseObject{
fs: f, fs: f,
remote: remote, remote: remote,
id: info.Id, id: info.Id,
@@ -1507,15 +1359,10 @@ func (f *Fs) newBaseObject(ctx context.Context, remote string, info *drive.File)
bytes: size, bytes: size,
parents: info.Parents, parents: info.Parents,
} }
err = nil
if fs.GetConfig(ctx).Metadata {
err = o.parseMetadata(ctx, info)
}
return o, err
} }
// getFileFields gets the fields for a normal file Get or List // getFileFields gets the fields for a normal file Get or List
func (f *Fs) getFileFields(ctx context.Context) (fields googleapi.Field) { func (f *Fs) getFileFields() (fields googleapi.Field) {
fields = partialFields fields = partialFields
if f.opt.AuthOwnerOnly { if f.opt.AuthOwnerOnly {
fields += ",owners" fields += ",owners"
@@ -1529,53 +1376,40 @@ func (f *Fs) getFileFields(ctx context.Context) (fields googleapi.Field) {
if f.opt.SizeAsQuota { if f.opt.SizeAsQuota {
fields += ",quotaBytesUsed" fields += ",quotaBytesUsed"
} }
if fs.GetConfig(ctx).Metadata {
fields += "," + metadataFields
}
return fields return fields
} }
// newRegularObject creates an fs.Object for a normal drive.File // newRegularObject creates an fs.Object for a normal drive.File
func (f *Fs) newRegularObject(ctx context.Context, remote string, info *drive.File) (obj fs.Object, err error) { func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
// wipe checksum if SkipChecksumGphotos and file is type Photo or Video // wipe checksum if SkipChecksumGphotos and file is type Photo or Video
if f.opt.SkipChecksumGphotos { if f.opt.SkipChecksumGphotos {
for _, space := range info.Spaces { for _, space := range info.Spaces {
if space == "photos" { if space == "photos" {
info.Md5Checksum = "" info.Md5Checksum = ""
info.Sha1Checksum = ""
info.Sha256Checksum = ""
break break
} }
} }
} }
o := &Object{ o := &Object{
baseObject: f.newBaseObject(remote, info),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)), url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
md5sum: strings.ToLower(info.Md5Checksum), md5sum: strings.ToLower(info.Md5Checksum),
sha1sum: strings.ToLower(info.Sha1Checksum),
sha256sum: strings.ToLower(info.Sha256Checksum),
v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize), v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
} }
o.baseObject, err = f.newBaseObject(ctx, remote, info)
if err != nil {
return nil, err
}
if info.ResourceKey != "" { if info.ResourceKey != "" {
o.resourceKey = &info.ResourceKey o.resourceKey = &info.ResourceKey
} }
return o, nil return o
} }
// newDocumentObject creates an fs.Object for a google docs drive.File // newDocumentObject creates an fs.Object for a google docs drive.File
func (f *Fs) newDocumentObject(ctx context.Context, remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) { func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
mediaType, _, err := mime.ParseMediaType(exportMimeType) mediaType, _, err := mime.ParseMediaType(exportMimeType)
if err != nil { if err != nil {
return nil, err return nil, err
} }
url := info.ExportLinks[mediaType] url := info.ExportLinks[mediaType]
baseObject, err := f.newBaseObject(ctx, remote+extension, info) baseObject := f.newBaseObject(remote+extension, info)
if err != nil {
return nil, err
}
baseObject.bytes = -1 baseObject.bytes = -1
baseObject.mimeType = exportMimeType baseObject.mimeType = exportMimeType
return &documentObject{ return &documentObject{
@@ -1587,7 +1421,7 @@ func (f *Fs) newDocumentObject(ctx context.Context, remote string, info *drive.F
} }
// newLinkObject creates an fs.Object that represents a link a google docs drive.File // newLinkObject creates an fs.Object that represents a link a google docs drive.File
func (f *Fs) newLinkObject(ctx context.Context, remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) { func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
t := linkTemplate(exportMimeType) t := linkTemplate(exportMimeType)
if t == nil { if t == nil {
return nil, fmt.Errorf("unsupported link type %s", exportMimeType) return nil, fmt.Errorf("unsupported link type %s", exportMimeType)
@@ -1606,10 +1440,7 @@ func (f *Fs) newLinkObject(ctx context.Context, remote string, info *drive.File,
return nil, fmt.Errorf("executing template failed: %w", err) return nil, fmt.Errorf("executing template failed: %w", err)
} }
baseObject, err := f.newBaseObject(ctx, remote+extension, info) baseObject := f.newBaseObject(remote+extension, info)
if err != nil {
return nil, err
}
baseObject.bytes = int64(buf.Len()) baseObject.bytes = int64(buf.Len())
baseObject.mimeType = exportMimeType baseObject.mimeType = exportMimeType
return &linkObject{ return &linkObject{
@@ -1625,7 +1456,7 @@ func (f *Fs) newLinkObject(ctx context.Context, remote string, info *drive.File,
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) { func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) {
// If item has MD5 sum it is a file stored on drive // If item has MD5 sum it is a file stored on drive
if info.Md5Checksum != "" { if info.Md5Checksum != "" {
return f.newRegularObject(ctx, remote, info) return f.newRegularObject(remote, info), nil
} }
extension, exportName, exportMimeType, isDocument := f.findExportFormat(ctx, info) extension, exportName, exportMimeType, isDocument := f.findExportFormat(ctx, info)
@@ -1656,15 +1487,13 @@ func (f *Fs) newObjectWithExportInfo(
case info.MimeType == shortcutMimeTypeDangling: case info.MimeType == shortcutMimeTypeDangling:
// Pretend a dangling shortcut is a regular object // Pretend a dangling shortcut is a regular object
// It will error if used, but appear in listings so it can be deleted // It will error if used, but appear in listings so it can be deleted
return f.newRegularObject(ctx, remote, info) return f.newRegularObject(remote, info), nil
case info.Md5Checksum != "": case info.Md5Checksum != "":
// If item has MD5 sum it is a file stored on drive // If item has MD5 sum it is a file stored on drive
return f.newRegularObject(ctx, remote, info) return f.newRegularObject(remote, info), nil
case f.opt.SkipGdocs: case f.opt.SkipGdocs:
fs.Debugf(remote, "Skipping google document type %q", info.MimeType) fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
return nil, fs.ErrorObjectNotFound return nil, fs.ErrorObjectNotFound
case f.opt.ShowAllGdocs:
return f.newDocumentObject(ctx, remote, info, "", info.MimeType)
default: default:
// If item MimeType is in the ExportFormats then it is a google doc // If item MimeType is in the ExportFormats then it is a google doc
if !isDocument { if !isDocument {
@@ -1676,9 +1505,9 @@ func (f *Fs) newObjectWithExportInfo(
return nil, fs.ErrorObjectNotFound return nil, fs.ErrorObjectNotFound
} }
if isLinkMimeType(exportMimeType) { if isLinkMimeType(exportMimeType) {
return f.newLinkObject(ctx, remote, info, extension, exportMimeType) return f.newLinkObject(remote, info, extension, exportMimeType)
} }
return f.newDocumentObject(ctx, remote, info, extension, exportMimeType) return f.newDocumentObject(remote, info, extension, exportMimeType)
} }
} }
@@ -2057,7 +1886,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
// drive where (A in parents) or (B in parents) returns nothing // drive where (A in parents) or (B in parents) returns nothing
// sometimes. See #3114, #4289 and // sometimes. See #3114, #4289 and
// https://issuetracker.google.com/issues/149522397 // https://issuetracker.google.com/issues/149522397
if f.opt.FastListBugFix && len(dirs) > 1 && !foundItems { if len(dirs) > 1 && !foundItems {
if atomic.SwapInt32(&f.grouping, 1) != 1 { if atomic.SwapInt32(&f.grouping, 1) != 1 {
fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs)) fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
} }
@@ -2306,7 +2135,7 @@ func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *dr
fs.Errorf(nil, "Expecting shortcutDetails in %v", item) fs.Errorf(nil, "Expecting shortcutDetails in %v", item)
return item, nil return item, nil
} }
newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.getFileFields(ctx)) newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.fileFields)
if err != nil { if err != nil {
var gerr *googleapi.Error var gerr *googleapi.Error
if errors.As(err, &gerr) && gerr.Code == 404 { if errors.As(err, &gerr) && gerr.Code == 404 {
@@ -2438,10 +2267,6 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
} else { } else {
createInfo.MimeType = fs.MimeTypeFromName(remote) createInfo.MimeType = fs.MimeTypeFromName(remote)
} }
updateMetadata, err := f.fetchAndUpdateMetadata(ctx, src, options, createInfo, false)
if err != nil {
return nil, err
}
var info *drive.File var info *drive.File
if size >= 0 && size < int64(f.opt.UploadCutoff) { if size >= 0 && size < int64(f.opt.UploadCutoff) {
@@ -2466,10 +2291,6 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
return nil, err return nil, err
} }
} }
err = updateMetadata(ctx, info)
if err != nil {
return nil, err
}
return f.newObjectWithInfo(ctx, remote, info) return f.newObjectWithInfo(ctx, remote, info)
} }
@@ -3158,7 +2979,7 @@ func (f *Fs) DirCacheFlush() {
// Hashes returns the supported hash sets. // Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { func (f *Fs) Hashes() hash.Set {
return hash.NewHashSet(hash.MD5, hash.SHA1, hash.SHA256) return hash.Set(hash.MD5)
} }
func (f *Fs) changeChunkSize(chunkSizeString string) (err error) { func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
@@ -3387,7 +3208,7 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
// copy file with id to dest // copy file with id to dest
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) { func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
info, err := f.getFile(ctx, id, f.getFileFields(ctx)) info, err := f.getFile(ctx, id, f.fileFields)
if err != nil { if err != nil {
return fmt.Errorf("couldn't find id: %w", err) return fmt.Errorf("couldn't find id: %w", err)
} }
@@ -3719,16 +3540,10 @@ func (o *baseObject) Remote() string {
// Hash returns the Md5sum of an object returning a lowercase hex string // Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t == hash.MD5 { if t != hash.MD5 {
return o.md5sum, nil return "", hash.ErrUnsupported
} }
if t == hash.SHA1 { return o.md5sum, nil
return o.sha1sum, nil
}
if t == hash.SHA256 {
return o.sha256sum, nil
}
return "", hash.ErrUnsupported
} }
func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 { if t != hash.MD5 {
@@ -4067,20 +3882,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
MimeType: srcMimeType, MimeType: srcMimeType,
ModifiedTime: src.ModTime(ctx).Format(timeFormatOut), ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
} }
updateMetadata, err := o.fs.fetchAndUpdateMetadata(ctx, src, options, updateInfo, true)
if err != nil {
return err
}
info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src) info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
if err != nil { if err != nil {
return err return err
} }
err = updateMetadata(ctx, info)
if err != nil {
return err
}
newO, err := o.fs.newObjectWithInfo(ctx, o.remote, info) newO, err := o.fs.newObjectWithInfo(ctx, o.remote, info)
if err != nil { if err != nil {
return err return err
@@ -4166,26 +3971,6 @@ func (o *baseObject) ParentID() string {
return "" return ""
} }
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *baseObject) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
if o.metadata != nil {
return *o.metadata, nil
}
fs.Debugf(o, "Fetching metadata")
id := actualID(o.id)
info, err := o.fs.getFile(ctx, id, o.fs.getFileFields(ctx))
if err != nil {
return nil, err
}
err = o.parseMetadata(ctx, info)
if err != nil {
return nil, err
}
return *o.metadata, nil
}
func (o *documentObject) ext() string { func (o *documentObject) ext() string {
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:] return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
} }
@@ -4247,7 +4032,6 @@ var (
_ fs.MimeTyper = (*Object)(nil) _ fs.MimeTyper = (*Object)(nil)
_ fs.IDer = (*Object)(nil) _ fs.IDer = (*Object)(nil)
_ fs.ParentIDer = (*Object)(nil) _ fs.ParentIDer = (*Object)(nil)
_ fs.Metadataer = (*Object)(nil)
_ fs.Object = (*documentObject)(nil) _ fs.Object = (*documentObject)(nil)
_ fs.MimeTyper = (*documentObject)(nil) _ fs.MimeTyper = (*documentObject)(nil)
_ fs.IDer = (*documentObject)(nil) _ fs.IDer = (*documentObject)(nil)

View File

@@ -1,608 +0,0 @@
package drive
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"sync"
"github.com/rclone/rclone/fs"
"golang.org/x/sync/errgroup"
drive "google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)
// system metadata keys which this backend owns
var systemMetadataInfo = map[string]fs.MetadataHelp{
"content-type": {
Help: "The MIME type of the file.",
Type: "string",
Example: "text/plain",
},
"mtime": {
Help: "Time of last modification with mS accuracy.",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999Z07:00",
},
"btime": {
Help: "Time of file birth (creation) with mS accuracy. Note that this is only writable on fresh uploads - it can't be written for updates.",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999Z07:00",
},
"copy-requires-writer-permission": {
Help: "Whether the options to copy, print, or download this file, should be disabled for readers and commenters.",
Type: "boolean",
Example: "true",
},
"writers-can-share": {
Help: "Whether users with only writer permission can modify the file's permissions. Not populated for items in shared drives.",
Type: "boolean",
Example: "false",
},
"viewed-by-me": {
Help: "Whether the file has been viewed by this user.",
Type: "boolean",
Example: "true",
ReadOnly: true,
},
"owner": {
Help: "The owner of the file. Usually an email address. Enable with --drive-metadata-owner.",
Type: "string",
Example: "user@example.com",
},
"permissions": {
Help: "Permissions in a JSON dump of Google drive format. On shared drives these will only be present if they aren't inherited. Enable with --drive-metadata-permissions.",
Type: "JSON",
Example: "{}",
},
"folder-color-rgb": {
Help: "The color for a folder or a shortcut to a folder as an RGB hex string.",
Type: "string",
Example: "881133",
},
"description": {
Help: "A short description of the file.",
Type: "string",
Example: "Contract for signing",
},
"starred": {
Help: "Whether the user has starred the file.",
Type: "boolean",
Example: "false",
},
"labels": {
Help: "Labels attached to this file in a JSON dump of Googled drive format. Enable with --drive-metadata-labels.",
Type: "JSON",
Example: "[]",
},
}
// Extra fields we need to fetch to implement the system metadata above
var metadataFields = googleapi.Field(strings.Join([]string{
"copyRequiresWriterPermission",
"description",
"folderColorRgb",
"hasAugmentedPermissions",
"owners",
"permissionIds",
"permissions",
"properties",
"starred",
"viewedByMe",
"viewedByMeTime",
"writersCanShare",
}, ","))
// Fields we need to read from permissions
var permissionsFields = googleapi.Field(strings.Join([]string{
"*",
"permissionDetails/*",
}, ","))
// getPermission returns permissions for the fileID and permissionID passed in
func (f *Fs) getPermission(ctx context.Context, fileID, permissionID string, useCache bool) (perm *drive.Permission, inherited bool, err error) {
f.permissionsMu.Lock()
defer f.permissionsMu.Unlock()
if useCache {
perm = f.permissions[permissionID]
if perm != nil {
return perm, false, nil
}
}
fs.Debugf(f, "Fetching permission %q", permissionID)
err = f.pacer.Call(func() (bool, error) {
perm, err = f.svc.Permissions.Get(fileID, permissionID).
Fields(permissionsFields).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, false, err
}
inherited = len(perm.PermissionDetails) > 0 && perm.PermissionDetails[0].Inherited
cleanPermission(perm)
// cache the permission
f.permissions[permissionID] = perm
return perm, inherited, err
}
// Set the permissions on the info
func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions []*drive.Permission) (err error) {
for _, perm := range permissions {
if perm.Role == "owner" {
// ignore owner permissions - these are set with owner
continue
}
cleanPermissionForWrite(perm)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Permissions.Create(info.Id, perm).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to set permission: %w", err)
}
}
return nil
}
// Clean attributes from permissions which we can't write
func cleanPermissionForWrite(perm *drive.Permission) {
perm.Deleted = false
perm.DisplayName = ""
perm.Id = ""
perm.Kind = ""
perm.PermissionDetails = nil
perm.TeamDrivePermissionDetails = nil
}
// Clean and cache the permission if not already cached
func (f *Fs) cleanAndCachePermission(perm *drive.Permission) {
f.permissionsMu.Lock()
defer f.permissionsMu.Unlock()
cleanPermission(perm)
if _, found := f.permissions[perm.Id]; !found {
f.permissions[perm.Id] = perm
}
}
// Clean fields we don't need to keep from the permission
func cleanPermission(perm *drive.Permission) {
// DisplayName: Output only. The "pretty" name of the value of the
// permission. The following is a list of examples for each type of
// permission: * `user` - User's full name, as defined for their Google
// account, such as "Joe Smith." * `group` - Name of the Google Group,
// such as "The Company Administrators." * `domain` - String domain
// name, such as "thecompany.com." * `anyone` - No `displayName` is
// present.
perm.DisplayName = ""
// Kind: Output only. Identifies what kind of resource this is. Value:
// the fixed string "drive#permission".
perm.Kind = ""
// PermissionDetails: Output only. Details of whether the permissions on
// this shared drive item are inherited or directly on this item. This
// is an output-only field which is present only for shared drive items.
perm.PermissionDetails = nil
// PhotoLink: Output only. A link to the user's profile photo, if
// available.
perm.PhotoLink = ""
// TeamDrivePermissionDetails: Output only. Deprecated: Output only. Use
// `permissionDetails` instead.
perm.TeamDrivePermissionDetails = nil
}
// Fields we need to read from labels
var labelsFields = googleapi.Field(strings.Join([]string{
"*",
}, ","))
// getLabels returns labels for the fileID passed in
func (f *Fs) getLabels(ctx context.Context, fileID string) (labels []*drive.Label, err error) {
fs.Debugf(f, "Fetching labels for %q", fileID)
listLabels := f.svc.Files.ListLabels(fileID).
Fields(labelsFields).
Context(ctx)
for {
var info *drive.LabelList
err = f.pacer.Call(func() (bool, error) {
info, err = listLabels.Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
labels = append(labels, info.Labels...)
if info.NextPageToken == "" {
break
}
listLabels.PageToken(info.NextPageToken)
}
for _, label := range labels {
cleanLabel(label)
}
return labels, nil
}
// Set the labels on the info
func (f *Fs) setLabels(ctx context.Context, info *drive.File, labels []*drive.Label) (err error) {
if len(labels) == 0 {
return nil
}
req := drive.ModifyLabelsRequest{}
for _, label := range labels {
req.LabelModifications = append(req.LabelModifications, &drive.LabelModification{
FieldModifications: labelFieldsToFieldModifications(label.Fields),
LabelId: label.Id,
})
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.ModifyLabels(info.Id, &req).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to set owner: %w", err)
}
return nil
}
// Convert label fields into something which can set the fields
func labelFieldsToFieldModifications(fields map[string]drive.LabelField) (out []*drive.LabelFieldModification) {
for id, field := range fields {
var emails []string
for _, user := range field.User {
emails = append(emails, user.EmailAddress)
}
out = append(out, &drive.LabelFieldModification{
// FieldId: The ID of the field to be modified.
FieldId: id,
// SetDateValues: Replaces the value of a dateString Field with these
// new values. The string must be in the RFC 3339 full-date format:
// YYYY-MM-DD.
SetDateValues: field.DateString,
// SetIntegerValues: Replaces the value of an `integer` field with these
// new values.
SetIntegerValues: field.Integer,
// SetSelectionValues: Replaces a `selection` field with these new
// values.
SetSelectionValues: field.Selection,
// SetTextValues: Sets the value of a `text` field.
SetTextValues: field.Text,
// SetUserValues: Replaces a `user` field with these new values. The
// values must be valid email addresses.
SetUserValues: emails,
})
}
return out
}
// Clean fields we don't need to keep from the label
func cleanLabel(label *drive.Label) {
// Kind: This is always drive#label
label.Kind = ""
for name, field := range label.Fields {
// Kind: This is always drive#labelField.
field.Kind = ""
// Note the fields are copies so we need to write them
// back to the map
label.Fields[name] = field
}
}
// Parse the metadata from drive item
//
// It should return nil if there is no Metadata
func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err error) {
metadata := make(fs.Metadata, 16)
// Dump user metadata first as it overrides system metadata
for k, v := range info.Properties {
metadata[k] = v
}
// System metadata
metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)
metadata["writers-can-share"] = fmt.Sprint(info.WritersCanShare)
metadata["viewed-by-me"] = fmt.Sprint(info.ViewedByMe)
metadata["content-type"] = info.MimeType
// Owners: Output only. The owner of this file. Only certain legacy
// files may have more than one owner. This field isn't populated for
// items in shared drives.
if o.fs.opt.MetadataOwner.IsSet(rwRead) && len(info.Owners) > 0 {
user := info.Owners[0]
if len(info.Owners) > 1 {
fs.Logf(o, "Ignoring more than 1 owner")
}
if user != nil {
id := user.EmailAddress
if id == "" {
id = user.DisplayName
}
metadata["owner"] = id
}
}
if o.fs.opt.MetadataPermissions.IsSet(rwRead) {
// We only write permissions out if they are not inherited.
//
// On My Drives permissions seem to be attached to every item
// so they will always be written out.
//
// On Shared Drives only non-inherited permissions will be
// written out.
// To read the inherited permissions flag will mean we need to
// read the permissions for each object and the cache will be
// useless. However shared drives don't return permissions
// only permissionIds so will need to fetch them for each
// object. We use HasAugmentedPermissions to see if there are
// special permissions before fetching them to save transactions.
// HasAugmentedPermissions: Output only. Whether there are permissions
// directly on this file. This field is only populated for items in
// shared drives.
if o.fs.isTeamDrive && !info.HasAugmentedPermissions {
// Don't process permissions if there aren't any specifically set
info.Permissions = nil
info.PermissionIds = nil
}
// PermissionIds: Output only. List of permission IDs for users with
// access to this file.
//
// Only process these if we have no Permissions
if len(info.PermissionIds) > 0 && len(info.Permissions) == 0 {
info.Permissions = make([]*drive.Permission, 0, len(info.PermissionIds))
g, gCtx := errgroup.WithContext(ctx)
g.SetLimit(o.fs.ci.Checkers)
var mu sync.Mutex // protect the info.Permissions from concurrent writes
for _, permissionID := range info.PermissionIds {
permissionID := permissionID
g.Go(func() error {
// must fetch the team drive ones individually to check the inherited flag
perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
if err != nil {
return fmt.Errorf("failed to read permission: %w", err)
}
// Don't write inherited permissions out
if inherited {
return nil
}
// Don't write owner role out - these are covered by the owner metadata
if perm.Role == "owner" {
return nil
}
mu.Lock()
info.Permissions = append(info.Permissions, perm)
mu.Unlock()
return nil
})
}
err = g.Wait()
if err != nil {
return err
}
} else {
// Clean the fetched permissions
for _, perm := range info.Permissions {
o.fs.cleanAndCachePermission(perm)
}
}
// Permissions: Output only. The full list of permissions for the file.
// This is only available if the requesting user can share the file. Not
// populated for items in shared drives.
if len(info.Permissions) > 0 {
buf, err := json.Marshal(info.Permissions)
if err != nil {
return fmt.Errorf("failed to marshal permissions: %w", err)
}
metadata["permissions"] = string(buf)
}
// Permission propagation
// https://developers.google.com/drive/api/guides/manage-sharing#permission-propagation
// Leads me to believe that in non shared drives, permissions
// are added to each item when you set permissions for a
// folder whereas in shared drives they are inherited and
// placed on the item directly.
}
if info.FolderColorRgb != "" {
metadata["folder-color-rgb"] = info.FolderColorRgb
}
if info.Description != "" {
metadata["description"] = info.Description
}
metadata["starred"] = fmt.Sprint(info.Starred)
metadata["btime"] = info.CreatedTime
metadata["mtime"] = info.ModifiedTime
if o.fs.opt.MetadataLabels.IsSet(rwRead) {
// FIXME would be really nice if we knew if files had labels
// before listing but we need to know all possible label IDs
// to get it in the listing.
labels, err := o.fs.getLabels(ctx, actualID(info.Id))
if err != nil {
return fmt.Errorf("failed to fetch labels: %w", err)
}
buf, err := json.Marshal(labels)
if err != nil {
return fmt.Errorf("failed to marshal labels: %w", err)
}
metadata["labels"] = string(buf)
}
o.metadata = &metadata
return nil
}
// Set the owner on the info
func (f *Fs) setOwner(ctx context.Context, info *drive.File, owner string) (err error) {
perm := drive.Permission{
Role: "owner",
EmailAddress: owner,
// Type: The type of the grantee. Valid values are: * `user` * `group` *
// `domain` * `anyone` When creating a permission, if `type` is `user`
// or `group`, you must provide an `emailAddress` for the user or group.
// When `type` is `domain`, you must provide a `domain`. There isn't
// extra information required for an `anyone` type.
Type: "user",
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Permissions.Create(info.Id, &perm).
SupportsAllDrives(true).
TransferOwnership(true).
// SendNotificationEmail(false). - required apparently!
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to set owner: %w", err)
}
return nil
}
// Call back to set metadata that can't be set on the upload/update
//
// The *drive.File passed in holds the current state of the drive.File
// and this should update it with any modifications.
type updateMetadataFn func(context.Context, *drive.File) error
// read the metadata from meta and write it into updateInfo
//
// update should be true if this is being used to create metadata for
// an update/PATCH call as the rules on what can be updated are
// slightly different there.
//
// It returns a callback which should be called to finish the updates
// after the data is uploaded.
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
	// Setters which can't run until after the upload has completed
	var pending []updateMetadataFn
	callback = func(ctx context.Context, info *drive.File) error {
		for _, fn := range pending {
			if fnErr := fn(ctx, info); fnErr != nil {
				return fnErr
			}
		}
		return nil
	}
	// merge metadata into request and user metadata
	for k, v := range meta {
		key, value := k, v // copies for capture by the closures below
		// parse value as a boolean and store it in *dst
		parseBool := func(dst *bool) error {
			parsed, parseErr := strconv.ParseBool(value)
			if parseErr != nil {
				return fmt.Errorf("can't parse metadata %q = %q: %w", key, value, parseErr)
			}
			*dst = parsed
			return nil
		}
		switch key {
		case "copy-requires-writer-permission":
			if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
				return nil, err
			}
		case "writers-can-share":
			if err := parseBool(&updateInfo.WritersCanShare); err != nil {
				return nil, err
			}
		case "viewed-by-me":
			// Read only - can't be written
		case "content-type":
			updateInfo.MimeType = value
		case "owner":
			if !f.opt.MetadataOwner.IsSet(rwWrite) {
				continue
			}
			// Owner can't be set on upload so set it afterwards
			pending = append(pending, func(ctx context.Context, info *drive.File) error {
				return f.setOwner(ctx, info, value)
			})
		case "permissions":
			if !f.opt.MetadataPermissions.IsSet(rwWrite) {
				continue
			}
			var perms []*drive.Permission
			if jsonErr := json.Unmarshal([]byte(value), &perms); jsonErr != nil {
				return nil, fmt.Errorf("failed to unmarshal permissions: %w", jsonErr)
			}
			// Permissions can't be set on upload so set them afterwards
			pending = append(pending, func(ctx context.Context, info *drive.File) error {
				return f.setPermissions(ctx, info, perms)
			})
		case "labels":
			if !f.opt.MetadataLabels.IsSet(rwWrite) {
				continue
			}
			var labels []*drive.Label
			if jsonErr := json.Unmarshal([]byte(value), &labels); jsonErr != nil {
				return nil, fmt.Errorf("failed to unmarshal labels: %w", jsonErr)
			}
			// Labels can't be set on upload so set them afterwards
			pending = append(pending, func(ctx context.Context, info *drive.File) error {
				return f.setLabels(ctx, info, labels)
			})
		case "folder-color-rgb":
			updateInfo.FolderColorRgb = value
		case "description":
			updateInfo.Description = value
		case "starred":
			if err := parseBool(&updateInfo.Starred); err != nil {
				return nil, err
			}
		case "btime":
			if update {
				fs.Debugf(f, "Skipping btime metadata as can't update it on an existing file: %v", value)
			} else {
				updateInfo.CreatedTime = value
			}
		case "mtime":
			updateInfo.ModifiedTime = value
		default:
			// Anything unrecognised becomes a user property
			if updateInfo.Properties == nil {
				updateInfo.Properties = make(map[string]string, 1)
			}
			updateInfo.Properties[key] = value
		}
	}
	return callback, nil
}
// fetchAndUpdateMetadata reads the metadata for src (when --metadata is
// in use) and merges it into updateInfo.
//
// It returns a callback which must be called after the upload completes
// to apply any metadata that can't be set at upload time.
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *drive.File, update bool) (callback updateMetadataFn, err error) {
	meta, readErr := fs.GetMetadataOptions(ctx, f, src, options)
	if readErr != nil {
		return nil, fmt.Errorf("failed to read metadata from source object: %w", readErr)
	}
	if callback, err = f.updateMetadata(ctx, updateInfo, meta, update); err != nil {
		return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
	}
	return callback, nil
}

View File

@@ -8,19 +8,121 @@ package dropbox
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
) )
const (
maxBatchSize = 1000 // max size the batch can be
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (ssync)
defaultBatchSizeAsync = 100 // default batch size if async
)
// batcher holds info about the current items waiting for upload
type batcher struct {
f *Fs // Fs this batch is part of
mode string // configured batch mode
size int // maximum size for batch
timeout time.Duration // idle timeout for batch
async bool // whether we are using async batching
in chan batcherRequest // incoming items to batch
closed chan struct{} // close to indicate batcher shut down
atexit atexit.FnHandle // atexit handle
shutOnce sync.Once // make sure we shutdown once only
wg sync.WaitGroup // wait for shutdown
}
// batcherRequest holds an incoming request with a place for a reply
type batcherRequest struct {
commitInfo *files.UploadSessionFinishArg
result chan<- batcherResponse
}
// Return true if batcherRequest is the quit request
func (br *batcherRequest) isQuit() bool {
return br.commitInfo == nil
}
// Send this to get the engine to quit
var quitRequest = batcherRequest{}
// batcherResponse holds a response to be delivered to clients waiting
// for a batch to complete.
type batcherResponse struct {
err error
entry *files.FileMetadata
}
// newBatcher creates a new batcher structure
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
}
async := false
switch mode {
case "sync":
if size <= 0 {
ci := fs.GetConfig(ctx)
size = ci.Transfers
}
if timeout <= 0 {
timeout = defaultTimeoutSync
}
case "async":
if size <= 0 {
size = defaultBatchSizeAsync
}
if timeout <= 0 {
timeout = defaultTimeoutAsync
}
async = true
case "off":
size = 0
default:
return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}
b := &batcher{
f: f,
mode: mode,
size: size,
timeout: timeout,
async: async,
in: make(chan batcherRequest, size),
closed: make(chan struct{}),
}
if b.Batching() {
b.atexit = atexit.Register(b.Shutdown)
b.wg.Add(1)
go b.commitLoop(context.Background())
}
return b, nil
}
// Batching returns true if batching is active
func (b *batcher) Batching() bool {
return b.size > 0
}
// finishBatch commits the batch, returning a batch status to poll or maybe complete // finishBatch commits the batch, returning a batch status to poll or maybe complete
func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) { func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
var arg = &files.UploadSessionFinishBatchArg{ var arg = &files.UploadSessionFinishBatchArg{
Entries: items, Entries: items,
} }
err = f.pacer.Call(func() (bool, error) { err = b.f.pacer.Call(func() (bool, error) {
complete, err = f.srv.UploadSessionFinishBatchV2(arg) complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
// If error is insufficient space then don't retry // If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok { if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace { if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
@@ -37,10 +139,23 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
return complete, nil return complete, nil
} }
// Called by the batcher to commit a batch // commit a batch
func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []*files.FileMetadata, errors []error) (err error) { func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && !signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
}
}
}()
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
fs.Debugf(b.f, "Committing %s", desc)
// finalise the batch getting either a result or a job id to poll // finalise the batch getting either a result or a job id to poll
complete, err := f.finishBatch(ctx, items) complete, err := b.finishBatch(ctx, items)
if err != nil { if err != nil {
return err return err
} }
@@ -51,13 +166,19 @@ func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinish
return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries)) return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
} }
// Format results for return // Report results to clients
var (
errorTag = ""
errorCount = 0
)
for i := range results { for i := range results {
item := entries[i] item := entries[i]
resp := batcherResponse{}
if item.Tag == "success" { if item.Tag == "success" {
results[i] = item.Success resp.entry = item.Success
} else { } else {
errorTag := item.Tag errorCount++
errorTag = item.Tag
if item.Failure != nil { if item.Failure != nil {
errorTag = item.Failure.Tag errorTag = item.Failure.Tag
if item.Failure.LookupFailed != nil { if item.Failure.LookupFailed != nil {
@@ -70,9 +191,112 @@ func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinish
errorTag += "/" + item.Failure.PropertiesError.Tag errorTag += "/" + item.Failure.PropertiesError.Tag
} }
} }
errors[i] = fmt.Errorf("upload failed: %s", errorTag) resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
} }
} }
// Show signalled so no need to report error to clients from now on
signalled = true
// Report an error if any failed in the batch
if errorTag != "" {
return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}
fs.Debugf(b.f, "Committed %s", desc)
return nil return nil
} }
// commitLoop runs the commit engine in the background
func (b *batcher) commitLoop(ctx context.Context) {
var (
items []*files.UploadSessionFinishArg // current batch of uncommitted files
results []chan<- batcherResponse // current batch of clients awaiting results
idleTimer = time.NewTimer(b.timeout)
commit = func() {
err := b.commitBatch(ctx, items, results)
if err != nil {
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
}
items, results = nil, nil
}
)
defer b.wg.Done()
defer idleTimer.Stop()
idleTimer.Stop()
outer:
for {
select {
case req := <-b.in:
if req.isQuit() {
break outer
}
items = append(items, req.commitInfo)
results = append(results, req.result)
idleTimer.Stop()
if len(items) >= b.size {
commit()
} else {
idleTimer.Reset(b.timeout)
}
case <-idleTimer.C:
if len(items) > 0 {
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
commit()
}
}
}
// commit any remaining items
if len(items) > 0 {
commit()
}
}
// Shutdown finishes any pending batches then shuts everything down
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
if !b.Batching() {
return
}
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Committing uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
//
// Note that we don't close b.in because that will
// cause write to closed channel in Commit when we are
// exiting due to a signal.
b.in <- quitRequest
b.wg.Wait()
})
}
// Commit commits the file using a batch call, first adding it to the
// batch and then waiting for the batch to complete in a synchronous
// way if async is not set.
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
select {
case <-b.closed:
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
default:
}
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
resp := make(chan batcherResponse, 1)
b.in <- batcherRequest{
commitInfo: commitInfo,
result: resp,
}
// If running async then don't wait for the result
if b.async {
return nil, nil
}
result := <-resp
return result.entry, result.err
}

View File

@@ -47,7 +47,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
@@ -122,14 +121,6 @@ var (
// Errors // Errors
errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode")) errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode"))
// Configure the batcher
defaultBatcherOptions = batcher.Options{
MaxBatchSize: 1000,
DefaultTimeoutSync: 500 * time.Millisecond,
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 100,
}
) )
// Gets an oauth config with the right scopes // Gets an oauth config with the right scopes
@@ -161,7 +152,7 @@ func init() {
}, },
}) })
}, },
Options: append(append(oauthutil.SharedOptions, []fs.Option{{ Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "chunk_size", Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size (< %v). Help: fmt.Sprintf(`Upload chunk size (< %v).
@@ -191,9 +182,8 @@ client_secret) to use this option as currently rclone's default set of
permissions doesn't include "members.read". This can be added once permissions doesn't include "members.read". This can be added once
v1.55 or later is in use everywhere. v1.55 or later is in use everywhere.
`, `,
Default: "", Default: "",
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "shared_files", Name: "shared_files",
Help: `Instructs rclone to work on individual shared files. Help: `Instructs rclone to work on individual shared files.
@@ -219,6 +209,68 @@ Note that we don't unmount the shared folder afterwards so the
shared folder.`, shared folder.`,
Default: false, Default: false,
Advanced: true, Advanced: true,
}, {
Name: "batch_mode",
Help: `Upload file batching sync|async|off.
This sets the batch mode used by rclone.
For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)
This has 3 possible values
- off - no batching
- sync - batch uploads and check completion (default)
- async - batch upload and don't check completion
Rclone will close any outstanding batches when it exits which may make
a delay on quit.
`,
Default: "sync",
Advanced: true,
}, {
Name: "batch_size",
Help: `Max number of files in upload batch.
This sets the batch size of files to upload. It has to be less than 1000.
By default this is 0 which means rclone which calculate the batch size
depending on the setting of batch_mode.
- batch_mode: async - default batch_size is 100
- batch_mode: sync - default batch_size is the same as --transfers
- batch_mode: off - not in use
Rclone will close any outstanding batches when it exits which may make
a delay on quit.
Setting this is a great idea if you are uploading lots of small files
as it will make them a lot quicker. You can use --transfers 32 to
maximise throughput.
`,
Default: 0,
Advanced: true,
}, {
Name: "batch_timeout",
Help: `Max time to allow an idle upload batch before uploading.
If an upload batch is idle for more than this long then it will be
uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, { }, {
Name: "pacer_min_sleep", Name: "pacer_min_sleep",
Default: defaultMinSleep, Default: defaultMinSleep,
@@ -237,22 +289,23 @@ shared folder.`,
encoder.EncodeDel | encoder.EncodeDel |
encoder.EncodeRightSpace | encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8, encoder.EncodeInvalidUtf8,
}}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...), }}...),
}) })
} }
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"` ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"` Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"` SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"` SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"` BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"` BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"` BatchTimeout fs.Duration `config:"batch_timeout"`
AsyncBatch bool `config:"async_batch"` BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"` AsyncBatch bool `config:"async_batch"`
Enc encoder.MultiEncoder `config:"encoding"` PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
} }
// Fs represents a remote dropbox server // Fs represents a remote dropbox server
@@ -271,7 +324,7 @@ type Fs struct {
slashRootSlash string // root with "/" prefix and postfix, lowercase slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none ns string // The namespace we are using or "" for none
batcher *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata] batcher *batcher // batch builder
} }
// Object describes a dropbox object // Object describes a dropbox object
@@ -397,11 +450,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci: ci, ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
} }
batcherOptions := defaultBatcherOptions f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
batcherOptions.Mode = f.opt.BatchMode
batcherOptions.Size = f.opt.BatchSize
batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -946,7 +995,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
if root == "/" { if root == "/" {
return errors.New("can't remove root directory") return errors.New("can't remove root directory")
} }
encRoot := f.opt.Enc.FromStandardPath(root)
if check { if check {
// check directory exists // check directory exists
@@ -955,9 +1003,10 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return fmt.Errorf("Rmdir: %w", err) return fmt.Errorf("Rmdir: %w", err)
} }
root = f.opt.Enc.FromStandardPath(root)
// check directory empty // check directory empty
arg := files.ListFolderArg{ arg := files.ListFolderArg{
Path: encRoot, Path: root,
Recursive: false, Recursive: false,
} }
if root == "/" { if root == "/" {
@@ -978,7 +1027,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
// remove it // remove it
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: encRoot}) _, err = f.srv.DeleteV2(&files.DeleteArg{Path: root})
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
return err return err
@@ -1672,7 +1721,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
// If we are batching then we should have written all the data now // If we are batching then we should have written all the data now
// store the commit info now for a batch commit // store the commit info now for a batch commit
if o.fs.batcher.Batching() { if o.fs.batcher.Batching() {
return o.fs.batcher.Commit(ctx, o.remote, args) return o.fs.batcher.Commit(ctx, args)
} }
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {

View File

@@ -28,14 +28,14 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded 509, // Bandwidth Limit Exceeded
} }
var errorRegex = regexp.MustCompile(`#(\d{1,3})`) var errorRegex = regexp.MustCompile(`#\d{1,3}`)
func parseFichierError(err error) int { func parseFichierError(err error) int {
matches := errorRegex.FindStringSubmatch(err.Error()) matches := errorRegex.FindStringSubmatch(err.Error())
if len(matches) == 0 { if len(matches) == 0 {
return 0 return 0
} }
code, err := strconv.Atoi(matches[1]) code, err := strconv.Atoi(matches[0])
if err != nil { if err != nil {
fs.Debugf(nil, "failed parsing fichier error: %v", err) fs.Debugf(nil, "failed parsing fichier error: %v", err)
return 0 return 0
@@ -408,32 +408,6 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
return response, nil return response, nil
} }
func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) {
request := &MoveDirRequest{
FolderID: folderID,
DestinationFolderID: destinationFolderID,
Rename: newLeaf,
// DestinationUser: destinationUser,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/mv.cgi",
}
response = &MoveDirResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't move dir: %w", err)
}
return response, nil
}
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) { func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
request := &CopyFileRequest{ request := &CopyFileRequest{
URLs: []string{url}, URLs: []string{url},

View File

@@ -38,9 +38,8 @@ func init() {
Description: "1Fichier", Description: "1Fichier",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.", Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key", Name: "api_key",
Sensitive: true,
}, { }, {
Help: "If you want to download a shared folder, add this parameter.", Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder", Name: "shared_folder",
@@ -488,51 +487,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil return dstObj, nil
} }
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove.
//
// If destination exists then return fs.ErrorDirExists.
//
// This is complicated by the fact that we can't use moveDir to move
// to a different directory AND rename at the same time as it can
// overwrite files in the source directory.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
if err != nil {
return err
}
srcIDnumeric, err := strconv.Atoi(srcID)
if err != nil {
return err
}
dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID)
if err != nil {
return err
}
var resp *MoveDirResponse
resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric)
if err != nil {
return fmt.Errorf("couldn't rename leaf: %w", err)
}
if resp.Status != "OK" {
return fmt.Errorf("couldn't rename leaf: %s", resp.Message)
}
srcFs.dirCache.FlushDir(srcRemote)
return nil
}
// Copy src to this remote using server side move operations. // Copy src to this remote using server side move operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
@@ -606,7 +560,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var ( var (
_ fs.Fs = (*Fs)(nil) _ fs.Fs = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil) _ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil) _ fs.Copier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil) _ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil) _ fs.PutUncheckeder = (*Fs)(nil)

View File

@@ -70,22 +70,6 @@ type MoveFileResponse struct {
URLs []string `json:"urls"` URLs []string `json:"urls"`
} }
// MoveDirRequest is the request structure of the corresponding request
type MoveDirRequest struct {
FolderID int `json:"folder_id"`
DestinationFolderID int `json:"destination_folder_id,omitempty"`
DestinationUser string `json:"destination_user"`
Rename string `json:"rename,omitempty"`
}
// MoveDirResponse is the response structure of the corresponding request
type MoveDirResponse struct {
Status string `json:"status"`
Message string `json:"message"`
OldName string `json:"old_name"`
NewName string `json:"new_name"`
}
// CopyFileRequest is the request structure of the corresponding request // CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct { type CopyFileRequest struct {
URLs []string `json:"urls"` URLs []string `json:"urls"`

View File

@@ -84,7 +84,6 @@ Leave blank normally.
Fill in to make rclone start with directory of a given ID. Fill in to make rclone start with directory of a given ID.
`, `,
Sensitive: true,
}, { }, {
Name: "permanent_token", Name: "permanent_token",
Help: `Permanent Authentication Token. Help: `Permanent Authentication Token.
@@ -98,7 +97,6 @@ These tokens are normally valid for several years.
For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
`, `,
Sensitive: true,
}, { }, {
Name: "token", Name: "token",
Help: `Session Token. Help: `Session Token.
@@ -108,8 +106,7 @@ usually valid for 1 hour.
Don't set this value - rclone will set it automatically. Don't set this value - rclone will set it automatically.
`, `,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "token_expiry", Name: "token_expiry",
Help: `Token expiry time. Help: `Token expiry time.
@@ -158,9 +155,9 @@ type Fs struct {
tokenMu sync.Mutex // hold when reading the token tokenMu sync.Mutex // hold when reading the token
token string // current access token token string // current access token
tokenExpiry time.Time // time the current token expires tokenExpiry time.Time // time the current token expires
tokenExpired atomic.Int32 tokenExpired int32 // read and written with atomic
canCopyWithName bool // set if detected that can use fi_name in copy canCopyWithName bool // set if detected that can use fi_name in copy
precision time.Duration // precision reported precision time.Duration // precision reported
} }
// Object describes a filefabric object // Object describes a filefabric object
@@ -243,7 +240,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
err = status // return the error from the RPC err = status // return the error from the RPC
code := status.GetCode() code := status.GetCode()
if code == "login_token_expired" { if code == "login_token_expired" {
f.tokenExpired.Add(1) atomic.AddInt32(&f.tokenExpired, 1)
} else { } else {
for _, retryCode := range retryStatusCodes { for _, retryCode := range retryStatusCodes {
if code == retryCode.code { if code == retryCode.code {
@@ -323,12 +320,12 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
var refreshed = false var refreshed = false
defer func() { defer func() {
if refreshed { if refreshed {
f.tokenExpired.Store(0) atomic.StoreInt32(&f.tokenExpired, 0)
} }
f.tokenMu.Unlock() f.tokenMu.Unlock()
}() }()
expired := f.tokenExpired.Load() != 0 expired := atomic.LoadInt32(&f.tokenExpired) != 0
if expired { if expired {
fs.Debugf(f, "Token invalid - refreshing") fs.Debugf(f, "Token invalid - refreshing")
} }

View File

@@ -15,7 +15,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/jlaffaye/ftp" "github.com/rclone/ftp"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
@@ -28,7 +28,6 @@ import (
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/proxy"
"github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/readers"
) )
@@ -49,15 +48,13 @@ func init() {
Description: "FTP", Description: "FTP",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "host", Name: "host",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".", Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "user", Name: "user",
Help: "FTP username.", Help: "FTP username.",
Default: currentUser, Default: currentUser,
Sensitive: true,
}, { }, {
Name: "port", Name: "port",
Help: "FTP port number.", Help: "FTP port number.",
@@ -175,18 +172,6 @@ Enabled by default. Use 0 to disable.`,
If this is set and no password is supplied then rclone will ask for a password If this is set and no password is supplied then rclone will ask for a password
`, `,
Advanced: true, Advanced: true,
}, {
Name: "socks_proxy",
Default: "",
Help: `Socks 5 proxy host.
Supports the format user:pass@host:port, user@host:port, host:port.
Example:
myUser:myPass@localhost:9005
`,
Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -231,7 +216,6 @@ type Options struct {
ShutTimeout fs.Duration `config:"shut_timeout"` ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"` AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
} }
// Fs represents a remote FTP server // Fs represents a remote FTP server
@@ -249,6 +233,7 @@ type Fs struct {
pool []*ftp.ServerConn pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser tokens *pacer.TokenDispenser
tlsConf *tls.Config
pacer *fs.Pacer // pacer for FTP connections pacer *fs.Pacer // pacer for FTP connections
fGetTime bool // true if the ftp library accepts GetTime fGetTime bool // true if the ftp library accepts GetTime
fSetTime bool // true if the ftp library accepts SetTime fSetTime bool // true if the ftp library accepts SetTime
@@ -361,36 +346,10 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
return fserrors.ShouldRetry(err), err return fserrors.ShouldRetry(err), err
} }
// Get a TLS config with a unique session cache.
//
// We can't share session caches between connections.
//
// See: https://github.com/rclone/rclone/issues/7234
func (f *Fs) tlsConfig() *tls.Config {
var tlsConfig *tls.Config
if f.opt.TLS || f.opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
if f.opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
}
if f.opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
return tlsConfig
}
// Open a new connection to the FTP server. // Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) { func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server") fs.Debugf(f, "Connecting to FTP server")
// tls.Config for this connection only. Will be used for data
// and control connections.
tlsConfig := f.tlsConfig()
// Make ftp library dial with fshttp dialer optionally using TLS // Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) { dial := func(network, address string) (conn net.Conn, err error) {
@@ -398,17 +357,12 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
defer func() { defer func() {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err) fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
}() }()
baseDialer := fshttp.NewDialer(ctx) conn, err = fshttp.NewDialer(ctx).Dial(network, address)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
} else {
conn, err = baseDialer.Dial(network, address)
}
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Connect using cleartext only for non TLS // Connect using cleartext only for non TLS
if tlsConfig == nil { if f.tlsConf == nil {
return conn, nil return conn, nil
} }
// Initial connection only needs to be cleartext for explicit TLS // Initial connection only needs to be cleartext for explicit TLS
@@ -417,7 +371,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
return conn, nil return conn, nil
} }
// Upgrade connection to TLS // Upgrade connection to TLS
tlsConn := tls.Client(conn, tlsConfig) tlsConn := tls.Client(conn, f.tlsConf)
// Do the initial handshake - tls.Client doesn't do it for us // Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up // If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426 // See: https://github.com/rclone/rclone/issues/6426
@@ -439,9 +393,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.TLS { if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf // Our dialer takes care of TLS but ftp library also needs tlsConf
// as a trigger for sending PSBZ and PROT options to server. // as a trigger for sending PSBZ and PROT options to server.
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig)) ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS { } else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig)) ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
} }
if f.opt.DisableEPSV { if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true)) ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -596,6 +550,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if opt.TLS && opt.ExplicitTLS { if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config") return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
} }
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
if opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
}
if opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
u := protocol + path.Join(dialAddr+"/", root) u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
f := &Fs{ f := &Fs{
@@ -608,6 +575,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
pass: pass, pass: pass,
dialAddr: dialAddr, dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency), tokens: pacer.NewTokenDispenser(opt.Concurrency),
tlsConf: tlsConfig,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
} }
f.features = (&fs.Features{ f.features = (&fs.Features{

View File

@@ -91,21 +91,18 @@ func init() {
}) })
}, },
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number", Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.", Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
Sensitive: true,
}, { }, {
Name: "user_project", Name: "user_project",
Help: "User project.\n\nOptional - needed only for requester pays.", Help: "User project.\n\nOptional - needed only for requester pays.",
Sensitive: true,
}, { }, {
Name: "service_account_file", Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp, Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, { }, {
Name: "service_account_credentials", Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
Sensitive: true,
}, { }, {
Name: "anonymous", Name: "anonymous",
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.", Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
@@ -1310,11 +1307,10 @@ func (o *Object) Storable() bool {
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
url := o.url
if o.fs.opt.UserProject != "" { if o.fs.opt.UserProject != "" {
url += "&userProject=" + o.fs.opt.UserProject o.url = o.url + "&userProject=" + o.fs.opt.UserProject
} }
req, err := http.NewRequestWithContext(ctx, "GET", url, nil) req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@@ -29,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
@@ -72,14 +71,6 @@ var (
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL, RedirectURL: oauthutil.RedirectURL,
} }
// Configure the batcher
defaultBatcherOptions = batcher.Options{
MaxBatchSize: 50,
DefaultTimeoutSync: 1000 * time.Millisecond,
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 50,
}
) )
// Register with Fs // Register with Fs
@@ -120,7 +111,7 @@ will count towards storage in your Google Account.`)
} }
return nil, fmt.Errorf("unknown state %q", config.State) return nil, fmt.Errorf("unknown state %q", config.State)
}, },
Options: append(append(oauthutil.SharedOptions, []fs.Option{{ Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "read_only", Name: "read_only",
Default: false, Default: false,
Help: `Set to make the Google Photos backend read only. Help: `Set to make the Google Photos backend read only.
@@ -167,7 +158,7 @@ listings and won't be transferred.`,
Default: (encoder.Base | Default: (encoder.Base |
encoder.EncodeCrLf | encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8), encoder.EncodeInvalidUtf8),
}}...), defaultBatcherOptions.FsOptions("")...), }}...),
}) })
} }
@@ -178,9 +169,6 @@ type Options struct {
StartYear int `config:"start_year"` StartYear int `config:"start_year"`
IncludeArchived bool `config:"include_archived"` IncludeArchived bool `config:"include_archived"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
} }
// Fs represents a remote storage server // Fs represents a remote storage server
@@ -199,7 +187,6 @@ type Fs struct {
uploadedMu sync.Mutex // to protect the below uploadedMu sync.Mutex // to protect the below
uploaded dirtree.DirTree // record of uploaded items uploaded dirtree.DirTree // record of uploaded items
createMu sync.Mutex // held when creating albums to prevent dupes createMu sync.Mutex // held when creating albums to prevent dupes
batcher *batcher.Batcher[uploadedItem, *api.MediaItem]
} }
// Object describes a storage object // Object describes a storage object
@@ -325,14 +312,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
albums: map[bool]*albums{}, albums: map[bool]*albums{},
uploaded: dirtree.New(), uploaded: dirtree.New(),
} }
batcherOptions := defaultBatcherOptions
batcherOptions.Mode = f.opt.BatchMode
batcherOptions.Size = f.opt.BatchSize
batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
if err != nil {
return nil, err
}
f.features = (&fs.Features{ f.features = (&fs.Features{
ReadMimeType: true, ReadMimeType: true,
}).Fill(ctx, f) }).Fill(ctx, f)
@@ -802,13 +781,6 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None) return hash.Set(hash.None)
} }
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
f.batcher.Shutdown()
return nil
}
// ------------------------------------------------------------ // ------------------------------------------------------------
// Fs returns the parent Fs // Fs returns the parent Fs
@@ -989,82 +961,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return resp.Body, err return resp.Body, err
} }
// input to the batcher
type uploadedItem struct {
AlbumID string // desired album
UploadToken string // upload ID
}
// Commit a batch of items to albumID returning the errors in errors
func (f *Fs) commitBatchAlbumID(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error, albumID string) {
// Create the media item from an UploadToken, optionally adding to an album
opts := rest.Opts{
Method: "POST",
Path: "/mediaItems:batchCreate",
}
var request = api.BatchCreateRequest{
AlbumID: albumID,
}
itemsInBatch := 0
for i := range items {
if items[i].AlbumID == albumID {
request.NewMediaItems = append(request.NewMediaItems, api.NewMediaItem{
SimpleMediaItem: api.SimpleMediaItem{
UploadToken: items[i].UploadToken,
},
})
itemsInBatch++
}
}
var result api.BatchCreateResponse
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
err = fmt.Errorf("failed to create media item: %w", err)
}
if err == nil && len(result.NewMediaItemResults) != itemsInBatch {
err = fmt.Errorf("bad response to BatchCreate expecting %d items but got %d", itemsInBatch, len(result.NewMediaItemResults))
}
j := 0
for i := range items {
if items[i].AlbumID == albumID {
if err == nil {
media := &result.NewMediaItemResults[j]
if media.Status.Code != 0 {
errors[i] = fmt.Errorf("upload failed: %s (%d)", media.Status.Message, media.Status.Code)
} else {
results[i] = &media.MediaItem
}
} else {
errors[i] = err
}
j++
}
}
}
// Called by the batcher to commit a batch
func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error) (err error) {
// Discover all the AlbumIDs as we have to upload these separately
//
// Should maybe have one batcher per AlbumID
albumIDs := map[string]struct{}{}
for i := range items {
albumIDs[items[i].AlbumID] = struct{}{}
}
// batch the albums
for albumID := range albumIDs {
// errors returned in errors
f.commitBatchAlbumID(ctx, items, results, errors, albumID)
}
return nil
}
// Update the object with the contents of the io.Reader, modTime and size // Update the object with the contents of the io.Reader, modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
@@ -1125,26 +1021,37 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.New("empty upload token") return errors.New("empty upload token")
} }
uploaded := uploadedItem{ // Create the media item from an UploadToken, optionally adding to an album
AlbumID: albumID, opts = rest.Opts{
UploadToken: uploadToken, Method: "POST",
Path: "/mediaItems:batchCreate",
} }
var request = api.BatchCreateRequest{
// Save the upload into an album AlbumID: albumID,
var info *api.MediaItem NewMediaItems: []api.NewMediaItem{
if o.fs.batcher.Batching() { {
info, err = o.fs.batcher.Commit(ctx, o.remote, uploaded) SimpleMediaItem: api.SimpleMediaItem{
} else { UploadToken: uploadToken,
errors := make([]error, 1) },
results := make([]*api.MediaItem, 1) },
err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errors) },
if err != nil {
err = errors[0]
info = results[0]
}
} }
var result api.BatchCreateResponse
o.setMetaData(info) err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("failed to create media item: %w", err)
}
if len(result.NewMediaItemResults) != 1 {
return errors.New("bad response to BatchCreate wrong number of items")
}
mediaItemResult := result.NewMediaItemResults[0]
if mediaItemResult.Status.Code != 0 {
return fmt.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
}
o.setMetaData(&mediaItemResult.MediaItem)
// Add upload to internal storage // Add upload to internal storage
if pattern.isUpload { if pattern.isUpload {

View File

@@ -23,7 +23,6 @@ func TestIntegration(t *testing.T) {
NilObject: (*hasher.Object)(nil), NilObject: (*hasher.Object)(nil),
UnimplementableFsMethods: []string{ UnimplementableFsMethods: []string{
"OpenWriterAt", "OpenWriterAt",
"OpenChunkWriter",
}, },
UnimplementableObjectMethods: []string{}, UnimplementableObjectMethods: []string{},
} }

View File

@@ -21,7 +21,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
) )
// Fs represents a HDFS server // Fs represents a HDFS server
@@ -32,15 +31,8 @@ type Fs struct {
opt Options // options for this backend opt Options // options for this backend
ci *fs.ConfigInfo // global config ci *fs.ConfigInfo // global config
client *hdfs.Client client *hdfs.Client
pacer *fs.Pacer // pacer for API calls
} }
const (
minSleep = 20 * time.Millisecond
maxSleep = 10 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go // copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
func getKerberosClient() (*krb.Client, error) { func getKerberosClient() (*krb.Client, error) {
configPath := os.Getenv("KRB5_CONFIG") configPath := os.Getenv("KRB5_CONFIG")
@@ -93,7 +85,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
} }
options := hdfs.ClientOptions{ options := hdfs.ClientOptions{
Addresses: opt.Namenode, Addresses: []string{opt.Namenode},
UseDatanodeHostname: false, UseDatanodeHostname: false,
} }
@@ -122,7 +114,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt: *opt, opt: *opt,
ci: fs.GetConfig(ctx), ci: fs.GetConfig(ctx),
client: client, client: client,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
} }
f.features = (&fs.Features{ f.features = (&fs.Features{

View File

@@ -19,11 +19,9 @@ func init() {
Description: "Hadoop distributed file system", Description: "Hadoop distributed file system",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "namenode", Name: "namenode",
Help: "Hadoop name nodes and ports.\n\nE.g. \"namenode-1:8020,namenode-2:8020,...\" to connect to host namenodes at port 8020.", Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true, Required: true,
Sensitive: true,
Default: fs.CommaSepList{},
}, { }, {
Name: "username", Name: "username",
Help: "Hadoop user name.", Help: "Hadoop user name.",
@@ -31,7 +29,6 @@ func init() {
Value: "root", Value: "root",
Help: "Connect to hdfs as root.", Help: "Connect to hdfs as root.",
}}, }},
Sensitive: true,
}, { }, {
Name: "service_principal_name", Name: "service_principal_name",
Help: `Kerberos service principal name for the namenode. Help: `Kerberos service principal name for the namenode.
@@ -39,8 +36,7 @@ func init() {
Enables KERBEROS authentication. Specifies the Service Principal Name Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\" (SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`, for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "data_transfer_protection", Name: "data_transfer_protection",
Help: `Kerberos data transfer protection: authentication|integrity|privacy. Help: `Kerberos data transfer protection: authentication|integrity|privacy.
@@ -66,7 +62,7 @@ and 'privacy'. Used only with KERBEROS enabled.`,
// Options for this backend // Options for this backend
type Options struct { type Options struct {
Namenode fs.CommaSepList `config:"namenode"` Namenode string `config:"namenode"`
Username string `config:"username"` Username string `config:"username"`
ServicePrincipalName string `config:"service_principal_name"` ServicePrincipalName string `config:"service_principal_name"`
DataTransferProtection string `config:"data_transfer_protection"` DataTransferProtection string `config:"data_transfer_protection"`

View File

@@ -5,12 +5,10 @@ package hdfs
import ( import (
"context" "context"
"errors"
"io" "io"
"path" "path"
"time" "time"
"github.com/colinmarc/hdfs/v2"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/readers"
@@ -108,7 +106,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update object // Update object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
realpath := o.fs.realpath(o.remote) realpath := o.fs.realpath(src.Remote())
dirname := path.Dir(realpath) dirname := path.Dir(realpath)
fs.Debugf(o.fs, "update [%s]", realpath) fs.Debugf(o.fs, "update [%s]", realpath)
@@ -143,23 +141,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err return err
} }
// If the datanodes have acknowledged all writes but not yet err = out.Close()
// to the namenode, FileWriter.Close can return ErrReplicating
// (wrapped in an os.PathError). This indicates that all data
// has been written, but the lease is still open for the file.
//
// It is safe in this case to either ignore the error (and let
// the lease expire on its own) or to call Close multiple
// times until it completes without an error. The Java client,
// for context, always chooses to retry, with exponential
// backoff.
err = o.fs.pacer.Call(func() (bool, error) {
err := out.Close()
if err == nil {
return false, nil
}
return errors.Is(err, hdfs.ErrReplicating), err
})
if err != nil { if err != nil {
cleanup() cleanup()
return err return err

View File

@@ -36,7 +36,6 @@ func init() {
Name: "http", Name: "http",
Description: "HTTP", Description: "HTTP",
NewFs: NewFs, NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "url", Name: "url",
Help: "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.", Help: "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
@@ -211,42 +210,6 @@ func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Op
return createFileResult() return createFileResult()
} }
// Make the http connection with opt
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
if len(opt.Headers)%2 != 0 {
return false, errors.New("odd number of headers supplied")
}
if !strings.HasSuffix(opt.Endpoint, "/") {
opt.Endpoint += "/"
}
// Parse the endpoint and stick the root onto it
base, err := url.Parse(opt.Endpoint)
if err != nil {
return false, err
}
u, err := rest.URLJoin(base, rest.URLPathEscape(f.root))
if err != nil {
return false, err
}
client := fshttp.NewClient(ctx)
endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
fs.Debugf(nil, "Root: %s", endpoint)
u, err = url.Parse(endpoint)
if err != nil {
return false, err
}
// Update f with the new parameters
f.httpClient = client
f.endpoint = u
f.endpointURL = u.String()
return isFile, nil
}
// NewFs creates a new Fs object from the name and root. It connects to // NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file. // the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -257,23 +220,47 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, err return nil, err
} }
if len(opt.Headers)%2 != 0 {
return nil, errors.New("odd number of headers supplied")
}
if !strings.HasSuffix(opt.Endpoint, "/") {
opt.Endpoint += "/"
}
// Parse the endpoint and stick the root onto it
base, err := url.Parse(opt.Endpoint)
if err != nil {
return nil, err
}
u, err := rest.URLJoin(base, rest.URLPathEscape(root))
if err != nil {
return nil, err
}
client := fshttp.NewClient(ctx)
endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
fs.Debugf(nil, "Root: %s", endpoint)
u, err = url.Parse(endpoint)
if err != nil {
return nil, err
}
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
f := &Fs{ f := &Fs{
name: name, name: name,
root: root, root: root,
opt: *opt, opt: *opt,
ci: ci, ci: ci,
httpClient: client,
endpoint: u,
endpointURL: u.String(),
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
}).Fill(ctx, f) }).Fill(ctx, f)
// Make the http connection
isFile, err := f.httpConnection(ctx, opt)
if err != nil {
return nil, err
}
if isFile { if isFile {
// return an error with an fs which points to the parent // return an error with an fs which points to the parent
return f, fs.ErrorIsFile return f, fs.ErrorIsFile
@@ -698,66 +685,10 @@ func (o *Object) MimeType(ctx context.Context) string {
return o.contentType return o.contentType
} }
var commandHelp = []fs.CommandHelp{{
Name: "set",
Short: "Set command for updating the config parameters.",
Long: `This set command can be used to update the config parameters
for a running http backend.
Usage Examples:
rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=remote: -o url=https://example.com
The option keys are named as they are in the config file.
This rebuilds the connection to the http backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.
It doesn't return anything.
`,
}}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "set":
newOpt := f.opt
err := configstruct.Set(configmap.Simple(opt), &newOpt)
if err != nil {
return nil, fmt.Errorf("reading config: %w", err)
}
_, err = f.httpConnection(ctx, &newOpt)
if err != nil {
return nil, fmt.Errorf("updating session: %w", err)
}
f.opt = newOpt
keys := []string{}
for k := range opt {
keys = append(keys, k)
}
fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
// Check the interfaces are satisfied // Check the interfaces are satisfied
var ( var (
_ fs.Fs = &Fs{} _ fs.Fs = &Fs{}
_ fs.PutStreamer = &Fs{} _ fs.PutStreamer = &Fs{}
_ fs.Object = &Object{} _ fs.Object = &Object{}
_ fs.MimeTyper = &Object{} _ fs.MimeTyper = &Object{}
_ fs.Commander = &Fs{}
) )

View File

@@ -1,66 +0,0 @@
// Package client provides a client for interacting with the ImageKit API.
package client
import (
"context"
"fmt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/rest"
)
// ImageKit main struct
type ImageKit struct {
Prefix string
UploadPrefix string
Timeout int64
UploadTimeout int64
PrivateKey string
PublicKey string
URLEndpoint string
HTTPClient *rest.Client
}
// NewParams is a struct to define parameters to imagekit
type NewParams struct {
PrivateKey string
PublicKey string
URLEndpoint string
}
// New returns ImageKit object from environment variables
func New(ctx context.Context, params NewParams) (*ImageKit, error) {
privateKey := params.PrivateKey
publicKey := params.PublicKey
endpointURL := params.URLEndpoint
switch {
case privateKey == "":
return nil, fmt.Errorf("ImageKit.io URL endpoint is required")
case publicKey == "":
return nil, fmt.Errorf("ImageKit.io public key is required")
case endpointURL == "":
return nil, fmt.Errorf("ImageKit.io private key is required")
}
cliCtx, cliCfg := fs.AddConfig(ctx)
cliCfg.UserAgent = "rclone/imagekit"
client := rest.NewClient(fshttp.NewClient(cliCtx))
client.SetUserPass(privateKey, "")
client.SetHeader("Accept", "application/json")
return &ImageKit{
Prefix: "https://api.imagekit.io/v2",
UploadPrefix: "https://upload.imagekit.io/api/v2",
Timeout: 60,
UploadTimeout: 3600,
PrivateKey: params.PrivateKey,
PublicKey: params.PublicKey,
URLEndpoint: params.URLEndpoint,
HTTPClient: client,
}, nil
}

View File

@@ -1,252 +0,0 @@
package client
import (
"context"
"errors"
"fmt"
"net/http"
"net/url"
"time"
"github.com/rclone/rclone/lib/rest"
"gopkg.in/validator.v2"
)
// FilesOrFolderParam struct is a parameter type to ListFiles() function to search / list media library files.
type FilesOrFolderParam struct {
Path string `json:"path,omitempty"`
Limit int `json:"limit,omitempty"`
Skip int `json:"skip,omitempty"`
SearchQuery string `json:"searchQuery,omitempty"`
}
// AITag represents an AI tag for a media library file.
type AITag struct {
Name string `json:"name"`
Confidence float32 `json:"confidence"`
Source string `json:"source"`
}
// File represents media library File details.
type File struct {
FileID string `json:"fileId"`
Name string `json:"name"`
FilePath string `json:"filePath"`
Type string `json:"type"`
VersionInfo map[string]string `json:"versionInfo"`
IsPrivateFile *bool `json:"isPrivateFile"`
CustomCoordinates *string `json:"customCoordinates"`
URL string `json:"url"`
Thumbnail string `json:"thumbnail"`
FileType string `json:"fileType"`
Mime string `json:"mime"`
Height int `json:"height"`
Width int `json:"Width"`
Size uint64 `json:"size"`
HasAlpha bool `json:"hasAlpha"`
CustomMetadata map[string]any `json:"customMetadata,omitempty"`
EmbeddedMetadata map[string]any `json:"embeddedMetadata"`
CreatedAt time.Time `json:"createdAt"`
UpdatedAt time.Time `json:"updatedAt"`
Tags []string `json:"tags"`
AITags []AITag `json:"AITags"`
}
// Folder represents media library Folder details.
type Folder struct {
*File
FolderPath string `json:"folderPath"`
}
// CreateFolderParam represents parameter to create folder api
type CreateFolderParam struct {
FolderName string `validate:"nonzero" json:"folderName"`
ParentFolderPath string `validate:"nonzero" json:"parentFolderPath"`
}
// DeleteFolderParam represents parameter to delete folder api
type DeleteFolderParam struct {
FolderPath string `validate:"nonzero" json:"folderPath"`
}
// MoveFolderParam represents parameter to move folder api
type MoveFolderParam struct {
SourceFolderPath string `validate:"nonzero" json:"sourceFolderPath"`
DestinationPath string `validate:"nonzero" json:"destinationPath"`
}
// JobIDResponse respresents response struct with JobID for folder operations
type JobIDResponse struct {
JobID string `json:"jobId"`
}
// JobStatus represents response Data to job status api
type JobStatus struct {
JobID string `json:"jobId"`
Type string `json:"type"`
Status string `json:"status"`
}
// File represents media library File details.
func (ik *ImageKit) File(ctx context.Context, fileID string) (*http.Response, *File, error) {
data := &File{}
response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
Method: "GET",
Path: fmt.Sprintf("/files/%s/details", fileID),
RootURL: ik.Prefix,
IgnoreStatus: true,
}, nil, data)
return response, data, err
}
// Files retrieves media library files. Filter options can be supplied as FilesOrFolderParam.
func (ik *ImageKit) Files(ctx context.Context, params FilesOrFolderParam, includeVersion bool) (*http.Response, *[]File, error) {
var SearchQuery = `type = "file"`
if includeVersion {
SearchQuery = `type IN ["file", "file-version"]`
}
if params.SearchQuery != "" {
SearchQuery = params.SearchQuery
}
parameters := url.Values{}
parameters.Set("skip", fmt.Sprintf("%d", params.Skip))
parameters.Set("limit", fmt.Sprintf("%d", params.Limit))
parameters.Set("path", params.Path)
parameters.Set("searchQuery", SearchQuery)
data := &[]File{}
response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
Method: "GET",
Path: "/files",
RootURL: ik.Prefix,
Parameters: parameters,
}, nil, data)
return response, data, err
}
// DeleteFile removes file by FileID from media library
func (ik *ImageKit) DeleteFile(ctx context.Context, fileID string) (*http.Response, error) {
var err error
if fileID == "" {
return nil, errors.New("fileID can not be empty")
}
response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
Method: "DELETE",
Path: fmt.Sprintf("/files/%s", fileID),
RootURL: ik.Prefix,
NoResponse: true,
}, nil, nil)
return response, err
}
// Folders retrieves media library folders. Filter options can be supplied
// as FilesOrFolderParam. A caller-supplied SearchQuery overrides the
// default `type = "folder"` query.
//
// Returns the raw HTTP response, the decoded folder list and any error.
func (ik *ImageKit) Folders(ctx context.Context, params FilesOrFolderParam) (*http.Response, *[]Folder, error) {
	searchQuery := `type = "folder"`
	if params.SearchQuery != "" {
		searchQuery = params.SearchQuery
	}
	parameters := url.Values{}
	parameters.Set("skip", fmt.Sprintf("%d", params.Skip))
	parameters.Set("limit", fmt.Sprintf("%d", params.Limit))
	parameters.Set("path", params.Path)
	parameters.Set("searchQuery", searchQuery)
	data := &[]Folder{}
	// Folders are listed via the /files endpoint filtered by searchQuery.
	resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
		Method:     "GET",
		Path:       "/files",
		RootURL:    ik.Prefix,
		Parameters: parameters,
	}, nil, data)
	// The original had a redundant `if err != nil` branch returning the
	// identical values - a single return covers both cases.
	return resp, data, err
}
// CreateFolder creates a new folder in media library
//
// param is validated before the API call; validation errors are returned
// with a nil response.
func (ik *ImageKit) CreateFolder(ctx context.Context, param CreateFolderParam) (*http.Response, error) {
	if err := validator.Validate(&param); err != nil {
		return nil, err
	}
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/folder",
		RootURL:    ik.Prefix,
		NoResponse: true,
	}
	return ik.HTTPClient.CallJSON(ctx, &opts, param, nil)
}
// DeleteFolder removes the folder from media library
//
// param is validated before the API call; validation errors are returned
// with a nil response.
func (ik *ImageKit) DeleteFolder(ctx context.Context, param DeleteFolderParam) (*http.Response, error) {
	if err := validator.Validate(&param); err != nil {
		return nil, err
	}
	opts := rest.Opts{
		Method:     "DELETE",
		Path:       "/folder",
		RootURL:    ik.Prefix,
		NoResponse: true,
	}
	return ik.HTTPClient.CallJSON(ctx, &opts, param, nil)
}
// MoveFolder moves given folder to new path in media library
//
// The move runs asynchronously server-side; the returned JobIDResponse
// carries the job ID which can be polled via BulkJobStatus.
func (ik *ImageKit) MoveFolder(ctx context.Context, param MoveFolderParam) (*http.Response, *JobIDResponse, error) {
	if err := validator.Validate(&param); err != nil {
		return nil, nil, err
	}
	job := &JobIDResponse{}
	opts := rest.Opts{
		Method:  "PUT",
		Path:    "bulkJobs/moveFolder",
		RootURL: ik.Prefix,
	}
	resp, err := ik.HTTPClient.CallJSON(ctx, &opts, param, job)
	return resp, job, err
}
// BulkJobStatus retrieves the status of a bulk job by job ID.
//
// jobID must be non-empty; otherwise an error is returned without making
// an API call.
func (ik *ImageKit) BulkJobStatus(ctx context.Context, jobID string) (*http.Response, *JobStatus, error) {
	if jobID == "" {
		return nil, nil, errors.New("jobId can not be blank")
	}
	status := &JobStatus{}
	opts := rest.Opts{
		Method:  "GET",
		Path:    "bulkJobs/" + jobID,
		RootURL: ik.Prefix,
	}
	resp, err := ik.HTTPClient.CallJSON(ctx, &opts, nil, status)
	return resp, status, err
}

View File

@@ -1,96 +0,0 @@
package client
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"github.com/rclone/rclone/lib/rest"
)
// UploadParam defines upload parameters
type UploadParam struct {
	FileName      string `json:"fileName"`                 // name the file is stored under
	Folder        string `json:"folder,omitempty"`         // default value: /
	Tags          string `json:"tags,omitempty"`           // comma separated list of tags
	IsPrivateFile *bool  `json:"isPrivateFile,omitempty"`  // default: false
}

// UploadResult defines the response structure for the upload API
type UploadResult struct {
	FileID       string `json:"fileId"`
	Name         string `json:"name"`
	URL          string `json:"url"`
	ThumbnailURL string `json:"thumbnailUrl"`
	Height       int    `json:"height"`
	// NOTE(review): the "Width" JSON tag is capitalised unlike the other
	// lowerCamel tags here - confirm against the upload API response schema.
	Width       int               `json:"Width"`
	Size        uint64            `json:"size"`
	FilePath    string            `json:"filePath"`
	AITags      []map[string]any  `json:"AITags"`
	VersionInfo map[string]string `json:"versionInfo"`
}
// Upload uploads an asset to a imagekit account.
//
// The asset can be:
//   - the actual data (io.Reader)
//   - the Data URI (Base64 encoded), max ~60 MB (62,910,000 chars)
//   - the remote FTP, HTTP or HTTPS URL address of an existing file
//
// param.FileName is required. Returns the raw HTTP response, the decoded
// upload result and any error.
//
// https://docs.imagekit.io/api-reference/upload-file-api/server-side-file-upload
func (ik *ImageKit) Upload(ctx context.Context, file io.Reader, param UploadParam) (*http.Response, *UploadResult, error) {
	if param.FileName == "" {
		return nil, nil, errors.New("Upload: Filename is required")
	}
	// Build the multipart form fields. Unique file names are disabled so
	// the caller controls the remote name.
	formParams := url.Values{}
	formParams.Add("useUniqueFileName", fmt.Sprint(false))
	// FileName is guaranteed non-empty by the guard above, so no re-check
	// is needed (the original re-tested it redundantly).
	formParams.Add("fileName", param.FileName)
	if param.Tags != "" {
		formParams.Add("tags", param.Tags)
	}
	if param.Folder != "" {
		formParams.Add("folder", param.Folder)
	}
	if param.IsPrivateFile != nil {
		formParams.Add("isPrivateFile", fmt.Sprintf("%v", *param.IsPrivateFile))
	}
	response := &UploadResult{}
	formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to make multipart upload: %w", err)
	}
	opts := rest.Opts{
		Method:      "POST",
		Path:        "/files/upload",
		RootURL:     ik.UploadPrefix,
		Body:        formReader,
		ContentType: contentType,
	}
	// A single return covers success and failure - the original had a
	// redundant error branch returning identical values.
	resp, err := ik.HTTPClient.CallJSON(ctx, &opts, nil, response)
	return resp, response, err
}

View File

@@ -1,72 +0,0 @@
package client
import (
"crypto/hmac"
"crypto/sha1"
"encoding/hex"
"fmt"
neturl "net/url"
"strconv"
"strings"
"time"
)
// URLParam represents parameters for generating url
type URLParam struct {
	// NOTE(review): Path is not referenced by the URL() method below -
	// confirm whether it is consumed elsewhere or is dead.
	Path            string
	Src             string // fully qualified source URL; parsed and optionally signed by URL()
	URLEndpoint     string // overrides the client's URLEndpoint when non-empty
	Signed          bool   // if true, append ik-t / ik-s signature parameters
	ExpireSeconds   int64  // signature lifetime relative to now, in seconds
	QueryParameters map[string]string // extra query parameters merged into Src
}
// URL generates url from URLParam
//
// The source URL is parsed, any QueryParameters are merged in, and - if
// Signed is set - an HMAC-SHA1 signature (ik-s) and expiry timestamp
// (ik-t) are appended, computed over the endpoint-relative path plus the
// expiry using the account's private key.
func (ik *ImageKit) URL(params URLParam) (string, error) {
	endpoint := params.URLEndpoint
	if endpoint == "" {
		endpoint = ik.URLEndpoint
	}
	// Normalise to exactly one trailing slash.
	endpoint = strings.TrimRight(endpoint, "/") + "/"

	parsed, err := neturl.Parse(params.Src)
	if err != nil {
		return "", err
	}
	query := parsed.Query()
	for key, value := range params.QueryParameters {
		query.Set(key, value)
	}
	parsed.RawQuery = query.Encode()
	result := parsed.String()

	if !params.Signed {
		return result, nil
	}

	expires := strconv.FormatInt(time.Now().Unix()+params.ExpireSeconds, 10)
	// Sign the endpoint-relative path concatenated with the expiry.
	signedPath := strings.Replace(result, endpoint, "", 1) + expires
	mac := hmac.New(sha1.New, []byte(ik.PrivateKey))
	mac.Write([]byte(signedPath))
	signature := hex.EncodeToString(mac.Sum(nil))

	separator := "?"
	if strings.Contains(result, "?") {
		separator = "&"
	}
	return result + separator + fmt.Sprintf("ik-t=%s&ik-s=%s", expires, signature), nil
}

View File

@@ -1,828 +0,0 @@
// Package imagekit provides an interface to the ImageKit.io media library.
package imagekit
import (
"context"
"errors"
"fmt"
"io"
"math"
"net/http"
"path"
"strconv"
"strings"
"time"
"github.com/rclone/rclone/backend/imagekit/client"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/version"
)
// Pacer tuning constants used for API rate limiting in NewFs.
const (
	minSleep      = 1 * time.Millisecond   // minimum delay between API calls
	maxSleep      = 100 * time.Millisecond // maximum back-off delay
	decayConstant = 2                      // bigger for slower decay, exponential
)
// systemMetadataInfo describes the read-only system metadata keys this
// backend publishes for each object (see Object.Metadata).
var systemMetadataInfo = map[string]fs.MetadataHelp{
	"btime": {
		Help:     "Time of file birth (creation) read from Last-Modified header",
		Type:     "RFC 3339",
		Example:  "2006-01-02T15:04:05.999999999Z07:00",
		ReadOnly: true,
	},
	"size": {
		Help:     "Size of the object in bytes",
		Type:     "int64",
		ReadOnly: true,
	},
	"file-type": {
		Help:     "Type of the file",
		Type:     "string",
		Example:  "image",
		ReadOnly: true,
	},
	"height": {
		Help:     "Height of the image or video in pixels",
		Type:     "int",
		ReadOnly: true,
	},
	"width": {
		Help:     "Width of the image or video in pixels",
		Type:     "int",
		ReadOnly: true,
	},
	"has-alpha": {
		Help:     "Whether the image has alpha channel or not",
		Type:     "bool",
		ReadOnly: true,
	},
	"tags": {
		Help:     "Tags associated with the file",
		Type:     "string",
		Example:  "tag1,tag2",
		ReadOnly: true,
	},
	"google-tags": {
		Help:     "AI generated tags by Google Cloud Vision associated with the image",
		Type:     "string",
		Example:  "tag1,tag2",
		ReadOnly: true,
	},
	"aws-tags": {
		Help:     "AI generated tags by AWS Rekognition associated with the image",
		Type:     "string",
		Example:  "tag1,tag2",
		ReadOnly: true,
	},
	"is-private-file": {
		Help:     "Whether the file is private or not",
		Type:     "bool",
		ReadOnly: true,
	},
	"custom-coordinates": {
		Help:     "Custom coordinates of the file",
		Type:     "string",
		Example:  "0,0,100,100",
		ReadOnly: true,
	},
}
// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "imagekit",
		Description: "ImageKit.io",
		NewFs:       NewFs,
		MetadataInfo: &fs.MetadataInfo{
			System: systemMetadataInfo,
			Help:   `Any metadata supported by the underlying remote is read and written.`,
		},
		Options: []fs.Option{
			{
				Name:     "endpoint",
				Help:     "You can find your ImageKit.io URL endpoint in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)",
				Required: true,
			},
			{
				Name:      "public_key",
				Help:      "You can find your ImageKit.io public key in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)",
				Required:  true,
				Sensitive: true,
			},
			{
				Name:      "private_key",
				Help:      "You can find your ImageKit.io private key in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)",
				Required:  true,
				Sensitive: true,
			},
			{
				Name:     "only_signed",
				Help:     "If you have configured `Restrict unsigned image URLs` in your dashboard settings, set this to true.",
				Default:  false,
				Advanced: true,
			},
			{
				Name:     "versions",
				Help:     "Include old versions in directory listings.",
				Default:  false,
				Advanced: true,
			},
			{
				Name:     "upload_tags",
				Help:     "Tags to add to the uploaded files, e.g. \"tag1,tag2\".",
				Default:  "",
				Advanced: true,
			},
			{
				// Characters below are either reserved by the ImageKit API or
				// cannot round-trip, so they are percent-encoded.
				Name:     config.ConfigEncoding,
				Help:     config.ConfigEncodingHelp,
				Advanced: true,
				Default: (encoder.EncodeZero |
					encoder.EncodeSlash |
					encoder.EncodeQuestion |
					encoder.EncodeHashPercent |
					encoder.EncodeCtl |
					encoder.EncodeDel |
					encoder.EncodeDot |
					encoder.EncodeDoubleQuote |
					encoder.EncodePercent |
					encoder.EncodeBackSlash |
					encoder.EncodeDollar |
					encoder.EncodeLtGt |
					encoder.EncodeSquareBracket |
					encoder.EncodeInvalidUtf8),
			},
		},
	})
}
// Options defines the configuration for this backend
type Options struct {
	Endpoint   string `config:"endpoint"`    // ImageKit URL endpoint
	PublicKey  string `config:"public_key"`  // API public key
	PrivateKey string `config:"private_key"` // API private key, used for signing URLs
	OnlySigned bool   `config:"only_signed"` // generate signed URLs only
	Versions   bool   `config:"versions"`    // include old versions in directory listings
	// UploadTags holds the "upload_tags" option registered in init(); the
	// field was previously missing so the configured value was inaccessible.
	UploadTags string               `config:"upload_tags"`
	Enc        encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote to ImageKit
type Fs struct {
	name     string           // name of remote
	root     string           // root path (always absolute, rooted at "/")
	opt      Options          // parsed options
	features *fs.Features     // optional features
	ik       *client.ImageKit // ImageKit client
	pacer    *fs.Pacer        // pacer for API calls
}

// Object describes a ImageKit file
type Object struct {
	fs          *Fs         // The Fs this object is part of
	remote      string      // The remote path
	filePath    string      // The path to the file
	contentType string      // The content type of the object if known - may be ""
	timestamp   time.Time   // The timestamp of the object if known - may be zero
	file        client.File // The media file if known - may be nil
	versionID   string      // If present this points to an object version (set for "file-version" entries)
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	ik, err := client.New(ctx, client.NewParams{
		URLEndpoint: opt.Endpoint,
		PublicKey:   opt.PublicKey,
		PrivateKey:  opt.PrivateKey,
	})
	if err != nil {
		return nil, err
	}
	f := &Fs{
		name:  name,
		opt:   *opt,
		ik:    ik,
		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	// Paths are rooted at "/" on the ImageKit side.
	f.root = path.Join("/", root)
	f.features = (&fs.Features{
		CaseInsensitive:         false,
		DuplicateFiles:          false,
		ReadMimeType:            true,
		WriteMimeType:           false,
		CanHaveEmptyDirectories: true,
		BucketBased:             false,
		ServerSideAcrossConfigs: false,
		IsLocal:                 false,
		SlowHash:                true,
		ReadMetadata:            true,
		WriteMetadata:           false,
		UserMetadata:            false,
		FilterAware:             true,
		PartialUploads:          false,
		NoMultiThreading:        false,
	}).Fill(ctx, f)
	// If root points at an existing file rather than a directory, move the
	// root up to its parent and return fs.ErrorIsFile as the Fs contract
	// requires.
	if f.root != "/" {
		r := f.root
		folderPath := f.EncodePath(r[:strings.LastIndex(r, "/")+1])
		fileName := f.EncodeFileName(r[strings.LastIndex(r, "/")+1:])
		file := f.getFileByName(ctx, folderPath, fileName)
		if file != nil {
			newRoot := path.Dir(f.root)
			f.root = newRoot
			return f, fs.ErrorIsFile
		}
	}
	return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	// f.root is stored absolute; the public root is relative.
	return strings.TrimLeft(f.root, "/")
}

// String returns a description of the FS
func (f *Fs) String() string {
	return fmt.Sprintf("FS imagekit: %s", f.root)
}

// Precision of the ModTimes in this Fs
//
// Modification times cannot be set on this backend (see SetModTime), so
// precision is reported as unsupported.
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// Hashes returns the supported hash types of the filesystem.
//
// No hash types are advertised by this backend.
func (f *Fs) Hashes() hash.Set {
	return hash.NewHashSet()
}

// Features returns the optional features of this Fs.
func (f *Fs) Features() *fs.Features {
	return f.features
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	remote := path.Join(f.root, dir)
	remote = f.EncodePath(remote)
	// For anything other than the root, verify the directory itself exists
	// first so a missing directory yields fs.ErrorDirNotFound.
	if remote != "/" {
		parentFolderPath, folderName := path.Split(remote)
		folderExists, err := f.getFolderByName(ctx, parentFolderPath, folderName)
		if err != nil {
			return make(fs.DirEntries, 0), err
		}
		if folderExists == nil {
			return make(fs.DirEntries, 0), fs.ErrorDirNotFound
		}
	}
	folders, folderError := f.getFolders(ctx, remote)
	if folderError != nil {
		return make(fs.DirEntries, 0), folderError
	}
	// Old file versions are included only when the "versions" option is set.
	files, fileError := f.getFiles(ctx, remote, f.opt.Versions)
	if fileError != nil {
		return make(fs.DirEntries, 0), fileError
	}
	res := make([]fs.DirEntry, 0, len(folders)+len(files))
	for _, folder := range folders {
		// Strip the (encoded) root prefix and decode to get the remote name.
		folderPath := f.DecodePath(strings.TrimLeft(strings.Replace(folder.FolderPath, f.EncodePath(f.root), "", 1), "/"))
		res = append(res, fs.NewDir(folderPath, folder.UpdatedAt))
	}
	for _, file := range files {
		res = append(res, f.newObject(ctx, remote, file))
	}
	return res, nil
}
// newObject builds an *Object from a client.File, translating the encoded
// API file path back into a remote path relative to f.root. For old file
// versions (type "file-version") the remote name is suffixed with the
// version timestamp and versionID is recorded.
func (f *Fs) newObject(ctx context.Context, remote string, file client.File) *Object {
	remoteFile := strings.TrimLeft(strings.Replace(file.FilePath, f.EncodePath(f.root), "", 1), "/")
	folderPath, fileName := path.Split(remoteFile)
	remoteFile = path.Join(f.DecodePath(folderPath), f.DecodeFileName(fileName))
	// Build the common object once instead of duplicating the literal in
	// both branches as the original did.
	o := &Object{
		fs:          f,
		remote:      remoteFile,
		filePath:    file.FilePath,
		contentType: file.Mime,
		timestamp:   file.UpdatedAt,
		file:        file,
	}
	if file.Type == "file-version" {
		o.remote = version.Add(remoteFile, file.UpdatedAt)
		o.versionID = file.VersionInfo["id"]
	}
	return o
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
//
// If remote points to a directory then it should return
// ErrorIsDir if possible without doing any extra work,
// otherwise ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	r := path.Join(f.root, remote)
	folderPath, fileName := path.Split(r)
	folderPath = f.EncodePath(folderPath)
	fileName = f.EncodeFileName(fileName)
	// Check for a folder of the same name first so ErrorIsDir is returned
	// in preference to ErrorObjectNotFound.
	isFolder, err := f.getFolderByName(ctx, folderPath, fileName)
	if err != nil {
		return nil, err
	}
	if isFolder != nil {
		return nil, fs.ErrorIsDir
	}
	file := f.getFileByName(ctx, folderPath, fileName)
	if file == nil {
		return nil, fs.ErrorObjectNotFound
	}
	return f.newObject(ctx, r, *file), nil
}
// Put in to the remote path with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Delegates to uploadFile, which encodes the path and performs the
	// multipart upload with retries.
	return uploadFile(ctx, f, in, src.Remote(), options...)
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	parent, leaf := path.Split(path.Join(f.root, dir))
	param := client.CreateFolderParam{
		ParentFolderPath: f.EncodePath(parent),
		FolderName:       f.EncodeFileName(leaf),
	}
	return f.pacer.Call(func() (bool, error) {
		res, err := f.ik.CreateFolder(ctx, param)
		return f.shouldRetry(ctx, res, err)
	})
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
	// The API deletes folders recursively, so check emptiness first.
	entries, err := f.List(ctx, dir)
	if err != nil {
		return err
	}
	if len(entries) > 0 {
		return errors.New("directory is not empty")
	}
	err = f.pacer.Call(func() (bool, error) {
		var res *http.Response
		res, err = f.ik.DeleteFolder(ctx, client.DeleteFolderParam{
			FolderPath: f.EncodePath(path.Join(f.root, dir)),
		})
		// Guard against a nil response (e.g. transport-level error) before
		// reading the status code - the original dereferenced res
		// unconditionally and could panic.
		if res != nil && res.StatusCode == http.StatusNotFound {
			return false, fs.ErrorDirNotFound
		}
		return f.shouldRetry(ctx, res, err)
	})
	return err
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
	remote := path.Join(f.root, dir)
	err = f.pacer.Call(func() (bool, error) {
		var res *http.Response
		res, err = f.ik.DeleteFolder(ctx, client.DeleteFolderParam{
			FolderPath: f.EncodePath(remote),
		})
		// Guard against a nil response (e.g. transport-level error) before
		// reading the status code - the original dereferenced res
		// unconditionally and could panic.
		if res != nil && res.StatusCode == http.StatusNotFound {
			return false, fs.ErrorDirNotFound
		}
		return f.shouldRetry(ctx, res, err)
	})
	return err
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
	// Use the magnitude of expire - a signature lifetime cannot be negative.
	duration := time.Duration(math.Abs(float64(expire)))
	expireSeconds := duration.Seconds()
	fileRemote := path.Join(f.root, remote)
	folderPath, fileName := path.Split(fileRemote)
	folderPath = f.EncodePath(folderPath)
	fileName = f.EncodeFileName(fileName)
	file := f.getFileByName(ctx, folderPath, fileName)
	if file == nil {
		return "", fs.ErrorObjectNotFound
	}
	// Pacer not needed as this doesn't use the API
	url, err := f.ik.URL(client.URLParam{
		Src:           file.URL,
		Signed:        *file.IsPrivateFile || f.opt.OnlySigned,
		ExpireSeconds: int64(expireSeconds),
		QueryParameters: map[string]string{
			// updatedAt is appended to the URL; presumably a cache-buster so
			// the link reflects the latest version - NOTE(review): confirm.
			"updatedAt": file.UpdatedAt.String(),
		},
	})
	if err != nil {
		return "", err
	}
	return url, nil
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
	// This backend advertises no hash types (see Fs.Hashes).
	return "", hash.ErrUnsupported
}

// Storable says whether this object can be stored
func (o *Object) Storable() bool {
	return true
}

// String returns a description of the Object
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.file.Name
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(context.Context) time.Time {
	// The API exposes an update time only, used here as the mod time.
	return o.file.UpdatedAt
}

// Size returns the size of the file
func (o *Object) Size() int64 {
	return int64(o.file.Size)
}

// MimeType returns the MIME type of the file
func (o *Object) MimeType(context.Context) string {
	return o.contentType
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
//
// The object is fetched over plain HTTP from its (optionally signed) URL
// rather than through the API. Range/Seek options are honoured; if the
// server ignores the Range header the requested prefix is skipped manually.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	// Offset and Count for range download
	var offset int64
	var count int64
	fs.FixRangeOption(options, -1)
	partialContent := false
	for _, option := range options {
		switch x := option.(type) {
		case *fs.RangeOption:
			offset, count = x.Decode(-1)
			partialContent = true
		case *fs.SeekOption:
			offset = x.Offset
			partialContent = true
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	// Pacer not needed as this doesn't use the API
	url, err := o.fs.ik.URL(client.URLParam{
		Src:    o.file.URL,
		Signed: *o.file.IsPrivateFile || o.fs.opt.OnlySigned,
		QueryParameters: map[string]string{
			"tr":        "orig-true",
			"updatedAt": o.file.UpdatedAt.String(),
		},
	})
	if err != nil {
		return nil, err
	}
	// Renamed from "client" to avoid shadowing the imported client package,
	// and propagate ctx so cancellation aborts the download. The original
	// also ignored the error from NewRequest.
	httpClient := &http.Client{}
	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+count-1))
	resp, err := httpClient.Do(req)
	if err != nil {
		return nil, err
	}
	end := resp.ContentLength
	if partialContent && resp.StatusCode == http.StatusOK {
		// Server ignored the Range header - discard the unwanted prefix and
		// limit the reader to the requested window.
		skip := offset
		if offset < 0 {
			skip = end + offset + 1
		}
		_, err = io.CopyN(io.Discard, resp.Body, skip)
		if err != nil {
			_ = resp.Body.Close()
			return nil, err
		}
		return readers.NewLimitedReadCloser(resp.Body, end-skip), nil
	}
	return resp.Body, nil
}
// Update in to the object with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	srcRemote := o.Remote()
	remote := path.Join(o.fs.root, srcRemote)
	// NOTE(review): unlike uploadFile, the path components are not passed
	// through EncodePath/EncodeFileName here - confirm this asymmetry is
	// intentional.
	folderPath, fileName := path.Split(remote)
	// (Removed an unused UseUniqueFileName local that the original computed
	// but never passed to the upload.)
	var resp *client.UploadResult
	err = o.fs.pacer.Call(func() (bool, error) {
		var res *http.Response
		res, resp, err = o.fs.ik.Upload(ctx, in, client.UploadParam{
			FileName:      fileName,
			Folder:        folderPath,
			IsPrivateFile: o.file.IsPrivateFile,
		})
		return o.fs.shouldRetry(ctx, res, err)
	})
	if err != nil {
		return err
	}
	// Refresh the cached file details so Size/ModTime reflect the upload.
	fileID := resp.FileID
	_, file, err := o.fs.ik.File(ctx, fileID)
	if err != nil {
		return err
	}
	o.file = *file
	return nil
}
// Remove this object
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.pacer.Call(func() (bool, error) {
		res, err := o.fs.ik.DeleteFile(ctx, o.file.FileID)
		return o.fs.shouldRetry(ctx, res, err)
	})
}
// SetModTime sets the metadata on the object to set the modification date
//
// This backend cannot set modification times, so it always returns
// fs.ErrorCantSetModTime (see also Precision).
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
	return fs.ErrorCantSetModTime
}
// uploadFile uploads in to srcRemote (relative to f.root) with retries and
// returns the resulting object looked up via NewObject. Files are uploaded
// private when the only_signed option is set.
func uploadFile(ctx context.Context, f *Fs, in io.Reader, srcRemote string, options ...fs.OpenOption) (fs.Object, error) {
	remote := path.Join(f.root, srcRemote)
	folderPath, fileName := path.Split(remote)
	folderPath = f.EncodePath(folderPath)
	fileName = f.EncodeFileName(fileName)
	// (Removed an unused UseUniqueFileName local that the original computed
	// but never passed to the upload.)
	err := f.pacer.Call(func() (bool, error) {
		var res *http.Response
		var err error
		res, _, err = f.ik.Upload(ctx, in, client.UploadParam{
			FileName:      fileName,
			Folder:        folderPath,
			IsPrivateFile: &f.opt.OnlySigned,
		})
		return f.shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, err
	}
	return f.NewObject(ctx, srcRemote)
}
// Metadata returns the metadata for the object
//
// The keys follow systemMetadataInfo; AI tags are split by their source
// into "google-tags" and "aws-tags".
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
	metadata.Set("btime", o.file.CreatedAt.Format(time.RFC3339))
	metadata.Set("size", strconv.FormatUint(o.file.Size, 10))
	metadata.Set("file-type", o.file.FileType)
	metadata.Set("height", strconv.Itoa(o.file.Height))
	metadata.Set("width", strconv.Itoa(o.file.Width))
	metadata.Set("has-alpha", strconv.FormatBool(o.file.HasAlpha))
	// Pass through any embedded metadata keys verbatim.
	for k, v := range o.file.EmbeddedMetadata {
		metadata.Set(k, fmt.Sprint(v))
	}
	if o.file.Tags != nil {
		metadata.Set("tags", strings.Join(o.file.Tags, ","))
	}
	if o.file.CustomCoordinates != nil {
		metadata.Set("custom-coordinates", *o.file.CustomCoordinates)
	}
	if o.file.IsPrivateFile != nil {
		metadata.Set("is-private-file", strconv.FormatBool(*o.file.IsPrivateFile))
	}
	if o.file.AITags != nil {
		googleTags := []string{}
		awsTags := []string{}
		for _, tag := range o.file.AITags {
			if tag.Source == "google-auto-tagging" {
				googleTags = append(googleTags, tag.Name)
			} else if tag.Source == "aws-auto-tagging" {
				awsTags = append(awsTags, tag.Name)
			}
		}
		if len(googleTags) > 0 {
			metadata.Set("google-tags", strings.Join(googleTags, ","))
		}
		if len(awsTags) > 0 {
			metadata.Set("aws-tags", strings.Join(awsTags, ","))
		}
	}
	return metadata, nil
}
// Copy src to this remote using download + re-upload (there is no
// server-side copy here - the source is streamed through rclone).
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (obj fs.Object, err error) {
	srcObj, ok := src.(*Object)
	if !ok {
		// The original returned fs.ErrorCantMove here, but the Copier
		// contract uses fs.ErrorCantCopy to trigger the fallback copy.
		return nil, fs.ErrorCantCopy
	}
	file, err := srcObj.Open(ctx)
	if err != nil {
		return nil, err
	}
	// Close the source reader when done - the original leaked it.
	defer func() {
		closeErr := file.Close()
		if err == nil {
			err = closeErr
		}
	}()
	return uploadFile(ctx, f, file, remote)
}
// Check the interfaces are satisfied.
// (compile-time assertions only - no runtime cost)
var (
	_ fs.Fs           = &Fs{}
	_ fs.Purger       = &Fs{}
	_ fs.PublicLinker = &Fs{}
	_ fs.Object       = &Object{}
	_ fs.Copier       = &Fs{}
)

View File

@@ -1,18 +0,0 @@
package imagekit
import (
"testing"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs the standard rclone integration test suite against
// the remote named "TestImageKit:" with verbose output enabled.
func TestIntegration(t *testing.T) {
	debug := true
	fstest.Verbose = &debug
	fstests.Run(t, &fstests.Opt{
		RemoteName:      "TestImageKit:",
		NilObject:       (*Object)(nil),
		SkipFsCheckWrap: true,
	})
}

View File

@@ -1,193 +0,0 @@
package imagekit
import (
"context"
"fmt"
"net/http"
"strconv"
"time"
"github.com/rclone/rclone/backend/imagekit/client"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/pacer"
)
// getFiles pages through all files under path, 100 at a time, optionally
// including old file versions. Returns an empty slice on error.
func (f *Fs) getFiles(ctx context.Context, path string, includeVersions bool) (files []client.File, err error) {
	files = make([]client.File, 0)
	var hasMore = true
	for hasMore {
		err = f.pacer.Call(func() (bool, error) {
			var data *[]client.File
			var res *http.Response
			res, data, err = f.ik.Files(ctx, client.FilesOrFolderParam{
				Skip:  len(files),
				Limit: 100,
				Path:  path,
			}, includeVersions)
			// On error data may be nil - the original dereferenced it
			// unconditionally and could panic.
			if err != nil || data == nil {
				hasMore = false
				return f.shouldRetry(ctx, res, err)
			}
			// A short page means we have reached the end.
			hasMore = len(*data) >= 100
			files = append(files, *data...)
			return f.shouldRetry(ctx, res, err)
		})
		if err != nil {
			return make([]client.File, 0), err
		}
	}
	return files, nil
}
// getFolders pages through all folders under path, 100 at a time.
// Returns an empty slice on error.
func (f *Fs) getFolders(ctx context.Context, path string) (folders []client.Folder, err error) {
	folders = make([]client.Folder, 0)
	var hasMore = true
	for hasMore {
		err = f.pacer.Call(func() (bool, error) {
			var data *[]client.Folder
			var res *http.Response
			res, data, err = f.ik.Folders(ctx, client.FilesOrFolderParam{
				Skip:  len(folders),
				Limit: 100,
				Path:  path,
			})
			// On error data may be nil - the original dereferenced it
			// unconditionally and could panic.
			if err != nil || data == nil {
				hasMore = false
				return f.shouldRetry(ctx, res, err)
			}
			// A short page means we have reached the end.
			hasMore = len(*data) >= 100
			folders = append(folders, *data...)
			return f.shouldRetry(ctx, res, err)
		})
		if err != nil {
			return make([]client.Folder, 0), err
		}
	}
	return folders, nil
}
// getFileByName looks up a single file by exact name in the given folder
// path. Returns nil if the file does not exist or any error occurs.
func (f *Fs) getFileByName(ctx context.Context, path string, name string) (file *client.File) {
	err := f.pacer.Call(func() (bool, error) {
		res, data, err := f.ik.Files(ctx, client.FilesOrFolderParam{
			Limit:       1,
			Path:        path,
			SearchQuery: fmt.Sprintf(`type = "file" AND name = %s`, strconv.Quote(name)),
		}, false)
		// Only inspect data when the call succeeded - on error data may be
		// nil and the original's unconditional dereference could panic.
		if err != nil || data == nil || len(*data) == 0 {
			file = nil
		} else {
			file = &(*data)[0]
		}
		return f.shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil
	}
	return file
}
// getFolderByName looks up a single folder by exact name in the given
// parent path. Returns (nil, nil) if the folder does not exist.
func (f *Fs) getFolderByName(ctx context.Context, path string, name string) (folder *client.Folder, err error) {
	err = f.pacer.Call(func() (bool, error) {
		res, data, err := f.ik.Folders(ctx, client.FilesOrFolderParam{
			Limit:       1,
			Path:        path,
			SearchQuery: fmt.Sprintf(`type = "folder" AND name = %s`, strconv.Quote(name)),
		})
		// Only inspect data when the call succeeded - on error data may be
		// nil and the original's unconditional dereference could panic.
		if err != nil || data == nil || len(*data) == 0 {
			folder = nil
		} else {
			folder = &(*data)[0]
		}
		return f.shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, err
	}
	return folder, nil
}
// retryErrorCodes is a slice of error codes that we will retry
// (checked by shouldRetryHTTP below).
var retryErrorCodes = []int{
	401, // Unauthorized (e.g. "Token has expired")
	408, // Request Timeout
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	503, // Service Unavailable
	504, // Gateway Time-out
}
func shouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
if resp == nil {
return false
}
for _, e := range retryErrorCodes {
if resp.StatusCode == e {
return true
}
}
return false
}
// shouldRetry decides, for the pacer, whether an API call should be
// retried. Context cancellation is never retried; 429/503 are retried
// after the server-supplied delay; otherwise generic retryable errors and
// retryErrorCodes apply.
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	// Don't retry if the context was cancelled or its deadline exceeded.
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	if resp != nil && (resp.StatusCode == 429 || resp.StatusCode == 503) {
		var retryAfter = 1
		retryAfterString := resp.Header.Get("X-RateLimit-Reset")
		if retryAfterString != "" {
			var err error
			retryAfter, err = strconv.Atoi(retryAfterString)
			if err != nil {
				fs.Errorf(f, "Malformed %s header %q: %v", "X-RateLimit-Reset", retryAfterString, err)
			}
		}
		// NOTE(review): the header value is interpreted as milliseconds
		// here - confirm against the ImageKit rate-limit documentation.
		return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Millisecond)
	}
	return fserrors.ShouldRetry(err) || shouldRetryHTTP(resp, retryErrorCodes), err
}
// EncodePath encapsulates the logic for encoding a path
// (standard rclone form -> API-safe form, per the "encoding" option).
func (f *Fs) EncodePath(str string) string {
	return f.opt.Enc.FromStandardPath(str)
}

// DecodePath encapsulates the logic for decoding a path
// (API form -> standard rclone form).
func (f *Fs) DecodePath(str string) string {
	return f.opt.Enc.ToStandardPath(str)
}

// EncodeFileName encapsulates the logic for encoding a file name
// (standard rclone form -> API-safe form).
func (f *Fs) EncodeFileName(str string) string {
	return f.opt.Enc.FromStandardName(str)
}

// DecodeFileName encapsulates the logic for decoding a file name
// (API form -> standard rclone form).
func (f *Fs) DecodeFileName(str string) string {
	return f.opt.Enc.ToStandardName(str)
}

View File

@@ -133,13 +133,11 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
}, },
Options: []fs.Option{{ Options: []fs.Option{{
Name: "access_key_id", Name: "access_key_id",
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php", Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
Sensitive: true,
}, { }, {
Name: "secret_access_key", Name: "secret_access_key",
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.", Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
Sensitive: true,
}, { }, {
// their official client (https://github.com/jjjake/internetarchive) hardcodes following the two // their official client (https://github.com/jjjake/internetarchive) hardcodes following the two
Name: "endpoint", Name: "endpoint",
@@ -802,7 +800,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
headers["x-archive-size-hint"] = fmt.Sprintf("%d", size) headers["x-archive-size-hint"] = fmt.Sprintf("%d", size)
} }
var mdata fs.Metadata var mdata fs.Metadata
mdata, err = fs.GetMetadataOptions(ctx, o.fs, src, options) mdata, err = fs.GetMetadataOptions(ctx, src, options)
if err == nil && mdata != nil { if err == nil && mdata != nil {
for mk, mv := range mdata { for mk, mv := range mdata {
mk = strings.ToLower(mk) mk = strings.ToLower(mk)

View File

@@ -395,7 +395,7 @@ type JottaFile struct {
State string `xml:"currentRevision>state"` State string `xml:"currentRevision>state"`
CreatedAt JottaTime `xml:"currentRevision>created"` CreatedAt JottaTime `xml:"currentRevision>created"`
ModifiedAt JottaTime `xml:"currentRevision>modified"` ModifiedAt JottaTime `xml:"currentRevision>modified"`
UpdatedAt JottaTime `xml:"currentRevision>updated"` Updated JottaTime `xml:"currentRevision>updated"`
Size int64 `xml:"currentRevision>size"` Size int64 `xml:"currentRevision>size"`
MimeType string `xml:"currentRevision>mime"` MimeType string `xml:"currentRevision>mime"`
MD5 string `xml:"currentRevision>md5"` MD5 string `xml:"currentRevision>md5"`

View File

@@ -67,13 +67,9 @@ const (
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2" legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
legacyConfigVersion = 0 legacyConfigVersion = 0
teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token" teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaseCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth" teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaseCloudClientID = "desktop" teliaCloudClientID = "desktop"
telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
telianoCloudAuthURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
telianoCloudClientID = "desktop"
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token" tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth" tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
@@ -92,34 +88,7 @@ func init() {
Description: "Jottacloud", Description: "Jottacloud",
NewFs: NewFs, NewFs: NewFs,
Config: Config, Config: Config,
MetadataInfo: &fs.MetadataInfo{ Options: []fs.Option{{
Help: `Jottacloud has limited support for metadata, currently an extended set of timestamps.`,
System: map[string]fs.MetadataHelp{
"btime": {
Help: "Time of file birth (creation), read from rclone metadata",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
},
"mtime": {
Help: "Time of last modification, read from rclone metadata",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
},
"utime": {
Help: "Time of last upload, when current revision was created, generated by backend",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
ReadOnly: true,
},
"content-type": {
Help: "MIME type, also known as media type",
Type: "string",
Example: "text/plain",
ReadOnly: true,
},
},
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "md5_memory_limit", Name: "md5_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.", Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024), Default: fs.SizeSuffix(10 * 1024 * 1024),
@@ -154,7 +123,7 @@ func init() {
Default: (encoder.Display | Default: (encoder.Display |
encoder.EncodeWin | // :?"*<>| encoder.EncodeWin | // :?"*<>|
encoder.EncodeInvalidUtf8), encoder.EncodeInvalidUtf8),
}}...), }},
}) })
} }
@@ -169,11 +138,8 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
Value: "legacy", Value: "legacy",
Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.", Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
}, { }, {
Value: "telia_se", Value: "telia",
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).", Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
}, {
Value: "telia_no",
Help: "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
}, { }, {
Value: "tele2", Value: "tele2",
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.", Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
@@ -272,32 +238,17 @@ machines.`)
return nil, fmt.Errorf("error while saving token: %w", err) return nil, fmt.Errorf("error while saving token: %w", err)
} }
return fs.ConfigGoto("choose_device") return fs.ConfigGoto("choose_device")
case "telia_se": // telia_se cloud config case "telia": // telia cloud config
m.Set("configVersion", fmt.Sprint(configVersion)) m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, teliaseCloudClientID) m.Set(configClientID, teliaCloudClientID)
m.Set(configTokenURL, teliaseCloudTokenURL) m.Set(configTokenURL, teliaCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{ return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{ OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{ Endpoint: oauth2.Endpoint{
AuthURL: teliaseCloudAuthURL, AuthURL: teliaCloudAuthURL,
TokenURL: teliaseCloudTokenURL, TokenURL: teliaCloudTokenURL,
}, },
ClientID: teliaseCloudClientID, ClientID: teliaCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "telia_no": // telia_no cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, telianoCloudClientID)
m.Set(configTokenURL, telianoCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: telianoCloudAuthURL,
TokenURL: telianoCloudTokenURL,
},
ClientID: telianoCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"}, Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL, RedirectURL: oauthutil.RedirectLocalhostURL,
}, },
@@ -524,9 +475,7 @@ type Object struct {
remote string remote string
hasMetaData bool hasMetaData bool
size int64 size int64
createTime time.Time
modTime time.Time modTime time.Time
updateTime time.Time
md5 string md5 string
mimeType string mimeType string
} }
@@ -1001,9 +950,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
ReadMimeType: true, ReadMimeType: true,
WriteMimeType: false, WriteMimeType: false,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: false,
}).Fill(ctx, f) }).Fill(ctx, f)
f.jfsSrv.SetErrorHandler(errorHandler) f.jfsSrv.SetErrorHandler(errorHandler)
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
@@ -1190,7 +1136,6 @@ func parseListRStream(ctx context.Context, r io.Reader, filesystem *Fs, callback
remote: filesystem.opt.Enc.ToStandardPath(path.Join(f.Path, f.Name)), remote: filesystem.opt.Enc.ToStandardPath(path.Join(f.Path, f.Name)),
size: f.Size, size: f.Size,
md5: f.Checksum, md5: f.Checksum,
createTime: time.Time(f.Created),
modTime: time.Time(f.Modified), modTime: time.Time(f.Modified),
}) })
} }
@@ -1420,7 +1365,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// is currently in trash, but can be made to match, it will be // is currently in trash, but can be made to match, it will be
// restored. Returns ErrorObjectNotFound if upload will be necessary // restored. Returns ErrorObjectNotFound if upload will be necessary
// to get a matching remote file. // to get a matching remote file.
func (f *Fs) createOrUpdate(ctx context.Context, file string, createTime time.Time, modTime time.Time, size int64, md5 string) (info *api.JottaFile, err error) { func (f *Fs) createOrUpdate(ctx context.Context, file string, modTime time.Time, size int64, md5 string) (info *api.JottaFile, err error) {
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: f.filePath(file), Path: f.filePath(file),
@@ -1430,10 +1375,11 @@ func (f *Fs) createOrUpdate(ctx context.Context, file string, createTime time.Ti
opts.Parameters.Set("cphash", "true") opts.Parameters.Set("cphash", "true")
fileDate := api.JottaTime(modTime).String()
opts.ExtraHeaders["JSize"] = strconv.FormatInt(size, 10) opts.ExtraHeaders["JSize"] = strconv.FormatInt(size, 10)
opts.ExtraHeaders["JMd5"] = md5 opts.ExtraHeaders["JMd5"] = md5
opts.ExtraHeaders["JCreated"] = api.JottaTime(createTime).String() opts.ExtraHeaders["JCreated"] = fileDate
opts.ExtraHeaders["JModified"] = api.JottaTime(modTime).String() opts.ExtraHeaders["JModified"] = fileDate
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
@@ -1496,7 +1442,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?) // if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" { if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
fs.Debugf(src, "Server-side copied to trashed destination, restoring") fs.Debugf(src, "Server-side copied to trashed destination, restoring")
info, err = f.createOrUpdate(ctx, remote, srcObj.createTime, srcObj.modTime, srcObj.size, srcObj.md5) info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
} }
if err != nil { if err != nil {
@@ -1759,9 +1705,7 @@ func (o *Object) setMetaData(info *api.JottaFile) (err error) {
o.size = info.Size o.size = info.Size
o.md5 = info.MD5 o.md5 = info.MD5
o.mimeType = info.MimeType o.mimeType = info.MimeType
o.createTime = time.Time(info.CreatedAt)
o.modTime = time.Time(info.ModifiedAt) o.modTime = time.Time(info.ModifiedAt)
o.updateTime = time.Time(info.UpdatedAt)
return nil return nil
} }
@@ -1806,7 +1750,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
// (note that if size/md5 does not match, the file content will // (note that if size/md5 does not match, the file content will
// also be modified if deduplication is possible, i.e. it is // also be modified if deduplication is possible, i.e. it is
// important to use correct/latest values) // important to use correct/latest values)
_, err = o.fs.createOrUpdate(ctx, o.remote, o.createTime, modTime, o.size, o.md5) _, err = o.fs.createOrUpdate(ctx, o.remote, modTime, o.size, o.md5)
if err != nil { if err != nil {
if err == fs.ErrorObjectNotFound { if err == fs.ErrorObjectNotFound {
// file was modified (size/md5 changed) between readMetaData and createOrUpdate? // file was modified (size/md5 changed) between readMetaData and createOrUpdate?
@@ -1943,37 +1887,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Wrap the accounting back onto the stream // Wrap the accounting back onto the stream
in = wrap(in) in = wrap(in)
} }
// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
if err != nil {
return fmt.Errorf("failed to read metadata from source object: %w", err)
}
var createdTime string
var modTime string
if meta != nil {
if v, ok := meta["btime"]; ok {
t, err := time.Parse(time.RFC3339Nano, v) // metadata stores RFC3339Nano timestamps
if err != nil {
fs.Debugf(o, "failed to parse metadata btime: %q: %v", v, err)
} else {
createdTime = api.Rfc3339Time(t).String() // jottacloud api wants RFC3339 timestamps
}
}
if v, ok := meta["mtime"]; ok {
t, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
fs.Debugf(o, "failed to parse metadata mtime: %q: %v", v, err)
} else {
modTime = api.Rfc3339Time(t).String()
}
}
}
if modTime == "" { // prefer mtime in meta as Modified time, fallback to source ModTime
modTime = api.Rfc3339Time(src.ModTime(ctx)).String()
}
if createdTime == "" { // if no Created time set same as Modified
createdTime = modTime
}
// use the api to allocate the file first and get resume / deduplication info // use the api to allocate the file first and get resume / deduplication info
var resp *http.Response var resp *http.Response
@@ -1983,12 +1896,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Options: options, Options: options,
ExtraHeaders: make(map[string]string), ExtraHeaders: make(map[string]string),
} }
fileDate := api.Rfc3339Time(src.ModTime(ctx)).String()
// the allocate request // the allocate request
var request = api.AllocateFileRequest{ var request = api.AllocateFileRequest{
Bytes: size, Bytes: size,
Created: createdTime, Created: fileDate,
Modified: modTime, Modified: fileDate,
Md5: md5String, Md5: md5String,
Path: o.fs.allocatePathRaw(o.remote, true), Path: o.fs.allocatePathRaw(o.remote, true),
} }
@@ -2003,10 +1917,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err return err
} }
// If the file state is INCOMPLETE and CORRUPT, we must upload it. // If the file state is INCOMPLETE and CORRUPT, try to upload a then
// Else, if the file state is COMPLETE, we don't need to upload it because
// the content is already there, possibly it was created with deduplication,
// and also any metadata changes are already performed by the allocate request.
if response.State != "COMPLETED" { if response.State != "COMPLETED" {
// how much do we still have to upload? // how much do we still have to upload?
remainingBytes := size - response.ResumePos remainingBytes := size - response.ResumePos
@@ -2030,18 +1941,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// send the remaining bytes // send the remaining bytes
_, err = o.fs.apiSrv.CallJSON(ctx, &opts, nil, &result) resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, nil, &result)
if err != nil { if err != nil {
return err return err
} }
// Upload response contains main metadata properties (size, md5 and modTime) // finally update the meta data
// which could be set back to the object, but it does not contain the o.hasMetaData = true
// necessary information to set the createTime and updateTime properties, o.size = result.Bytes
// so must therefore perform a read instead. o.md5 = result.Md5
o.modTime = time.Unix(result.Modified/1000, 0)
} else {
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
return o.readMetaData(ctx, true)
} }
// in any case we must update the object meta data
return o.readMetaData(ctx, true) return nil
} }
func (o *Object) remove(ctx context.Context, hard bool) error { func (o *Object) remove(ctx context.Context, hard bool) error {
@@ -2076,22 +1991,6 @@ func (o *Object) Remove(ctx context.Context) error {
return o.remove(ctx, o.fs.opt.HardDelete) return o.remove(ctx, o.fs.opt.HardDelete)
} }
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
err = o.readMetaData(ctx, false)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return nil, err
}
metadata.Set("btime", o.createTime.Format(time.RFC3339Nano)) // metadata timestamps should be RFC3339Nano
metadata.Set("mtime", o.modTime.Format(time.RFC3339Nano))
metadata.Set("utime", o.updateTime.Format(time.RFC3339Nano))
metadata.Set("content-type", o.mimeType)
return metadata, nil
}
// Check the interfaces are satisfied // Check the interfaces are satisfied
var ( var (
_ fs.Fs = (*Fs)(nil) _ fs.Fs = (*Fs)(nil)
@@ -2106,5 +2005,4 @@ var (
_ fs.CleanUpper = (*Fs)(nil) _ fs.CleanUpper = (*Fs)(nil)
_ fs.Object = (*Object)(nil) _ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil) _ fs.MimeTyper = (*Object)(nil)
_ fs.Metadataer = (*Object)(nil)
) )

View File

@@ -1,17 +1,11 @@
package jottacloud package jottacloud
import ( import (
"context"
"crypto/md5" "crypto/md5"
"fmt" "fmt"
"io" "io"
"testing" "testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@@ -46,48 +40,3 @@ func TestReadMD5(t *testing.T) {
}) })
} }
} }
func (f *Fs) InternalTestMetadata(t *testing.T) {
ctx := context.Background()
contents := random.String(1000)
item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
utime := time.Now()
metadata := fs.Metadata{
"btime": "2009-05-06T04:05:06.499999999Z",
"mtime": "2010-06-07T08:09:07.599999999Z",
//"utime" - read-only
//"content-type" - read-only
}
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, "text/html", metadata)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
o := obj.(*Object)
gotMetadata, err := o.Metadata(ctx)
require.NoError(t, err)
for k, v := range metadata {
got := gotMetadata[k]
switch k {
case "btime":
assert.True(t, fstest.Time(v).Truncate(f.Precision()).Equal(fstest.Time(got)), fmt.Sprintf("btime not equal want %v got %v", v, got))
case "mtime":
assert.True(t, fstest.Time(v).Truncate(f.Precision()).Equal(fstest.Time(got)), fmt.Sprintf("btime not equal want %v got %v", v, got))
case "utime":
gotUtime := fstest.Time(got)
dt := gotUtime.Sub(utime)
assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("utime more than 1 minute out want %v got %v delta %v", utime, gotUtime, dt))
assert.True(t, fstest.Time(v).Equal(fstest.Time(got)))
case "content-type":
assert.True(t, o.MimeType(ctx) == got)
default:
assert.Equal(t, v, got, k)
}
}
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Metadata", f.InternalTestMetadata)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -61,10 +61,9 @@ func init() {
Default: true, Default: true,
Advanced: true, Advanced: true,
}, { }, {
Name: "user", Name: "user",
Help: "Your user name.", Help: "Your user name.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "password", Name: "password",
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).", Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",

View File

@@ -1,897 +0,0 @@
// Package linkbox provides an interface to the linkbox.to Cloud storage system.
//
// API docs: https://www.linkbox.to/api-docs
package linkbox
/*
Extras
- PublicLink - NO - sharing doesn't share the actual file, only a page with it on
- Move - YES - have Move and Rename file APIs so is possible
- MoveDir - NO - probably not possible - have Move but no Rename
*/
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"io"
"net/http"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
maxEntitiesPerPage = 1024
minSleep = 200 * time.Millisecond
maxSleep = 2 * time.Second
pacerBurst = 1
linkboxAPIURL = "https://www.linkbox.to/api/open/"
rootID = "0" // ID of root directory
)
func init() {
fsi := &fs.RegInfo{
Name: "linkbox",
Description: "Linkbox",
NewFs: NewFs,
Options: []fs.Option{{
Name: "token",
Help: "Token from https://www.linkbox.to/admin/account",
Sensitive: true,
Required: true,
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
Token string `config:"token"`
}
// Fs stores the interface to the remote Linkbox files
type Fs struct {
name string
root string
opt Options // options for this backend
features *fs.Features // optional features
ci *fs.ConfigInfo // global config
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer
}
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
contentType string
fullURL string
dirID int64
itemID string // and these IDs are for files
id int64 // these IDs appear to apply to directories
isDir bool
}
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
root = strings.Trim(root, "/")
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
opt: *opt,
root: root,
ci: ci,
srv: rest.NewClient(fshttp.NewClient(ctx)),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep))),
}
f.dirCache = dircache.New(root, rootID, f)
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
CaseInsensitive: true,
}).Fill(ctx, f)
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
return f, nil
}
return nil, err
}
f.features.Fill(ctx, &tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
type entity struct {
Type string `json:"type"`
Name string `json:"name"`
URL string `json:"url"`
Ctime int64 `json:"ctime"`
Size int64 `json:"size"`
ID int64 `json:"id"`
Pid int64 `json:"pid"`
ItemID string `json:"item_id"`
}
// Return true if the entity is a directory
func (e *entity) isDir() bool {
return e.Type == "dir" || e.Type == "sdir"
}
type data struct {
Entities []entity `json:"list"`
}
type fileSearchRes struct {
response
SearchData data `json:"data"`
}
// Set an object info from an entity
func (o *Object) set(e *entity) {
o.modTime = time.Unix(e.Ctime, 0)
o.contentType = e.Type
o.size = e.Size
o.fullURL = e.URL
o.isDir = e.isDir()
o.id = e.ID
o.itemID = e.ItemID
o.dirID = e.Pid
}
// Call linkbox with the query in opts and return result
//
// This will be checked for error and an error will be returned if Status != 1
func getUnmarshaledResponse(ctx context.Context, f *Fs, opts *rest.Opts, result interface{}) error {
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
}
responser := result.(responser)
if responser.IsError() {
return responser
}
return nil
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
// User function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*entity) bool
// Search is a bit fussy about which characters match
//
// If the name doesn't match this then do an dir list instead
var searchOK = regexp.MustCompile(`^[a-zA-Z0-9_ .]+$`)
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// If you set name then search ignores dirID. name is a substring
// search also so name="dir" matches "sub dir" also. This filters it
// down so it only returns items in dirID
func (f *Fs) listAll(ctx context.Context, dirID string, name string, fn listAllFn) (found bool, err error) {
var (
pageNumber = 0
numberOfEntities = maxEntitiesPerPage
)
name = strings.TrimSpace(name) // search doesn't like spaces
if !searchOK.MatchString(name) {
// If name isn't good then do an unbounded search
name = ""
}
OUTER:
for numberOfEntities == maxEntitiesPerPage {
pageNumber++
opts := &rest.Opts{
Method: "GET",
RootURL: linkboxAPIURL,
Path: "file_search",
Parameters: url.Values{
"token": {f.opt.Token},
"name": {name},
"pid": {dirID},
"pageNo": {itoa(pageNumber)},
"pageSize": {itoa64(maxEntitiesPerPage)},
},
}
var responseResult fileSearchRes
err = getUnmarshaledResponse(ctx, f, opts, &responseResult)
if err != nil {
return false, fmt.Errorf("getting files failed: %w", err)
}
numberOfEntities = len(responseResult.SearchData.Entities)
for _, entity := range responseResult.SearchData.Entities {
if itoa64(entity.Pid) != dirID {
// when name != "" this returns from all directories, so ignore not this one
continue
}
if fn(&entity) {
found = true
break OUTER
}
}
if pageNumber > 100000 {
return false, fmt.Errorf("too many results")
}
}
return found, nil
}
// Turn 64 bit int to string
func itoa64(i int64) string {
return strconv.FormatInt(i, 10)
}
// Turn int to string
func itoa(i int) string {
return itoa64(int64(i))
}
func splitDirAndName(remote string) (dir string, name string) {
lastSlashPosition := strings.LastIndex(remote, "/")
if lastSlashPosition == -1 {
dir = ""
name = remote
} else {
dir = remote[:lastSlashPosition]
name = remote[lastSlashPosition+1:]
}
// fs.Debugf(nil, "splitDirAndName remote = {%s}, dir = {%s}, name = {%s}", remote, dir, name)
return dir, name
}
// FindLeaf finds a directory of name leaf in the folder with ID directoryID
func (f *Fs) FindLeaf(ctx context.Context, directoryID, leaf string) (directoryIDOut string, found bool, err error) {
// Find the leaf in directoryID
found, err = f.listAll(ctx, directoryID, leaf, func(entity *entity) bool {
if entity.isDir() && strings.EqualFold(entity.Name, leaf) {
directoryIDOut = itoa64(entity.ID)
return true
}
return false
})
return directoryIDOut, found, err
}
// Returned from "folder_create"
type folderCreateRes struct {
response
Data struct {
DirID int64 `json:"dirId"`
} `json:"data"`
}
// CreateDir makes a directory with dirID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, err error) {
// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
opts := &rest.Opts{
Method: "GET",
RootURL: linkboxAPIURL,
Path: "folder_create",
Parameters: url.Values{
"token": {f.opt.Token},
"name": {leaf},
"pid": {dirID},
"isShare": {"0"},
"canInvite": {"1"},
"canShare": {"1"},
"withBodyImg": {"1"},
"desc": {""},
},
}
response := folderCreateRes{}
err = getUnmarshaledResponse(ctx, f, opts, &response)
if err != nil {
// response status 1501 means that directory already exists
if response.Status == 1501 {
return newID, fmt.Errorf("couldn't find already created directory: %w", fs.ErrorDirNotFound)
}
return newID, fmt.Errorf("CreateDir failed: %w", err)
}
if response.Data.DirID == 0 {
return newID, fmt.Errorf("API returned 0 for ID of newly created directory")
}
return itoa64(response.Data.DirID), nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// fs.Debugf(f, "List method dir = {%s}", dir)
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
_, err = f.listAll(ctx, directoryID, "", func(entity *entity) bool {
remote := path.Join(dir, entity.Name)
if entity.isDir() {
id := itoa64(entity.ID)
modTime := time.Unix(entity.Ctime, 0)
d := fs.NewDir(remote, modTime).SetID(id).SetParentID(itoa64(entity.Pid))
entries = append(entries, d)
// cache the directory ID for later lookups
f.dirCache.Put(remote, id)
} else {
o := &Object{
fs: f,
remote: remote,
}
o.set(entity)
entries = append(entries, o)
}
return false
})
if err != nil {
return nil, err
}
return entries, nil
}
// get an entity with leaf from dirID
func getEntity(ctx context.Context, f *Fs, leaf string, directoryID string, token string) (*entity, error) {
var result *entity
var resultErr = fs.ErrorObjectNotFound
_, err := f.listAll(ctx, directoryID, leaf, func(entity *entity) bool {
if strings.EqualFold(entity.Name, leaf) {
// fs.Debugf(f, "getObject found entity.Name {%s} name {%s}", entity.Name, name)
if entity.isDir() {
result = nil
resultErr = fs.ErrorIsDir
} else {
result = entity
resultErr = nil
}
return true
}
return false
})
if err != nil {
return nil, err
}
return result, resultErr
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
//
// If remote points to a directory then it should return
// ErrorIsDir if possible without doing any extra work,
// otherwise ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
leaf, dirID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
}
return nil, err
}
entity, err := getEntity(ctx, f, leaf, dirID, f.opt.Token)
if err != nil {
return nil, err
}
o := &Object{
fs: f,
remote: remote,
}
o.set(entity)
return o, nil
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.dirCache.FindDir(ctx, dir, true)
return err
}
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
if check {
entries, err := f.List(ctx, dir)
if err != nil {
return err
}
if len(entries) != 0 {
return fs.ErrorDirectoryNotEmpty
}
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
opts := &rest.Opts{
Method: "GET",
RootURL: linkboxAPIURL,
Path: "folder_del",
Parameters: url.Values{
"token": {f.opt.Token},
"dirIds": {directoryID},
},
}
response := response{}
err = getUnmarshaledResponse(ctx, f, opts, &response)
if err != nil {
// Linkbox has some odd error returns here
if response.Status == 403 || response.Status == 500 {
return fs.ErrorDirNotFound
}
return fmt.Errorf("purge error: %w", err)
}
f.dirCache.FlushDir(dir)
if err != nil {
return err
}
return nil
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
}
// SetModTime sets modTime on a particular file
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return fs.ErrorCantSetModTime
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
var res *http.Response
downloadURL := o.fullURL
if downloadURL == "" {
_, name := splitDirAndName(o.Remote())
newObject, err := getEntity(ctx, o.fs, name, itoa64(o.dirID), o.fs.opt.Token)
if err != nil {
return nil, err
}
if newObject == nil {
// fs.Debugf(o.fs, "Open entity is empty: name = {%s}", name)
return nil, fs.ErrorObjectNotFound
}
downloadURL = newObject.URL
}
opts := &rest.Opts{
Method: "GET",
RootURL: downloadURL,
Options: options,
}
err := o.fs.pacer.Call(func() (bool, error) {
var err error
res, err = o.fs.srv.Call(ctx, opts)
return o.fs.shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
return res.Body, nil
}
// Update in to the object with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
//
// The upload is a two step process: get_upload_url (which may report the
// content is already known server-side via status 600) followed by
// folder_upload_file to create the file item, then a retry loop to read
// the new object back for eventual consistency.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	size := src.Size()
	if size == 0 {
		return fs.ErrorCantUploadEmptyFiles
	} else if size < 0 {
		// The size is sent to get_upload_url below, so streaming
		// uploads of unknown length can't be supported.
		return fmt.Errorf("can't upload files of unknown length")
	}
	remote := o.Remote()

	// remove the file if it exists
	if o.itemID != "" {
		// We already know the file's ID - delete it directly.
		fs.Debugf(o, "Update: removing old file")
		err = o.Remove(ctx)
		if err != nil {
			// Best effort: log and carry on with the upload.
			fs.Errorf(o, "Update: failed to remove existing file: %v", err)
		}
		o.itemID = ""
	} else {
		// No ID cached - look the file up and delete it if found.
		tmpObject, err := o.fs.NewObject(ctx, remote)
		if err == nil {
			fs.Debugf(o, "Update: removing old file")
			err = tmpObject.Remove(ctx)
			if err != nil {
				fs.Errorf(o, "Update: failed to remove existing file: %v", err)
			}
		}
	}

	// Linkbox identifies content by the MD5 of the first 10 MiB, so
	// buffer that much of the input into memory up front.
	first10m := io.LimitReader(in, 10_485_760)
	first10mBytes, err := io.ReadAll(first10m)
	if err != nil {
		return fmt.Errorf("Update err in reading file: %w", err)
	}

	// get upload authorization (step 1)
	opts := &rest.Opts{
		Method:  "GET",
		RootURL: linkboxAPIURL,
		Path:    "get_upload_url",
		Options: options,
		Parameters: url.Values{
			"token":           {o.fs.opt.Token},
			"fileMd5ofPre10m": {fmt.Sprintf("%x", md5.Sum(first10mBytes))},
			"fileSize":        {itoa64(size)},
		},
	}

	getFirstStepResult := getUploadURLResponse{}
	err = getUnmarshaledResponse(ctx, o.fs, opts, &getFirstStepResult)
	if err != nil {
		// Status 600 presumably arrives as an error from
		// getUnmarshaledResponse but is not a failure - it means the
		// content is already known (handled in the switch below).
		// TODO confirm against getUnmarshaledResponse.
		if getFirstStepResult.Status != 600 {
			return fmt.Errorf("Update err in unmarshaling response: %w", err)
		}
	}

	switch getFirstStepResult.Status {
	case 1:
		// upload file using link from first step
		var res *http.Response
		// Replay the buffered first 10 MiB followed by the rest of the input.
		file := io.MultiReader(bytes.NewReader(first10mBytes), in)

		opts := &rest.Opts{
			Method:        "PUT",
			RootURL:       getFirstStepResult.Data.SignURL,
			Options:       options,
			Body:          file,
			ContentLength: &size,
		}

		// CallNoRetry: the body reader can't be rewound after a
		// partial send, so don't let the pacer retry.
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			res, err = o.fs.srv.Call(ctx, opts)
			return o.fs.shouldRetry(ctx, res, err)
		})
		if err != nil {
			return fmt.Errorf("update err in uploading file: %w", err)
		}
		// Drain the response body.
		// NOTE(review): res.Body does not appear to be closed here -
		// possible connection leak; confirm.
		_, err = io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("update err in reading response: %w", err)
		}
	case 600:
		// Status means that we don't need to upload file
		// We need only to make second step
	default:
		return fmt.Errorf("got unexpected message from Linkbox: %s", getFirstStepResult.Message)
	}

	leaf, dirID, err := o.fs.dirCache.FindPath(ctx, remote, false)
	if err != nil {
		return err
	}

	// create file item at Linkbox (second step)
	opts = &rest.Opts{
		Method:  "GET",
		RootURL: linkboxAPIURL,
		Path:    "folder_upload_file",
		Options: options,
		Parameters: url.Values{
			"token":           {o.fs.opt.Token},
			"fileMd5ofPre10m": {fmt.Sprintf("%x", md5.Sum(first10mBytes))},
			"fileSize":        {itoa64(size)},
			"pid":             {dirID},
			"diyName":         {leaf},
		},
	}

	getSecondStepResult := getUploadURLResponse{}
	err = getUnmarshaledResponse(ctx, o.fs, opts, &getSecondStepResult)
	if err != nil {
		return fmt.Errorf("Update second step failed: %w", err)
	}

	// Try a few times to read the object after upload for eventual consistency
	const maxTries = 10
	var sleepTime = 100 * time.Millisecond
	var entity *entity
	for try := 1; try <= maxTries; try++ {
		entity, err = getEntity(ctx, o.fs, leaf, dirID, o.fs.opt.Token)
		if err == nil {
			break
		}
		if err != fs.ErrorObjectNotFound {
			return fmt.Errorf("Update failed to read object: %w", err)
		}
		// Exponential backoff while the new file propagates.
		fs.Debugf(o, "Trying to read object after upload: try again in %v (%d/%d)", sleepTime, try, maxTries)
		time.Sleep(sleepTime)
		sleepTime *= 2
	}
	if err != nil {
		return err
	}
	// Cache the freshly read metadata on the object.
	o.set(entity)
	return nil
}
// Remove this object
func (o *Object) Remove(ctx context.Context) error {
	// Deletion is a GET request against the file_del endpoint with the
	// item's ID.
	opts := &rest.Opts{
		Method:  "GET",
		RootURL: linkboxAPIURL,
		Path:    "file_del",
		Parameters: url.Values{
			"token":   {o.fs.opt.Token},
			"itemIds": {o.itemID},
		},
	}
	var result getUploadURLResponse
	if err := getUnmarshaledResponse(ctx, o.fs, opts, &result); err != nil {
		return fmt.Errorf("could not Remove: %w", err)
	}
	return nil
}
// ModTime returns the modification time of the object
//
// This is the cached time from when the object's metadata was read;
// modification times cannot be set on this backend (see SetModTime).
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}
// Remote returns the remote path of the object, relative to the fs root
func (o *Object) Remote() string {
	return o.remote
}
// Size returns the size of the object in bytes
func (o *Object) Size() int64 {
	return o.size
}
// String returns a description of the Object - its remote path, or
// "<nil>" for a nil receiver.
func (o *Object) String() string {
	if o != nil {
		return o.remote
	}
	return "<nil>"
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.fs
}
// Hash returns "" since this backend doesn't support hashes -
// it always returns hash.ErrUnsupported (see also Hashes which
// advertises hash.None).
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}
// Storable returns whether the object is storable as a regular file
// (not a directory, symbolic link, block device, character device,
// named pipe, etc.) - always true for this backend.
func (o *Object) Storable() bool {
	return true
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}
// Name returns the configured name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
// Root returns the root path of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}
// String returns a human readable description of the Fs
func (f *Fs) String() string {
	return "Linkbox root '" + f.root + "'"
}
// Precision of the ModTimes in this Fs
//
// Modification times cannot be set on this backend, so this returns
// fs.ModTimeNotSupported.
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}
// Hashes returns the supported hash types of the filesystem -
// hash.None here, since Linkbox doesn't expose hashes to rclone.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}
/*
{
"data": {
"signUrl": "http://xx -- Then CURL PUT your file with sign url "
},
"msg": "please use this url to upload (PUT method)",
"status": 1
}
*/
// response is the JSON envelope common to all Linkbox API replies.
// All messages have these items.
//
// Status is 1 on success; any other value indicates an error
// described by Message (see IsError).
type response struct {
	Message string `json:"msg"`
	Status int `json:"status"`
}
// IsError returns whether response represents an error - any status
// other than 1 counts as an error.
func (r *response) IsError() bool {
	return r.Status != 1
}
// Error returns the error state of this response as a string,
// satisfying the error interface.
func (r *response) Error() string {
	return fmt.Sprintf("Linkbox error %d: %s", r.Status, r.Message)
}
// responser is the interface covering the response envelope so it can
// be used when response is embedded in other response types.
type responser interface {
	IsError() bool
	Error() string
}
// getUploadURLData is the data payload of a get_upload_url response -
// SignURL is the pre-signed URL the file content is PUT to.
type getUploadURLData struct {
	SignURL string `json:"signUrl"`
}
// getUploadURLResponse is the reply from get_upload_url; it is also
// reused for other endpoints that share the common response envelope
// (folder_upload_file, file_del).
type getUploadURLResponse struct {
	response
	Data getUploadURLData `json:"data"`
}
// Put uploads in to the remote path with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Make sure the parent directory exists before starting the upload.
	parentDir, _ := splitDirAndName(src.Remote())
	if err := f.Mkdir(ctx, parentDir); err != nil {
		return nil, err
	}
	obj := &Object{
		fs:     f,
		remote: src.Remote(),
		size:   src.Size(),
	}
	// Update performs the actual upload; return the object alongside
	// any error it reports.
	return obj, obj.Update(ctx, in, src, options...)
}
// Purge all files in the directory specified
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
	// false here (vs true in Rmdir) presumably allows removal of a
	// non-empty directory — TODO confirm against purgeCheck.
	return f.purgeCheck(ctx, dir, false)
}
// retryErrorCodes is a slice of HTTP status codes that we will retry -
// consumed by shouldRetry via fserrors.ShouldRetryHTTP.
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}
// shouldRetry determines whether a given err rates being retried
//
// A context cancellation/deadline error is never retried; otherwise
// the decision is delegated to the generic fserrors helpers and the
// retryErrorCodes list above.
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	f.dirCache.ResetRoot()
}
// Check the interfaces are satisfied at compile time
var (
	_ fs.Fs = &Fs{}
	_ fs.Purger = &Fs{}
	_ fs.DirCacheFlusher = &Fs{}
	_ fs.Object = &Object{}
)

View File

@@ -1,17 +0,0 @@
// Test Linkbox filesystem interface
package linkbox_test
import (
"testing"
"github.com/rclone/rclone/backend/linkbox"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestLinkbox:",
NilObject: (*linkbox.Object)(nil),
})
}

View File

@@ -13,7 +13,6 @@ import (
"runtime" "runtime"
"strings" "strings"
"sync" "sync"
"sync/atomic"
"time" "time"
"unicode/utf8" "unicode/utf8"
@@ -146,11 +145,6 @@ time we:
- Only checksum the size that stat gave - Only checksum the size that stat gave
- Don't update the stat info for the file - Don't update the stat info for the file
**NB** do not use this flag on a Windows Volume Shadow (VSS). For some
unknown reason, files in a VSS sometimes show different sizes from the
directory listing (where the initial stat value comes from on Windows)
and when stat is called on them directly. Other copy tools always use
the direct stat value and setting this flag will disable that.
`, `,
Default: false, Default: false,
Advanced: true, Advanced: true,
@@ -249,7 +243,7 @@ type Fs struct {
precision time.Duration // precision of local filesystem precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'. warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string warned map[string]struct{} // whether we have warned about this string
xattrSupported atomic.Int32 // whether xattrs are supported xattrSupported int32 // whether xattrs are supported (atomic access)
// do os.Lstat or os.Stat // do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error) lstat func(name string) (os.FileInfo, error)
@@ -297,7 +291,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
lstat: os.Lstat, lstat: os.Lstat,
} }
if xattrSupported { if xattrSupported {
f.xattrSupported.Store(1) f.xattrSupported = 1
} }
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc) f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{ f.features = (&fs.Features{
@@ -522,7 +516,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue continue
} }
} }
fierr = fmt.Errorf("failed to get info about directory entry %q: %w", namepath, fierr) err = fmt.Errorf("failed to read directory %q: %w", namepath, fierr)
fs.Errorf(dir, "%v", fierr) fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync _ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue continue
@@ -647,13 +641,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// //
// If it isn't empty it will return an error // If it isn't empty it will return an error
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(ctx context.Context, dir string) error {
localPath := f.localPath(dir) return os.Remove(f.localPath(dir))
if fi, err := os.Stat(localPath); err != nil {
return err
} else if !fi.IsDir() {
return fs.ErrorIsFile
}
return os.Remove(localPath)
} }
// Precision of the file system // Precision of the file system
@@ -1128,12 +1116,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
} }
} }
// Update the file info before we start reading
err = o.lstat()
if err != nil {
return nil, err
}
// If not checking updated then limit to current size. This means if // If not checking updated then limit to current size. This means if
// file is being extended, readers will read a o.Size() bytes rather // file is being extended, readers will read a o.Size() bytes rather
// than the new size making for a consistent upload. // than the new size making for a consistent upload.
@@ -1298,7 +1280,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// Fetch and set metadata if --metadata is in use // Fetch and set metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options) meta, err := fs.GetMetadataOptions(ctx, src, options)
if err != nil { if err != nil {
return fmt.Errorf("failed to read metadata from source object: %w", err) return fmt.Errorf("failed to read metadata from source object: %w", err)
} }

View File

@@ -19,7 +19,6 @@ import (
"github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/file" "github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/readers"
@@ -515,43 +514,3 @@ func TestFilterSymlinkCopyLinks(t *testing.T) {
func TestFilterSymlinkLinks(t *testing.T) { func TestFilterSymlinkLinks(t *testing.T) {
testFilterSymlink(t, false) testFilterSymlink(t, false)
} }
func TestCopySymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
f := r.Flocal.(*Fs)
// Create a file and a symlink to it
r.WriteFile("src/file.txt", "hello world", when)
require.NoError(t, os.Symlink("file.txt", filepath.Join(r.LocalName, "src", "link.txt")))
defer func() {
// Reset -L/-l mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = false
f.lstat = os.Lstat
}()
// Set fs into "-l/--links" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
// Create dst
require.NoError(t, f.Mkdir(ctx, "dst"))
// Do copy from src into dst
src, err := f.NewObject(ctx, "src/link.txt.rclonelink")
require.NoError(t, err)
require.NotNil(t, src)
dst, err := operations.Copy(ctx, f, nil, "dst/link.txt.rclonelink", src)
require.NoError(t, err)
require.NotNil(t, dst)
// Test that we made a symlink and it has the right contents
dstPath := filepath.Join(r.LocalName, "dst", "link.txt")
linkContents, err := os.Readlink(dstPath)
require.NoError(t, err)
assert.Equal(t, "file.txt", linkContents)
}

View File

@@ -6,6 +6,7 @@ package local
import ( import (
"fmt" "fmt"
"strings" "strings"
"sync/atomic"
"syscall" "syscall"
"github.com/pkg/xattr" "github.com/pkg/xattr"
@@ -27,7 +28,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
// Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris) // Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris)
if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR { if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR {
// Show xattrs not supported // Show xattrs not supported
if f.xattrSupported.CompareAndSwap(1, 0) { if atomic.CompareAndSwapInt32(&f.xattrSupported, 1, 0) {
fs.Errorf(f, "xattrs not supported - disabling: %v", err) fs.Errorf(f, "xattrs not supported - disabling: %v", err)
} }
return true return true
@@ -40,7 +41,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
// It doesn't return any attributes owned by this backend in // It doesn't return any attributes owned by this backend in
// metadataKeys // metadataKeys
func (o *Object) getXattr() (metadata fs.Metadata, err error) { func (o *Object) getXattr() (metadata fs.Metadata, err error) {
if !xattrSupported || o.fs.xattrSupported.Load() == 0 { if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
return nil, nil return nil, nil
} }
var list []string var list []string
@@ -89,7 +90,7 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
// //
// It doesn't set any attributes owned by this backend in metadataKeys // It doesn't set any attributes owned by this backend in metadataKeys
func (o *Object) setXattr(metadata fs.Metadata) (err error) { func (o *Object) setXattr(metadata fs.Metadata) (err error) {
if !xattrSupported || o.fs.xattrSupported.Load() == 0 { if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
return nil return nil
} }
for k, value := range metadata { for k, value := range metadata {

View File

@@ -85,11 +85,10 @@ func init() {
Name: "mailru", Name: "mailru",
Description: "Mail.ru Cloud", Description: "Mail.ru Cloud",
NewFs: NewFs, NewFs: NewFs,
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: []fs.Option{{
Name: "user", Name: "user",
Help: "User name (usually email).", Help: "User name (usually email).",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "pass", Name: "pass",
Help: `Password. Help: `Password.
@@ -214,7 +213,7 @@ Supported quirks: atomicmkdir binlist unknowndirs`,
encoder.EncodeWin | // :?"*<>| encoder.EncodeWin | // :?"*<>|
encoder.EncodeBackSlash | encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8), encoder.EncodeInvalidUtf8),
}}...), }},
}) })
} }

View File

@@ -58,10 +58,9 @@ func init() {
Description: "Mega", Description: "Mega",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "user", Name: "user",
Help: "User name.", Help: "User name.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "pass", Name: "pass",
Help: "Password.", Help: "Password.",
@@ -206,7 +205,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
} }
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
// cache *mega.Mega on username so we can reuse and share // cache *mega.Mega on username so we can re-use and share
// them between remotes. They are expensive to make as they // them between remotes. They are expensive to make as they
// contain all the objects and sharing the objects makes the // contain all the objects and sharing the objects makes the
// move code easier as we don't have to worry about mixing // move code easier as we don't have to worry about mixing

View File

@@ -65,13 +65,11 @@ HTTP is provided primarily for debugging purposes.`,
Help: `Domain+path of NetStorage host to connect to. Help: `Domain+path of NetStorage host to connect to.
Format should be ` + "`<domain>/<internal folders>`", Format should be ` + "`<domain>/<internal folders>`",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "account", Name: "account",
Help: "Set the NetStorage account name", Help: "Set the NetStorage account name",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "secret", Name: "secret",
Help: `Set the NetStorage account secret/G2O key for authentication. Help: `Set the NetStorage account secret/G2O key for authentication.

View File

@@ -99,16 +99,6 @@ type ItemReference struct {
DriveType string `json:"driveType"` // Type of the drive, Read-Only DriveType string `json:"driveType"` // Type of the drive, Read-Only
} }
// GetID returns a normalized ID of the item
// If DriveID is known it will be prefixed to the ID with # separator
// Can be parsed using onedrive.parseNormalizedID(normalizedID)
func (i *ItemReference) GetID() string {
if !strings.Contains(i.ID, "#") {
return i.DriveID + "#" + i.ID
}
return i.ID
}
// RemoteItemFacet groups data needed to reference a OneDrive remote item // RemoteItemFacet groups data needed to reference a OneDrive remote item
type RemoteItemFacet struct { type RemoteItemFacet struct {
ID string `json:"id"` // The unique identifier of the item within the remote Drive. Read-only. ID string `json:"id"` // The unique identifier of the item within the remote Drive. Read-only.
@@ -195,8 +185,8 @@ type Item struct {
Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only. Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only.
} }
// DeltaResponse is the response to the view delta method // ViewDeltaResponse is the response to the view delta method
type DeltaResponse struct { type ViewDeltaResponse struct {
Value []Item `json:"value"` // An array of Item objects which have been created, modified, or deleted. Value []Item `json:"value"` // An array of Item objects which have been created, modified, or deleted.
NextLink string `json:"@odata.nextLink"` // A URL to retrieve the next available page of changes. NextLink string `json:"@odata.nextLink"` // A URL to retrieve the next available page of changes.
DeltaLink string `json:"@odata.deltaLink"` // A URL returned instead of @odata.nextLink after all current changes have been returned. Used to read the next set of changes in the future. DeltaLink string `json:"@odata.deltaLink"` // A URL returned instead of @odata.nextLink after all current changes have been returned. Used to read the next set of changes in the future.
@@ -448,27 +438,3 @@ type Version struct {
type VersionsResponse struct { type VersionsResponse struct {
Versions []Version `json:"value"` Versions []Version `json:"value"`
} }
// DriveResource is returned from /me/drive
type DriveResource struct {
DriveID string `json:"id"`
DriveName string `json:"name"`
DriveType string `json:"driveType"`
}
// DrivesResponse is returned from /sites/{siteID}/drives",
type DrivesResponse struct {
Drives []DriveResource `json:"value"`
}
// SiteResource is part of the response from from "/sites/root:"
type SiteResource struct {
SiteID string `json:"id"`
SiteName string `json:"displayName"`
SiteURL string `json:"webUrl"`
}
// SiteResponse is returned from "/sites/root:"
type SiteResponse struct {
Sites []SiteResource `json:"value"`
}

View File

@@ -131,11 +131,10 @@ Note that the chunks will be buffered into memory.`,
Default: defaultChunkSize, Default: defaultChunkSize,
Advanced: true, Advanced: true,
}, { }, {
Name: "drive_id", Name: "drive_id",
Help: "The ID of the drive to use.", Help: "The ID of the drive to use.",
Default: "", Default: "",
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "drive_type", Name: "drive_type",
Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").", Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
@@ -149,8 +148,7 @@ This isn't normally needed, but in special circumstances you might
know the folder ID that you wish to access but not be able to get know the folder ID that you wish to access but not be able to get
there through a path traversal. there through a path traversal.
`, `,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "access_scopes", Name: "access_scopes",
Help: `Set scopes to be requested by rclone. Help: `Set scopes to be requested by rclone.
@@ -262,8 +260,7 @@ this flag there.
At the time of writing this only works with OneDrive personal paid accounts. At the time of writing this only works with OneDrive personal paid accounts.
`, `,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "hash_type", Name: "hash_type",
Default: "auto", Default: "auto",
@@ -324,37 +321,6 @@ the --onedrive-av-override flag, or av_override = true in the config
file. file.
`, `,
Advanced: true, Advanced: true,
}, {
Name: "delta",
Default: false,
Help: strings.ReplaceAll(`If set rclone will use delta listing to implement recursive listings.
If this flag is set the the onedrive backend will advertise |ListR|
support for recursive listings.
Setting this flag speeds up these things greatly:
rclone lsf -R onedrive:
rclone size onedrive:
rclone rc vfs/refresh recursive=true
**However** the delta listing API **only** works at the root of the
drive. If you use it not at the root then it recurses from the root
and discards all the data that is not under the directory you asked
for. So it will be correct but may not be very efficient.
This is why this flag is not set as the default.
As a rule of thumb if nearly all of your data is under rclone's root
directory (the |root/directory| in |onedrive:root/directory|) then
using this flag will be be a big performance win. If your data is
mostly not under the root then using this flag will be a big
performance loss.
It is recommended if you are mounting your onedrive at the root
(or near the root when using crypt) and using rclone |rc vfs/refresh|.
`, "|", "`"),
Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -402,6 +368,28 @@ It is recommended if you are mounting your onedrive at the root
}) })
} }
type driveResource struct {
DriveID string `json:"id"`
DriveName string `json:"name"`
DriveType string `json:"driveType"`
}
type drivesResponse struct {
Drives []driveResource `json:"value"`
}
type siteResource struct {
SiteID string `json:"id"`
SiteName string `json:"displayName"`
SiteURL string `json:"webUrl"`
}
type siteResponse struct {
Sites []siteResource `json:"value"`
}
type deltaResponse struct {
DeltaLink string `json:"@odata.deltaLink"`
Value []api.Item `json:"value"`
}
// Get the region and graphURL from the config // Get the region and graphURL from the config
func getRegionURL(m configmap.Mapper) (region, graphURL string) { func getRegionURL(m configmap.Mapper) (region, graphURL string) {
region, _ = m.Get("region") region, _ = m.Get("region")
@@ -428,7 +416,7 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
RootURL: graphURL, RootURL: graphURL,
Path: "/sites/root:" + opt.relativePath, Path: "/sites/root:" + opt.relativePath,
} }
site := api.SiteResource{} site := siteResource{}
_, err := srv.CallJSON(ctx, &opt.opts, nil, &site) _, err := srv.CallJSON(ctx, &opt.opts, nil, &site)
if err != nil { if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available site by relative path: %v", err)) return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available site by relative path: %v", err))
@@ -445,7 +433,7 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
} }
} }
drives := api.DrivesResponse{} drives := drivesResponse{}
// We don't have the final ID yet? // We don't have the final ID yet?
// query Microsoft Graph // query Microsoft Graph
@@ -458,7 +446,7 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
// Also call /me/drive as sometimes /me/drives doesn't return it #4068 // Also call /me/drive as sometimes /me/drives doesn't return it #4068
if opt.opts.Path == "/me/drives" { if opt.opts.Path == "/me/drives" {
opt.opts.Path = "/me/drive" opt.opts.Path = "/me/drive"
meDrive := api.DriveResource{} meDrive := driveResource{}
_, err := srv.CallJSON(ctx, &opt.opts, nil, &meDrive) _, err := srv.CallJSON(ctx, &opt.opts, nil, &meDrive)
if err != nil { if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err)) return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err))
@@ -477,7 +465,7 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
} }
} }
} else { } else {
drives.Drives = append(drives.Drives, api.DriveResource{ drives.Drives = append(drives.Drives, driveResource{
DriveID: opt.finalDriveID, DriveID: opt.finalDriveID,
DriveName: "Chosen Drive ID", DriveName: "Chosen Drive ID",
DriveType: "drive", DriveType: "drive",
@@ -581,18 +569,15 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
case "url": case "url":
return fs.ConfigInput("url_end", "config_site_url", `Site URL return fs.ConfigInput("url_end", "config_site_url", `Site URL
Examples: Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
- "mysite"
- "https://XXX.sharepoint.com/sites/mysite"
- "https://XXX.sharepoint.com/teams/ID"
`) `)
case "url_end": case "url_end":
siteURL := config.Result siteURL := config.Result
re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`) re := regexp.MustCompile(`https://.*\.sharepoint\.com/sites/(.*)`)
match := re.FindStringSubmatch(siteURL) match := re.FindStringSubmatch(siteURL)
if len(match) == 2 { if len(match) == 2 {
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{ return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: match[1], relativePath: "/sites/" + match[1],
}) })
} }
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{ return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
@@ -614,7 +599,7 @@ Examples:
Path: "/sites?search=" + searchTerm, Path: "/sites?search=" + searchTerm,
} }
sites := api.SiteResponse{} sites := siteResponse{}
_, err := srv.CallJSON(ctx, &opts, nil, &sites) _, err := srv.CallJSON(ctx, &opts, nil, &sites)
if err != nil { if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available sites: %v", err)) return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available sites: %v", err))
@@ -676,7 +661,6 @@ type Options struct {
LinkPassword string `config:"link_password"` LinkPassword string `config:"link_password"`
HashType string `config:"hash_type"` HashType string `config:"hash_type"`
AVOverride bool `config:"av_override"` AVOverride bool `config:"av_override"`
Delta bool `config:"delta"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
} }
@@ -1008,11 +992,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.dirCache = dircache.New(root, rootID, f) f.dirCache = dircache.New(root, rootID, f)
// ListR only supported if delta set
if !f.opt.Delta {
f.features.ListR = nil
}
// Find the current root // Find the current root
err = f.dirCache.FindRoot(ctx, false) err = f.dirCache.FindRoot(ctx, false)
if err != nil { if err != nil {
@@ -1132,29 +1111,32 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
// If directories is set it only sends directories // If directories is set it only sends directories
// User function to process a File item from listAll // User function to process a File item from listAll
// //
// If an error is returned then processing stops // Should return true to finish processing
type listAllFn func(*api.Item) error type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found // Lists the directory required calling the user function on each item found
// //
// If the user fn ever returns true then it early exits with found = true // If the user fn ever returns true then it early exits with found = true
// func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
// This listing function works on both normal listings and delta listings // Top parameter asks for bigger pages of data
func (f *Fs) _listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn, opts *rest.Opts, result any, pValue *[]api.Item, pNextLink *string) (err error) { // https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := f.newOptsCall(dirID, "GET", fmt.Sprintf("/children?$top=%d", f.opt.ListChunk))
OUTER:
for { for {
var result api.ListChildrenResponse
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, opts, nil, result) resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return fmt.Errorf("couldn't list files: %w", err) return found, fmt.Errorf("couldn't list files: %w", err)
} }
if len(*pValue) == 0 { if len(result.Value) == 0 {
break break
} }
for i := range *pValue { for i := range result.Value {
item := &(*pValue)[i] item := &result.Value[i]
isFolder := item.GetFolder() != nil isFolder := item.GetFolder() != nil
if isFolder { if isFolder {
if filesOnly { if filesOnly {
@@ -1169,60 +1151,18 @@ func (f *Fs) _listAll(ctx context.Context, dirID string, directoriesOnly bool, f
continue continue
} }
item.Name = f.opt.Enc.ToStandardName(item.GetName()) item.Name = f.opt.Enc.ToStandardName(item.GetName())
err = fn(item) if fn(item) {
if err != nil { found = true
return err break OUTER
} }
} }
if *pNextLink == "" { if result.NextLink == "" {
break break
} }
opts.Path = "" opts.Path = ""
opts.Parameters = nil opts.RootURL = result.NextLink
opts.RootURL = *pNextLink
// reset results
*pNextLink = ""
*pValue = nil
} }
return nil return
}
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (err error) {
// Top parameter asks for bigger pages of data
// https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := f.newOptsCall(dirID, "GET", fmt.Sprintf("/children?$top=%d", f.opt.ListChunk))
var result api.ListChildrenResponse
return f._listAll(ctx, dirID, directoriesOnly, filesOnly, fn, &opts, &result, &result.Value, &result.NextLink)
}
// Convert a list item into a DirEntry
//
// Can return nil for an item which should be skipped
func (f *Fs) itemToDirEntry(ctx context.Context, dir string, info *api.Item) (entry fs.DirEntry, err error) {
if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
fs.Debugf(info.Name, "OneNote file not shown in directory listing")
return nil, nil
}
remote := path.Join(dir, info.GetName())
folder := info.GetFolder()
if folder != nil {
// cache the directory ID for later lookups
id := info.GetID()
f.dirCache.Put(remote, id)
d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
d.SetItems(folder.ChildCount)
entry = d
} else {
o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
return nil, err
}
entry = o
}
return entry, nil
} }
// List the objects and directories in dir into entries. The // List the objects and directories in dir into entries. The
@@ -1239,137 +1179,41 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if err != nil { if err != nil {
return nil, err return nil, err
} }
err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) error { var iErr error
entry, err := f.itemToDirEntry(ctx, dir, info) _, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
if err == nil { if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
entries = append(entries, entry) fs.Debugf(info.Name, "OneNote file not shown in directory listing")
return false
} }
return err
remote := path.Join(dir, info.GetName())
folder := info.GetFolder()
if folder != nil {
// cache the directory ID for later lookups
id := info.GetID()
f.dirCache.Put(remote, id)
d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
d.SetItems(folder.ChildCount)
entries = append(entries, d)
} else {
o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
iErr = err
return true
}
entries = append(entries, o)
}
return false
}) })
if err != nil { if err != nil {
return nil, err return nil, err
} }
if iErr != nil {
return nil, iErr
}
return entries, nil return entries, nil
} }
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
// Make sure this ID is in the directory cache
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
// ListR only works at the root of a onedrive, not on a folder
// So we have to filter things outside of the root which is
// inefficient.
list := walk.NewListRHelper(callback)
// list a folder conventionally - used for shared folders
var listFolder func(dir string) error
listFolder = func(dir string) error {
entries, err := f.List(ctx, dir)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
if _, isDir := entry.(fs.Directory); isDir {
err = listFolder(entry.Remote())
if err != nil {
return err
}
}
}
return nil
}
// This code relies on the fact that directories are sent before their children. This isn't
// mentioned in the docs though, so maybe it shouldn't be relied on.
seen := map[string]struct{}{}
fn := func(info *api.Item) error {
var parentPath string
var ok bool
id := info.GetID()
// The API can produce duplicates, so skip them
if _, found := seen[id]; found {
return nil
}
seen[id] = struct{}{}
// Skip the root directory
if id == directoryID {
return nil
}
// Skip deleted items
if info.Deleted != nil {
return nil
}
dirID := info.GetParentReference().GetID()
// Skip files that don't have their parent directory
// cached as they are outside the root.
parentPath, ok = f.dirCache.GetInv(dirID)
if !ok {
return nil
}
// Skip files not under the root directory
remote := path.Join(parentPath, info.GetName())
if dir != "" && !strings.HasPrefix(remote, dir+"/") {
return nil
}
entry, err := f.itemToDirEntry(ctx, parentPath, info)
if err != nil {
return err
}
err = list.Add(entry)
if err != nil {
return err
}
// If this is a shared folder, we'll need list it too
if info.RemoteItem != nil && info.RemoteItem.Folder != nil {
fs.Debugf(remote, "Listing shared directory")
return listFolder(remote)
}
return nil
}
opts := rest.Opts{
Method: "GET",
Path: "/root/delta",
Parameters: map[string][]string{
// "token": {token},
"$top": {fmt.Sprintf("%d", f.opt.ListChunk)},
},
}
var result api.DeltaResponse
err = f._listAll(ctx, "", false, false, fn, &opts, &result, &result.Value, &result.NextLink)
if err != nil {
return err
}
return list.Flush()
}
// Creates from the parameters passed in a half finished Object which // Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it // must have setMetaData called on it
// //
@@ -1438,12 +1282,15 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
} }
if check { if check {
// check to see if there are any items // check to see if there are any items
err := f.listAll(ctx, rootID, false, false, func(item *api.Item) error { found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool {
return fs.ErrorDirectoryNotEmpty return true
}) })
if err != nil { if err != nil {
return err return err
} }
if found {
return fs.ErrorDirectoryNotEmpty
}
} }
err = f.deleteObject(ctx, rootID) err = f.deleteObject(ctx, rootID)
if err != nil { if err != nil {
@@ -2628,7 +2475,7 @@ func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (nextDeltaToken str
return return
} }
func (f *Fs) changeNotifyNextChange(ctx context.Context, token string) (delta api.DeltaResponse, err error) { func (f *Fs) changeNotifyNextChange(ctx context.Context, token string) (delta deltaResponse, err error) {
opts := f.buildDriveDeltaOpts(token) opts := f.buildDriveDeltaOpts(token)
_, err = f.srv.CallJSON(ctx, &opts, nil, &delta) _, err = f.srv.CallJSON(ctx, &opts, nil, &delta)
@@ -2747,7 +2594,6 @@ var (
_ fs.Abouter = (*Fs)(nil) _ fs.Abouter = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil) _ fs.PublicLinker = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil) _ fs.CleanUpper = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Object = (*Object)(nil) _ fs.Object = (*Object)(nil)
_ fs.MimeTyper = &Object{} _ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{} _ fs.IDer = &Object{}

View File

@@ -42,10 +42,9 @@ func init() {
Description: "OpenDrive", Description: "OpenDrive",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "username", Name: "username",
Help: "Username.", Help: "Username.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "password", Name: "password",
Help: "Password.", Help: "Password.",
@@ -767,17 +766,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return f.shouldRetry(ctx, resp, err) return f.shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
if apiError, ok := err.(*Error); ok {
// Work around a bug maybe in opendrive or maybe in rclone.
//
// We should know whether the folder exists or not by the call to
// FindDir above so exactly why it is not found here is a mystery.
//
// This manifests as a failure in fs/sync TestSyncOverlapWithFilter
if apiError.Info.Message == "Folder is already deleted" {
return fs.DirEntries{}, nil
}
}
return nil, fmt.Errorf("failed to get folder list: %w", err) return nil, fmt.Errorf("failed to get folder list: %w", err)
} }

View File

@@ -13,6 +13,7 @@ import (
"github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage" "github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
) )
const ( const (
@@ -127,3 +128,18 @@ func useBYOKCopyObject(fs *Fs, request *objectstorage.CopyObjectRequest) {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256) request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
} }
} }
func useBYOKUpload(fs *Fs, request *transfer.UploadRequest) {
if fs.opt.SSEKMSKeyID != "" {
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
}
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}

View File

@@ -6,7 +6,6 @@ package oracleobjectstorage
import ( import (
"context" "context"
"fmt" "fmt"
"sort"
"strings" "strings"
"time" "time"
@@ -197,32 +196,6 @@ func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string
// for "dir" and it returns "dirKey" // for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) ( func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
uploads []*objectstorage.MultipartUpload, err error) { uploads []*objectstorage.MultipartUpload, err error) {
return f.listMultipartUploadsObject(ctx, bucketName, directory, false)
}
// listMultipartUploads finds first outstanding multipart uploads for (bucket, key)
//
// Note that rather lazily we treat key as a prefix, so it matches
// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) findLatestMultipartUpload(ctx context.Context, bucketName, directory string) (
uploads []*objectstorage.MultipartUpload, err error) {
pastUploads, err := f.listMultipartUploadsObject(ctx, bucketName, directory, true)
if err != nil {
return nil, err
}
if len(pastUploads) > 0 {
sort.Slice(pastUploads, func(i, j int) bool {
return pastUploads[i].TimeCreated.After(pastUploads[j].TimeCreated.Time)
})
return pastUploads[:1], nil
}
return nil, err
}
func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directory string, exact bool) (
uploads []*objectstorage.MultipartUpload, err error) {
uploads = []*objectstorage.MultipartUpload{} uploads = []*objectstorage.MultipartUpload{}
req := objectstorage.ListMultipartUploadsRequest{ req := objectstorage.ListMultipartUploadsRequest{
@@ -244,13 +217,7 @@ func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directo
if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) { if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
continue continue
} }
if exact { uploads = append(uploads, &response.Items[index])
if *item.Object == directory {
uploads = append(uploads, &response.Items[index])
}
} else {
uploads = append(uploads, &response.Items[index])
}
} }
if response.OpcNextPage == nil { if response.OpcNextPage == nil {
break break
@@ -259,34 +226,3 @@ func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directo
} }
return uploads, nil return uploads, nil
} }
func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPath string, uploadID string) (
uploadedParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
uploadedParts = make(map[int]objectstorage.MultipartUploadPartSummary)
req := objectstorage.ListMultipartUploadPartsRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
UploadId: common.String(uploadID),
Limit: common.Int(1000),
}
var response objectstorage.ListMultipartUploadPartsResponse
for {
err = f.pacer.Call(func() (bool, error) {
response, err = f.srv.ListMultipartUploadParts(ctx, req)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
return uploadedParts, err
}
for _, item := range response.Items {
uploadedParts[*item.PartNumber] = item
}
if response.OpcNextPage == nil {
break
}
req.Page = response.OpcNextPage
}
return uploadedParts, nil
}

View File

@@ -1,441 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"strings"
"sync"
"time"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pool"
"golang.org/x/net/http/httpguts"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash"
)
var warnStreamUpload sync.Once
// Info needed for an upload
type uploadInfo struct {
req *objectstorage.PutObjectRequest
md5sumHex string
}
type objectChunkWriter struct {
chunkSize int64
size int64
f *Fs
bucket *string
key *string
uploadID *string
partsToCommit []objectstorage.CommitMultipartUploadPartDetails
partsToCommitMu sync.Mutex
existingParts map[int]objectstorage.MultipartUploadPartSummary
eTag string
md5sMu sync.Mutex
md5s []byte
ui uploadInfo
o *Object
}
func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error {
_, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
Open: o.fs,
OpenOptions: options,
})
return err
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(
ctx context.Context,
remote string,
src fs.ObjectInfo,
options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
ui, err := o.prepareUpload(ctx, src, options)
if err != nil {
return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
}
uploadParts := f.opt.MaxUploadParts
if uploadParts < 1 {
uploadParts = 1
} else if uploadParts > maxUploadParts {
uploadParts = maxUploadParts
}
size := src.Size()
// calculate size of parts
chunkSize := f.opt.ChunkSize
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of
// 48 GiB which seems like a not too unreasonable limit.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
f.opt.ChunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts)))
})
} else {
chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
}
uploadID, existingParts, err := o.createMultipartUpload(ctx, ui.req)
if err != nil {
return info, nil, fmt.Errorf("create multipart upload request failed: %w", err)
}
bucketName, bucketPath := o.split()
chunkWriter := &objectChunkWriter{
chunkSize: int64(chunkSize),
size: size,
f: f,
bucket: &bucketName,
key: &bucketPath,
uploadID: &uploadID,
existingParts: existingParts,
ui: ui,
o: o,
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(chunkSize),
Concurrency: o.fs.opt.UploadConcurrency,
LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
fs.Debugf(o, "open chunk writer: started multipart upload: %v", uploadID)
return info, chunkWriter, err
}
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) {
if chunkNumber < 0 {
err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
return -1, err
}
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(2)
}
m := md5.New()
currentChunkSize, err := io.Copy(m, reader)
if err != nil {
return -1, err
}
// If no data read, don't write the chunk
if currentChunkSize == 0 {
return 0, nil
}
md5sumBinary := m.Sum([]byte{})
w.addMd5(&md5sumBinary, int64(chunkNumber))
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
// Object storage requires 1 <= PartNumber <= 10000
ossPartNumber := chunkNumber + 1
if existing, ok := w.existingParts[ossPartNumber]; ok {
if md5sum == *existing.Md5 {
fs.Debugf(w.o, "matched uploaded part found, part num %d, skipping part, md5=%v", *existing.PartNumber, md5sum)
w.addCompletedPart(existing.PartNumber, existing.Etag)
return currentChunkSize, nil
}
}
req := objectstorage.UploadPartRequest{
NamespaceName: common.String(w.f.opt.Namespace),
BucketName: w.bucket,
ObjectName: w.key,
UploadId: w.uploadID,
UploadPartNum: common.Int(ossPartNumber),
ContentLength: common.Int64(currentChunkSize),
ContentMD5: common.String(md5sum),
}
w.o.applyPartUploadOptions(w.ui.req, &req)
var resp objectstorage.UploadPartResponse
err = w.f.pacer.Call(func() (bool, error) {
// req.UploadPartBody = io.NopCloser(bytes.NewReader(buf))
// rewind the reader on retry and after reading md5
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
req.UploadPartBody = io.NopCloser(reader)
resp, err = w.f.srv.UploadPart(ctx, req)
if err != nil {
if ossPartNumber <= 8 {
return shouldRetry(ctx, resp.HTTPResponse(), err)
}
// retry all chunks once have done the first few
return true, err
}
return false, err
})
if err != nil {
fs.Errorf(w.o, "multipart upload failed to upload part:%d err: %v", ossPartNumber, err)
return -1, fmt.Errorf("multipart upload failed to upload part: %w", err)
}
w.addCompletedPart(&ossPartNumber, resp.ETag)
return currentChunkSize, err
}
// add a part number and etag to the completed parts
func (w *objectChunkWriter) addCompletedPart(partNum *int, eTag *string) {
w.partsToCommitMu.Lock()
defer w.partsToCommitMu.Unlock()
w.partsToCommit = append(w.partsToCommit, objectstorage.CommitMultipartUploadPartDetails{
PartNum: partNum,
Etag: eTag,
})
}
func (w *objectChunkWriter) Close(ctx context.Context) (err error) {
req := objectstorage.CommitMultipartUploadRequest{
NamespaceName: common.String(w.f.opt.Namespace),
BucketName: w.bucket,
ObjectName: w.key,
UploadId: w.uploadID,
}
req.PartsToCommit = w.partsToCommit
var resp objectstorage.CommitMultipartUploadResponse
err = w.f.pacer.Call(func() (bool, error) {
resp, err = w.f.srv.CommitMultipartUpload(ctx, req)
// if multipart is corrupted, we will abort the uploadId
if isMultiPartUploadCorrupted(err) {
fs.Debugf(w.o, "multipart uploadId %v is corrupted, aborting...", *w.uploadID)
_ = w.Abort(ctx)
return false, err
}
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return err
}
w.eTag = *resp.ETag
hashOfHashes := md5.Sum(w.md5s)
wantMultipartMd5 := fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hashOfHashes[:]), len(w.partsToCommit))
gotMultipartMd5 := *resp.OpcMultipartMd5
if wantMultipartMd5 != gotMultipartMd5 {
fs.Errorf(w.o, "multipart upload corrupted: multipart md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
return fmt.Errorf("multipart upload corrupted: md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
}
fs.Debugf(w.o, "multipart upload %v md5 matched: expecting %s and got %s", *w.uploadID, wantMultipartMd5, gotMultipartMd5)
return nil
}
func isMultiPartUploadCorrupted(err error) bool {
if err == nil {
return false
}
// Check if this oci-err object, and if it is multipart commit error
if ociError, ok := err.(common.ServiceError); ok {
// If it is a timeout then we want to retry that
if ociError.GetCode() == "InvalidUploadPart" {
return true
}
}
return false
}
func (w *objectChunkWriter) Abort(ctx context.Context) error {
fs.Debugf(w.o, "Cancelling multipart upload")
err := w.o.fs.abortMultiPartUpload(
ctx,
w.bucket,
w.key,
w.uploadID)
if err != nil {
fs.Debugf(w.o, "Failed to cancel multipart upload: %v", err)
} else {
fs.Debugf(w.o, "canceled and aborted multipart upload: %v", *w.uploadID)
}
return err
}
// addMd5 adds a binary md5 to the md5 calculated so far
func (w *objectChunkWriter) addMd5(md5binary *[]byte, chunkNumber int64) {
w.md5sMu.Lock()
defer w.md5sMu.Unlock()
start := chunkNumber * md5.Size
end := start + md5.Size
if extend := end - int64(len(w.md5s)); extend > 0 {
w.md5s = append(w.md5s, make([]byte, extend)...)
}
copy(w.md5s[start:end], (*md5binary)[:])
}
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
bucket, bucketPath := o.split()
ui.req = &objectstorage.PutObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucket),
ObjectName: common.String(bucketPath),
}
// Set the mtime in the metadata
modTime := src.ModTime(ctx)
// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
if err != nil {
return ui, fmt.Errorf("failed to read metadata from source object: %w", err)
}
ui.req.OpcMeta = make(map[string]string, len(meta)+2)
// merge metadata into request and user metadata
for k, v := range meta {
pv := common.String(v)
k = strings.ToLower(k)
switch k {
case "cache-control":
ui.req.CacheControl = pv
case "content-disposition":
ui.req.ContentDisposition = pv
case "content-encoding":
ui.req.ContentEncoding = pv
case "content-language":
ui.req.ContentLanguage = pv
case "content-type":
ui.req.ContentType = pv
case "tier":
// ignore
case "mtime":
// mtime in meta overrides source ModTime
metaModTime, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
} else {
modTime = metaModTime
}
case "btime":
// write as metadata since we can't set it
ui.req.OpcMeta[k] = v
default:
ui.req.OpcMeta[k] = v
}
}
// Set the mtime in the metadata
ui.req.OpcMeta[metaMtime] = swift.TimeToFloatString(modTime)
// read the md5sum if available
// - for non-multipart
// - so we can add a ContentMD5
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
size := src.Size()
isMultipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
var md5sumBase64 string
if !isMultipart || !o.fs.opt.DisableChecksum {
ui.md5sumHex, err = src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(ui.md5sumHex) {
hashBytes, err := hex.DecodeString(ui.md5sumHex)
if err == nil {
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
if isMultipart && !o.fs.opt.DisableChecksum {
// Set the md5sum as metadata on the object if
// - a multipart upload
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
// provided checksums aren't disabled
ui.req.OpcMeta[metaMD5Hash] = md5sumBase64
}
}
}
}
// Set the content type if it isn't set already
if ui.req.ContentType == nil {
ui.req.ContentType = common.String(fs.MimeType(ctx, src))
}
if size >= 0 {
ui.req.ContentLength = common.Int64(size)
}
if md5sumBase64 != "" {
ui.req.ContentMD5 = &md5sumBase64
}
o.applyPutOptions(ui.req, options...)
useBYOKPutObject(o.fs, ui.req)
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return ui, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
ui.req.StorageTier = storageTier
}
// Check metadata keys and values are valid
for key, value := range ui.req.OpcMeta {
if !httpguts.ValidHeaderFieldName(key) {
fs.Errorf(o, "Dropping invalid metadata key %q", key)
delete(ui.req.OpcMeta, key)
} else if value == "" {
fs.Errorf(o, "Dropping nil metadata value for key %q", key)
delete(ui.req.OpcMeta, key)
} else if !httpguts.ValidHeaderFieldValue(value) {
fs.Errorf(o, "Dropping invalid metadata value %q for key %q", value, key)
delete(ui.req.OpcMeta, key)
}
}
return ui, nil
}
func (o *Object) createMultipartUpload(ctx context.Context, putReq *objectstorage.PutObjectRequest) (
uploadID string, existingParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
bucketName, bucketPath := o.split()
f := o.fs
if f.opt.AttemptResumeUpload {
fs.Debugf(o, "attempting to resume upload for %v (if any)", o.remote)
resumeUploads, err := o.fs.findLatestMultipartUpload(ctx, bucketName, bucketPath)
if err == nil && len(resumeUploads) > 0 {
uploadID = *resumeUploads[0].UploadId
existingParts, err = f.listMultipartUploadParts(ctx, bucketName, bucketPath, uploadID)
if err == nil {
fs.Debugf(o, "resuming with existing upload id: %v", uploadID)
return uploadID, existingParts, err
}
}
}
req := objectstorage.CreateMultipartUploadRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
}
req.Object = common.String(bucketPath)
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return "", nil, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
req.StorageTier = storageTier
}
o.applyMultipartUploadOptions(putReq, &req)
var resp objectstorage.CreateMultipartUploadResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CreateMultipartUpload(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return "", existingParts, err
}
existingParts = make(map[int]objectstorage.MultipartUploadPartSummary)
uploadID = *resp.UploadId
fs.Debugf(o, "created new upload id: %v", uploadID)
return uploadID, existingParts, err
}

View File

@@ -4,14 +4,12 @@
package oracleobjectstorage package oracleobjectstorage
import ( import (
"bytes"
"context" "context"
"encoding/base64" "encoding/base64"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"os"
"regexp" "regexp"
"strconv" "strconv"
"strings" "strings"
@@ -20,8 +18,10 @@ import (
"github.com/ncw/swift/v2" "github.com/ncw/swift/v2"
"github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage" "github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
) )
// ------------------------------------------------------------ // ------------------------------------------------------------
@@ -367,28 +367,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
return resp.HTTPResponse().Body, nil return resp.HTTPResponse().Body, nil
} }
func isZeroLength(streamReader io.Reader) bool {
switch v := streamReader.(type) {
case *bytes.Buffer:
return v.Len() == 0
case *bytes.Reader:
return v.Len() == 0
case *strings.Reader:
return v.Len() == 0
case *os.File:
fi, err := v.Stat()
if err != nil {
return false
}
return fi.Size() == 0
default:
return false
}
}
// Update an object if it has changed // Update an object if it has changed
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucketName, _ := o.split() bucketName, bucketPath := o.split()
err = o.fs.makeBucket(ctx, bucketName) err = o.fs.makeBucket(ctx, bucketName)
if err != nil { if err != nil {
return err return err
@@ -396,24 +377,142 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// determine if we like upload single or multipart. // determine if we like upload single or multipart.
size := src.Size() size := src.Size()
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff) multipart := size >= int64(o.fs.opt.UploadCutoff)
if isZeroLength(in) {
multipart = false // Set the mtime in the metadata
modTime := src.ModTime(ctx)
metadata := map[string]string{
metaMtime: swift.TimeToFloatString(modTime),
} }
// read the md5sum if available
// - for non-multipart
// - so we can add a ContentMD5
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
var md5sumBase64 string
var md5sumHex string
if !multipart || !o.fs.opt.DisableChecksum {
md5sumHex, err = src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(md5sumHex) {
hashBytes, err := hex.DecodeString(md5sumHex)
if err == nil {
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
if multipart && !o.fs.opt.DisableChecksum {
// Set the md5sum as metadata on the object if
// - a multipart upload
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
// provided checksums aren't disabled
metadata[metaMD5Hash] = md5sumBase64
}
}
}
}
// Guess the content type
mimeType := fs.MimeType(ctx, src)
if multipart { if multipart {
err = o.uploadMultipart(ctx, src, in, options...) chunkSize := int64(o.fs.opt.ChunkSize)
uploadRequest := transfer.UploadRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
ContentType: common.String(mimeType),
PartSize: common.Int64(chunkSize),
AllowMultipartUploads: common.Bool(true),
AllowParrallelUploads: common.Bool(true),
ObjectStorageClient: o.fs.srv,
EnableMultipartChecksumVerification: common.Bool(!o.fs.opt.DisableChecksum),
NumberOfGoroutines: common.Int(o.fs.opt.UploadConcurrency),
Metadata: metadataWithOpcPrefix(metadata),
}
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
uploadRequest.StorageTier = storageTier
}
o.applyMultiPutOptions(&uploadRequest, options...)
useBYOKUpload(o.fs, &uploadRequest)
uploadStreamRequest := transfer.UploadStreamRequest{
UploadRequest: uploadRequest,
StreamReader: in,
}
uploadMgr := transfer.NewUploadManager()
var uploadID = ""
defer atexit.OnError(&err, func() {
if uploadID == "" {
return
}
if o.fs.opt.LeavePartsOnError {
return
}
fs.Debugf(o, "Cancelling multipart upload")
errCancel := o.fs.abortMultiPartUpload(
context.Background(),
bucketName,
bucketPath,
uploadID)
if errCancel != nil {
fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
}
})()
err = o.fs.pacer.Call(func() (bool, error) {
uploadResponse, err := uploadMgr.UploadStream(ctx, uploadStreamRequest)
var httpResponse *http.Response
if err == nil {
if uploadResponse.Type == transfer.MultipartUpload {
if uploadResponse.MultipartUploadResponse != nil {
httpResponse = uploadResponse.MultipartUploadResponse.HTTPResponse()
}
} else {
if uploadResponse.SinglepartUploadResponse != nil {
httpResponse = uploadResponse.SinglepartUploadResponse.HTTPResponse()
}
}
}
if err != nil {
uploadID := ""
if uploadResponse.MultipartUploadResponse != nil && uploadResponse.MultipartUploadResponse.UploadID != nil {
uploadID = *uploadResponse.MultipartUploadResponse.UploadID
fs.Debugf(o, "multipart streaming upload failed, aborting uploadID: %v, may retry", uploadID)
_ = o.fs.abortMultiPartUpload(ctx, bucketName, bucketPath, uploadID)
}
}
return shouldRetry(ctx, httpResponse, err)
})
if err != nil { if err != nil {
fs.Errorf(o, "multipart streaming upload failed %v", err)
return err return err
} }
} else { } else {
ui, err := o.prepareUpload(ctx, src, options) req := objectstorage.PutObjectRequest{
if err != nil { NamespaceName: common.String(o.fs.opt.Namespace),
return fmt.Errorf("failed to prepare upload: %w", err) BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
ContentType: common.String(mimeType),
PutObjectBody: io.NopCloser(in),
OpcMeta: metadata,
} }
if size >= 0 {
req.ContentLength = common.Int64(size)
}
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
req.StorageTier = storageTier
}
o.applyPutOptions(&req, options...)
useBYOKPutObject(o.fs, &req)
var resp objectstorage.PutObjectResponse var resp objectstorage.PutObjectResponse
err = o.fs.pacer.CallNoRetry(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
ui.req.PutObjectBody = io.NopCloser(in) resp, err = o.fs.srv.PutObject(ctx, req)
resp, err = o.fs.srv.PutObject(ctx, *ui.req)
return shouldRetry(ctx, resp.HTTPResponse(), err) return shouldRetry(ctx, resp.HTTPResponse(), err)
}) })
if err != nil { if err != nil {
@@ -492,24 +591,28 @@ func (o *Object) applyGetObjectOptions(req *objectstorage.GetObjectRequest, opti
} }
} }
func (o *Object) applyMultipartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.CreateMultipartUploadRequest) { func (o *Object) applyMultiPutOptions(req *transfer.UploadRequest, options ...fs.OpenOption) {
req.ContentType = putReq.ContentType // Apply upload options
req.ContentLanguage = putReq.ContentLanguage for _, option := range options {
req.ContentEncoding = putReq.ContentEncoding key, value := option.Header()
req.ContentDisposition = putReq.ContentDisposition lowerKey := strings.ToLower(key)
req.CacheControl = putReq.CacheControl switch lowerKey {
req.Metadata = metadataWithOpcPrefix(putReq.OpcMeta) case "":
req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm // ignore
req.OpcSseCustomerKey = putReq.OpcSseCustomerKey case "content-encoding":
req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256 req.ContentEncoding = common.String(value)
req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId case "content-language":
} req.ContentLanguage = common.String(value)
case "content-type":
func (o *Object) applyPartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.UploadPartRequest) { req.ContentType = common.String(value)
req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm default:
req.OpcSseCustomerKey = putReq.OpcSseCustomerKey if strings.HasPrefix(lowerKey, ociMetaPrefix) {
req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256 req.Metadata[lowerKey] = value
req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId } else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
} }
func metadataWithOpcPrefix(src map[string]string) map[string]string { func metadataWithOpcPrefix(src map[string]string) map[string]string {

View File

@@ -13,10 +13,9 @@ import (
const ( const (
maxSizeForCopy = 4768 * 1024 * 1024 maxSizeForCopy = 4768 * 1024 * 1024
maxUploadParts = 10000 minChunkSize = fs.SizeSuffix(1024 * 1024 * 5)
defaultUploadConcurrency = 10
minChunkSize = fs.SizeSuffix(5 * 1024 * 1024)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024) defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
defaultUploadConcurrency = 10
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024) maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
minSleep = 10 * time.Millisecond minSleep = 10 * time.Millisecond
defaultCopyTimeoutDuration = fs.Duration(time.Minute) defaultCopyTimeoutDuration = fs.Duration(time.Minute)
@@ -56,14 +55,12 @@ type Options struct {
ConfigProfile string `config:"config_profile"` ConfigProfile string `config:"config_profile"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"` ChunkSize fs.SizeSuffix `config:"chunk_size"`
MaxUploadParts int `config:"max_upload_parts"`
UploadConcurrency int `config:"upload_concurrency"` UploadConcurrency int `config:"upload_concurrency"`
DisableChecksum bool `config:"disable_checksum"` DisableChecksum bool `config:"disable_checksum"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"` CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
CopyTimeout fs.Duration `config:"copy_timeout"` CopyTimeout fs.Duration `config:"copy_timeout"`
StorageTier string `config:"storage_tier"` StorageTier string `config:"storage_tier"`
LeavePartsOnError bool `config:"leave_parts_on_error"` LeavePartsOnError bool `config:"leave_parts_on_error"`
AttemptResumeUpload bool `config:"attempt_resume_upload"`
NoCheckBucket bool `config:"no_check_bucket"` NoCheckBucket bool `config:"no_check_bucket"`
SSEKMSKeyID string `config:"sse_kms_key_id"` SSEKMSKeyID string `config:"sse_kms_key_id"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"` SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
@@ -95,16 +92,14 @@ func newOptions() []fs.Option {
Help: noAuthHelpText, Help: noAuthHelpText,
}}, }},
}, { }, {
Name: "namespace", Name: "namespace",
Help: "Object storage namespace", Help: "Object storage namespace",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "compartment", Name: "compartment",
Help: "Object storage compartment OCID", Help: "Object storage compartment OCID",
Provider: "!no_auth", Provider: "!no_auth",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "region", Name: "region",
Help: "Object storage Region", Help: "Object storage Region",
@@ -160,8 +155,9 @@ The minimum is 0 and the maximum is 5 GiB.`,
Help: `Chunk size to use for uploading. Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff or files with unknown When uploading files larger than upload_cutoff or files with unknown
size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
as multipart uploads using this chunk size. photos or google docs) they will be uploaded as multipart uploads
using this chunk size.
Note that "upload_concurrency" chunks of this size are buffered Note that "upload_concurrency" chunks of this size are buffered
in memory per transfer. in memory per transfer.
@@ -183,20 +179,6 @@ statistics displayed with "-P" flag.
`, `,
Default: minChunkSize, Default: minChunkSize,
Advanced: true, Advanced: true,
}, {
Name: "max_upload_parts",
Help: `Maximum number of parts in a multipart upload.
This option defines the maximum number of multipart chunks to use
when doing a multipart upload.
OCI has max parts limit of 10,000 chunks.
Rclone will automatically increase the chunk size when uploading a
large file of a known size to stay below this number of chunks limit.
`,
Default: maxUploadParts,
Advanced: true,
}, { }, {
Name: "upload_concurrency", Name: "upload_concurrency",
Help: `Concurrency for multipart uploads. Help: `Concurrency for multipart uploads.
@@ -254,24 +236,12 @@ to start uploading.`,
encoder.EncodeDot, encoder.EncodeDot,
}, { }, {
Name: "leave_parts_on_error", Name: "leave_parts_on_error",
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
It should be set to true for resuming uploads across different sessions. It should be set to true for resuming uploads across different sessions.
WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
additional costs if not cleaned up. additional costs if not cleaned up.
`,
Default: false,
Advanced: true,
}, {
Name: "attempt_resume_upload",
Help: `If true attempt to resume previously started multipart upload for the object.
This will be helpful to speed up multipart transfers by resuming uploads from past session.
WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is
aborted and a new multipart upload is started with the new chunk size.
The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully.
`, `,
Default: false, Default: false,
Advanced: true, Advanced: true,

View File

@@ -138,14 +138,6 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return return
} }
func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
}
return
}
// ------------------------------------------------------------ // ------------------------------------------------------------
// Implement backed that represents a remote object storage server // Implement backed that represents a remote object storage server
// Fs is the interface a cloud storage system must provide // Fs is the interface a cloud storage system must provide
@@ -326,6 +318,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
remote := *object.Name remote := *object.Name
remote = f.opt.Enc.ToStandardPath(remote) remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) { if !strings.HasPrefix(remote, prefix) {
// fs.Debugf(f, "Odd name received %v", object.Name)
continue continue
} }
remote = remote[len(prefix):] remote = remote[len(prefix):]
@@ -565,15 +558,15 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
}) })
} }
func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID *string) (err error) { func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID string) (err error) {
if uploadID == nil || *uploadID == "" { if uploadID == "" {
return nil return nil
} }
request := objectstorage.AbortMultipartUploadRequest{ request := objectstorage.AbortMultipartUploadRequest{
NamespaceName: common.String(f.opt.Namespace), NamespaceName: common.String(f.opt.Namespace),
BucketName: bucketName, BucketName: common.String(bucketName),
ObjectName: bucketPath, ObjectName: common.String(bucketPath),
UploadId: uploadID, UploadId: common.String(uploadID),
} }
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.AbortMultipartUpload(ctx, request) resp, err := f.srv.AbortMultipartUpload(ctx, request)
@@ -596,7 +589,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Durat
if operations.SkipDestructive(ctx, what, "remove pending upload") { if operations.SkipDestructive(ctx, what, "remove pending upload") {
continue continue
} }
_ = f.abortMultiPartUpload(ctx, upload.Bucket, upload.Object, upload.UploadId) _ = f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
} }
} else { } else {
fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.") fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
@@ -691,13 +684,12 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// Check the interfaces are satisfied // Check the interfaces are satisfied
var ( var (
_ fs.Fs = &Fs{} _ fs.Fs = &Fs{}
_ fs.Copier = &Fs{} _ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{} _ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{} _ fs.ListRer = &Fs{}
_ fs.Commander = &Fs{} _ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{} _ fs.CleanUpper = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Object = &Object{} _ fs.Object = &Object{}
_ fs.MimeTyper = &Object{} _ fs.MimeTyper = &Object{}

View File

@@ -30,12 +30,4 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs) return f.setUploadCutoff(cs)
} }
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) { var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
return f.setCopyCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
)

View File

@@ -110,11 +110,10 @@ func init() {
encoder.EncodeBackSlash | encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8), encoder.EncodeInvalidUtf8),
}, { }, {
Name: "root_folder_id", Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.", Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "d0", Default: "d0",
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "hostname", Name: "hostname",
Help: `Hostname to connect to. Help: `Hostname to connect to.
@@ -139,8 +138,7 @@ with rclone authorize.
This is only required when you want to use the cleanup command. Due to a bug This is only required when you want to use the cleanup command. Due to a bug
in the pcloud API the required API does not support OAuth authentication so in the pcloud API the required API does not support OAuth authentication so
we have to rely on user password authentication for it.`, we have to rely on user password authentication for it.`,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "password", Name: "password",
Help: "Your pcloud password.", Help: "Your pcloud password.",

View File

@@ -158,10 +158,9 @@ func init() {
return nil, fmt.Errorf("unknown state %q", config.State) return nil, fmt.Errorf("unknown state %q", config.State)
}, },
Options: append(pikpakOAuthOptions(), []fs.Option{{ Options: append(pikpakOAuthOptions(), []fs.Option{{
Name: "user", Name: "user",
Help: "Pikpak username.", Help: "Pikpak username.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "pass", Name: "pass",
Help: "Pikpak password.", Help: "Pikpak password.",
@@ -174,8 +173,7 @@ Leave blank normally.
Fill in for rclone to use a non root folder as its starting point. Fill in for rclone to use a non root folder as its starting point.
`, `,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "use_trash", Name: "use_trash",
Default: true, Default: true,
@@ -1215,7 +1213,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, sha1Str stri
return nil, fmt.Errorf("failed to upload: %w", err) return nil, fmt.Errorf("failed to upload: %w", err)
} }
// refresh uploaded file info // refresh uploaded file info
// Compared to `newfile.File` this upgrades several fields... // Compared to `newfile.File` this upgrades several feilds...
// audit, links, modified_time, phase, revision, and web_content_link // audit, links, modified_time, phase, revision, and web_content_link
return f.getFile(ctx, newfile.File.ID) return f.getFile(ctx, newfile.File.ID)
} }

View File

@@ -82,15 +82,14 @@ func init() {
OAuth2Config: oauthConfig, OAuth2Config: oauthConfig,
}) })
}, },
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: []fs.Option{{
Name: "api_key", Name: "api_key",
Help: `API Key. Help: `API Key.
This is not normally used - use oauth instead. This is not normally used - use oauth instead.
`, `,
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
Default: "", Default: "",
Sensitive: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -100,7 +99,7 @@ This is not normally used - use oauth instead.
encoder.EncodeBackSlash | encoder.EncodeBackSlash |
encoder.EncodeDoubleQuote | encoder.EncodeDoubleQuote |
encoder.EncodeInvalidUtf8), encoder.EncodeInvalidUtf8),
}}...), }},
}) })
} }

File diff suppressed because it is too large Load Diff

View File

@@ -1,16 +0,0 @@
package protondrive_test
import (
"testing"
"github.com/rclone/rclone/backend/protondrive"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestProtonDrive:",
NilObject: (*protondrive.Object)(nil),
})
}

View File

@@ -67,7 +67,7 @@ func init() {
NoOffline: true, NoOffline: true,
}) })
}, },
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: []fs.Option{{
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
Advanced: true, Advanced: true,
@@ -77,7 +77,7 @@ func init() {
Default: (encoder.Display | Default: (encoder.Display |
encoder.EncodeBackSlash | encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8), encoder.EncodeInvalidUtf8),
}}...), }},
}) })
} }

View File

@@ -49,13 +49,11 @@ func init() {
Help: "Get QingStor credentials from the environment (env vars or IAM).", Help: "Get QingStor credentials from the environment (env vars or IAM).",
}}, }},
}, { }, {
Name: "access_key_id", Name: "access_key_id",
Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.", Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
Sensitive: true,
}, { }, {
Name: "secret_access_key", Name: "secret_access_key",
Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.", Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
Sensitive: true,
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Enter an endpoint URL to connection QingStor API.\n\nLeave blank will use the default value \"https://qingstor.com:443\".", Help: "Enter an endpoint URL to connection QingStor API.\n\nLeave blank will use the default value \"https://qingstor.com:443\".",

View File

@@ -1,182 +0,0 @@
// Package api provides types used by the Quatrix API.
package api
import (
"strconv"
"time"
)
// OverwriteMode is a conflict resolve mode during copy or move. Files with conflicting names will be overwritten
const OverwriteMode = "overwrite"
// ProfileInfo is a profile info about quota
type ProfileInfo struct {
UserUsed int64 `json:"user_used"`
UserLimit int64 `json:"user_limit"`
AccUsed int64 `json:"acc_used"`
AccLimit int64 `json:"acc_limit"`
}
// IDList is a general object that contains list of ids
type IDList struct {
IDs []string `json:"ids"`
}
// DeleteParams is the request to delete object
type DeleteParams struct {
IDs []string `json:"ids"`
DeletePermanently bool `json:"delete_permanently"`
}
// FileInfoParams is the request to get object's (file or directory) info
type FileInfoParams struct {
ParentID string `json:"parent_id,omitempty"`
Path string `json:"path"`
}
// FileInfo is the response to get object's (file or directory) info
type FileInfo struct {
FileID string `json:"file_id"`
ParentID string `json:"parent_id"`
Src string `json:"src"`
Type string `json:"type"`
}
// IsFile returns true if object is a file
// false otherwise
func (fi *FileInfo) IsFile() bool {
if fi == nil {
return false
}
return fi.Type == "F"
}
// IsDir returns true if object is a directory
// false otherwise
func (fi *FileInfo) IsDir() bool {
if fi == nil {
return false
}
return fi.Type == "D" || fi.Type == "S" || fi.Type == "T"
}
// CreateDirParams is the request to create a directory
type CreateDirParams struct {
Target string `json:"target,omitempty"`
Name string `json:"name"`
Resolve bool `json:"resolve"`
}
// File represent metadata about object in Quatrix (file or directory)
type File struct {
ID string `json:"id"`
Created JSONTime `json:"created"`
Modified JSONTime `json:"modified"`
Name string `json:"name"`
ParentID string `json:"parent_id"`
Size int64 `json:"size"`
ModifiedMS JSONTime `json:"modified_ms"`
Type string `json:"type"`
Operations int `json:"operations"`
SubType string `json:"sub_type"`
Content []File `json:"content"`
}
// IsFile returns true if object is a file
// false otherwise
func (f *File) IsFile() bool {
if f == nil {
return false
}
return f.Type == "F"
}
// IsDir returns true if object is a directory
// false otherwise
func (f *File) IsDir() bool {
if f == nil {
return false
}
return f.Type == "D" || f.Type == "S" || f.Type == "T"
}
// SetMTimeParams is the request to set modification time for object
type SetMTimeParams struct {
ID string `json:"id,omitempty"`
MTime JSONTime `json:"mtime"`
}
// JSONTime provides methods to marshal/unmarshal time.Time as Unix time
type JSONTime time.Time
// MarshalJSON returns time representation in Unix time
func (u JSONTime) MarshalJSON() ([]byte, error) {
return []byte(strconv.FormatFloat(float64(time.Time(u).UTC().UnixNano())/1e9, 'f', 6, 64)), nil
}
// UnmarshalJSON sets time from Unix time representation
func (u *JSONTime) UnmarshalJSON(data []byte) error {
f, err := strconv.ParseFloat(string(data), 64)
if err != nil {
return err
}
t := JSONTime(time.Unix(0, int64(f*1e9)))
*u = t
return nil
}
// String returns Unix time representation of time as string
func (u JSONTime) String() string {
return strconv.FormatInt(time.Time(u).UTC().Unix(), 10)
}
// DownloadLinkResponse is the response to download-link request
type DownloadLinkResponse struct {
ID string `json:"id"`
}
// UploadLinkParams is the request to get upload-link
type UploadLinkParams struct {
Name string `json:"name"`
ParentID string `json:"parent_id"`
Resolve bool `json:"resolve"`
}
// UploadLinkResponse is the response to upload-link request
type UploadLinkResponse struct {
Name string `json:"name"`
FileID string `json:"file_id"`
ParentID string `json:"parent_id"`
UploadKey string `json:"upload_key"`
}
// UploadFinalizeResponse is the response to finalize file method
type UploadFinalizeResponse struct {
FileID string `json:"id"`
ParentID string `json:"parent_id"`
Modified int64 `json:"modified"`
FileSize int64 `json:"size"`
}
// FileModifyParams is the request to get modify file link
type FileModifyParams struct {
ID string `json:"id"`
Truncate int64 `json:"truncate"`
}
// FileCopyMoveOneParams is the request to do server-side copy and move
// can be used for file or directory
type FileCopyMoveOneParams struct {
ID string `json:"file_id"`
Target string `json:"target_id"`
Name string `json:"name"`
MTime JSONTime `json:"mtime"`
Resolve bool `json:"resolve"`
ResolveMode string `json:"resolve_mode"`
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,17 +0,0 @@
// Test Quatrix filesystem interface
package quatrix_test
import (
"testing"
"github.com/rclone/rclone/backend/quatrix"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestQuatrix:",
NilObject: (*quatrix.Object)(nil),
})
}

View File

@@ -1,108 +0,0 @@
package quatrix
import (
"sync"
"time"
"github.com/rclone/rclone/fs"
)
// UploadMemoryManager dynamically calculates every chunk size for the transfer and increases or decreases it
// depending on the upload speed. This makes general upload time smaller, because transfers that are faster
// does not have to wait for the slower ones until they finish upload.
type UploadMemoryManager struct {
m sync.Mutex
useDynamicSize bool
shared int64
reserved int64
effectiveTime time.Duration
fileUsage map[string]int64
}
// NewUploadMemoryManager is a constructor for UploadMemoryManager
func NewUploadMemoryManager(ci *fs.ConfigInfo, opt *Options) *UploadMemoryManager {
useDynamicSize := true
sharedMemory := int64(opt.MaximalSummaryChunkSize) - int64(opt.MinimalChunkSize)*int64(ci.Transfers)
if sharedMemory <= 0 {
sharedMemory = 0
useDynamicSize = false
}
return &UploadMemoryManager{
useDynamicSize: useDynamicSize,
shared: sharedMemory,
reserved: int64(opt.MinimalChunkSize),
effectiveTime: time.Duration(opt.EffectiveUploadTime),
fileUsage: map[string]int64{},
}
}
// Consume -- decide amount of memory to consume
func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed float64) int64 {
if !u.useDynamicSize {
if neededMemory < u.reserved {
return neededMemory
}
return u.reserved
}
u.m.Lock()
defer u.m.Unlock()
borrowed, found := u.fileUsage[fileID]
if found {
u.shared += borrowed
borrowed = 0
}
defer func() { u.fileUsage[fileID] = borrowed }()
effectiveChunkSize := int64(speed * u.effectiveTime.Seconds())
if effectiveChunkSize < u.reserved {
effectiveChunkSize = u.reserved
}
if neededMemory < effectiveChunkSize {
effectiveChunkSize = neededMemory
}
if effectiveChunkSize <= u.reserved {
return effectiveChunkSize
}
toBorrow := effectiveChunkSize - u.reserved
if toBorrow <= u.shared {
u.shared -= toBorrow
borrowed = toBorrow
return effectiveChunkSize
}
borrowed = u.shared
u.shared = 0
return borrowed + u.reserved
}
// Return returns consumed memory for the previous chunk upload to the memory pool
func (u *UploadMemoryManager) Return(fileID string) {
if !u.useDynamicSize {
return
}
u.m.Lock()
defer u.m.Unlock()
borrowed, found := u.fileUsage[fileID]
if !found {
return
}
u.shared += borrowed
delete(u.fileUsage, fileID)
}

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,6 @@ import (
"time" "time"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/cache"
@@ -394,41 +393,6 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
} }
}) })
t.Run("Mkdir", func(t *testing.T) {
// Test what happens when we create a bucket we already own and see whether the
// quirk is set correctly
req := s3.CreateBucketInput{
Bucket: &f.rootBucket,
ACL: stringPointerOrNil(f.opt.BucketACL),
}
if f.opt.LocationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
LocationConstraint: &f.opt.LocationConstraint,
}
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.CreateBucketWithContext(ctx, &req)
return f.shouldRetry(ctx, err)
})
var errString string
if err == nil {
errString = "No Error"
} else if awsErr, ok := err.(awserr.Error); ok {
errString = awsErr.Code()
} else {
assert.Fail(t, "Unknown error %T %v", err, err)
}
t.Logf("Creating a bucket we already have created returned code: %s", errString)
switch errString {
case "BucketAlreadyExists":
assert.False(t, f.opt.UseAlreadyExists.Value, "Need to clear UseAlreadyExists quirk")
case "No Error", "BucketAlreadyOwnedByYou":
assert.True(t, f.opt.UseAlreadyExists.Value, "Need to set UseAlreadyExists quirk")
default:
assert.Fail(t, "Unknown error string %q", errString)
}
})
t.Run("Cleanup", func(t *testing.T) { t.Run("Cleanup", func(t *testing.T) {
require.NoError(t, f.CleanUpHidden(ctx)) require.NoError(t, f.CleanUpHidden(ctx))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...) items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)

View File

@@ -47,12 +47,4 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs) return f.setUploadCutoff(cs)
} }
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) { var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
return f.setCopyCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
)

Some files were not shown because too many files have changed in this diff Show More