Mirror of https://github.com/rclone/rclone.git (synced 2026-01-01 16:13:35 +00:00)

Compare commits: 62 commits
a80287effd
4216d55a05
ba6730720d
7735b5c694
d45b3479ee
4c5df0a765
8c61a09be2
c217145cae
4c93378f0e
f9e54f96c3
af0fcd03cb
00aafc957e
29abbd2032
663b2d9c46
f36d6d01b5
0c03aa3a8b
caa2b8bf40
421e840e37
9b57d27be4
627ac1b2d9
ae395d8cf0
f04520a6e3
c968c3e41c
3661791e82
4198763c35
3de47b8ed4
71b8e1e80b
7366e97dfc
21ba4d9a18
96e099d8e7
2a31b5bdd6
9bdfe4c36f
e3a2f539fe
ffa943e31f
b16f603c51
a7a8372976
9beb0677e4
e43b5ce5e5
97328e5755
7b7d780fff
c2600f9e4d
7bd853ce35
05150cfb1d
25366268fe
c08d48a50d
454574e2cc
9218a3eb00
1e4ef4b4d5
8d92f7d697
fd56abc5f2
b323bf34e2
e78e73eae7
f51a5eca2e
39e2af7974
b3217adf08
074234119a
6210e22ab5
940e99a929
79b6866b57
c142e3edcc
5c646dff9a
19dfaf7440
.github/ISSUE_TEMPLATE/Bug.md (vendored, 2 changes):

```diff
@@ -9,7 +9,7 @@ We understand you are having a problem with rclone; we want to help you with tha
 
 **STOP and READ**
 **YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
-Please show the effort you've put in to solving the problem and please be specific.
+Please show the effort you've put into solving the problem and please be specific.
 People are volunteering their time to help! Low effort posts are not likely to get good answers!
 
 If you think you might have found a bug, try to replicate it with the latest beta (or stable).
```
.github/workflows/build.yml (vendored, 8 changes):

```diff
@@ -25,7 +25,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.14', 'go1.15', 'go1.16']
+        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.15', 'go1.16']
 
       include:
         - job_name: linux
@@ -83,12 +83,6 @@ jobs:
           compile_all: true
           deploy: true
 
-        - job_name: go1.14
-          os: ubuntu-latest
-          go: '1.14.x'
-          quicktest: true
-          racequicktest: true
-
         - job_name: go1.15
           os: ubuntu-latest
           go: '1.15.x'
```
CONTRIBUTING.md:

````diff
@@ -223,7 +223,7 @@ find the results at https://pub.rclone.org/integration-tests/
 Rclone code is organised into a small number of top level directories
 with modules beneath.
 
 * backend - the rclone backends for interfacing to cloud providers -
   * all - import this to load all the cloud providers
   * ...providers
 * bin - scripts for use while building or maintaining rclone
@@ -233,7 +233,7 @@ with modules beneath.
   * cmdtest - end-to-end tests of commands, flags, environment variables,...
 * docs - the documentation and website
   * content - adjust these docs only - everything else is autogenerated
-  * command - these are auto generated - edit the corresponding .go file
+  * command - these are auto-generated - edit the corresponding .go file
 * fs - main rclone definitions - minimal amount of code
   * accounting - bandwidth limiting and statistics
   * asyncreader - an io.Reader which reads ahead
@@ -299,7 +299,7 @@ the source file in the `Help:` field.
 countries, it looks better without an ending period/full stop character.
 
 The only documentation you need to edit are the `docs/content/*.md`
-files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
+files. The `MANUAL.*`, `rclone.1`, website, etc. are all auto-generated
 from those during the release process. See the `make doc` and `make
 website` targets in the Makefile if you are interested in how. You
 don't need to run these when adding a feature.
@@ -350,7 +350,7 @@ And here is an example of a longer one:
 ```
 mount: fix hang on errored upload
 
-In certain circumstances if an upload failed then the mount could hang
+In certain circumstances, if an upload failed then the mount could hang
 indefinitely. This was fixed by closing the read pipe after the Put
 completed. This will cause the write side to return a pipe closed
 error fixing the hang.
@@ -382,7 +382,7 @@ and `go.sum` in the same commit as your other changes.
 
 If you need to update a dependency then run
 
-    GO111MODULE=on go get -u github.com/pkg/errors
+    GO111MODULE=on go get -u golang.org/x/crypto
 
 Check in a single commit as above.
 
@@ -425,8 +425,8 @@ Research
 Getting going
 
 * Create `backend/remote/remote.go` (copy this from a similar remote)
-  * box is a good one to start from if you have a directory based remote
-  * b2 is a good one to start from if you have a bucket based remote
+  * box is a good one to start from if you have a directory-based remote
+  * b2 is a good one to start from if you have a bucket-based remote
 * Add your remote to the imports in `backend/all/all.go`
 * HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
 * Try to implement as many optional methods as possible as it makes the remote more usable.
````
MAINTAINERS.md:

```diff
@@ -19,7 +19,7 @@ Current active maintainers of rclone are:
 
 **This is a work in progress Draft**
 
-This is a guide for how to be an rclone maintainer. This is mostly a writeup of what I (@ncw) attempt to do.
+This is a guide for how to be an rclone maintainer. This is mostly a write-up of what I (@ncw) attempt to do.
 
 ## Triaging Tickets ##
 
@@ -27,15 +27,15 @@ When a ticket comes in it should be triaged. This means it should be classified
 
 Rclone uses the labels like this:
 
-* `bug` - a definite verified bug
+* `bug` - a definitely verified bug
 * `can't reproduce` - a problem which we can't reproduce
 * `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
 * `duplicate` - normally close these and ask the user to subscribe to the original
 * `enhancement: new remote` - a new rclone backend
 * `enhancement` - a new feature
 * `FUSE` - to do with `rclone mount` command
-* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
-* `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
+* `good first issue` - mark these if you find a small self-contained issue - these get shown to new visitors to the project
+* `help` wanted - mark these if you find a self-contained issue - these get shown to new visitors to the project
 * `IMPORTANT` - note to maintainers not to forget to fix this for the release
 * `maintenance` - internal enhancement, code re-organisation, etc.
 * `Needs Go 1.XX` - waiting for that version of Go to be released
@@ -51,7 +51,7 @@ The milestones have these meanings:
 
 * v1.XX - stuff we would like to fit into this release
 * v1.XX+1 - stuff we are leaving until the next release
-* Soon - stuff we think is a good idea - waiting to be scheduled to a release
+* Soon - stuff we think is a good idea - waiting to be scheduled for a release
 * Help wanted - blue sky stuff that might get moved up, or someone could help with
 * Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
 
@@ -65,7 +65,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po
 
 Try to process pull requests promptly!
 
-Merging pull requests on GitHub itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
+Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
 
 After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
 
@@ -81,15 +81,15 @@ Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer
 
 High impact regressions should be fixed before the next release.
 
-Near the start of the release cycle the dependencies should be updated with `make update` to give time for bugs to surface.
+Near the start of the release cycle, the dependencies should be updated with `make update` to give time for bugs to surface.
 
 Towards the end of the release cycle try not to merge anything too big so let things settle down.
 
-Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.
+Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time-consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.
 
 ## Mailing list ##
 
-There is now an invite only mailing list for rclone developers `rclone-dev` on google groups.
+There is now an invite-only mailing list for rclone developers `rclone-dev` on google groups.
 
 ## TODO ##
 
```
Makefile (8 changes):

```diff
@@ -104,10 +104,14 @@ showupdates:
 	@echo "*** Direct dependencies that could be updated ***"
 	@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
 
+# Update direct dependencies only
+updatedirect:
+	GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
+	GO111MODULE=on go mod tidy
+
 # Update direct and indirect dependencies and test dependencies
 update:
-	GO111MODULE=on go get -u -t ./...
-#GO111MODULE=on go get -d $(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
+	GO111MODULE=on go get -d -u -t ./...
 	GO111MODULE=on go mod tidy
 
 # Tidy the module dependencies
```
README.md:

```diff
@@ -2,7 +2,7 @@
 
 [Website](https://rclone.org) |
 [Documentation](https://rclone.org/docs/) |
 [Download](https://rclone.org/downloads/) |
 [Contributing](CONTRIBUTING.md) |
 [Changelog](https://rclone.org/changelog/) |
 [Installation](https://rclone.org/install/) |
@@ -10,12 +10,12 @@
 
 [](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
 [](https://goreportcard.com/report/github.com/rclone/rclone)
 [](https://godoc.org/github.com/rclone/rclone)
 [](https://hub.docker.com/r/rclone/rclone)
 
 # Rclone
 
-Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.
+Rclone *("rsync for cloud storage")* is a command-line program to sync files and directories to and from different cloud storage providers.
 
 ## Storage providers
 
@@ -59,6 +59,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 * put.io [:page_facing_up:](https://rclone.org/putio/)
 * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
 * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
+* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
 * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
 * Seafile [:page_facing_up:](https://rclone.org/seafile/)
 * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
@@ -72,7 +73,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 * Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
 * Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
 * The local filesystem [:page_facing_up:](https://rclone.org/local/)
 
 Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
 
 ## Features
```
RELEASE.md (17 changes):

```diff
@@ -34,13 +34,24 @@ This file describes how to make the various kinds of releases
   * make startdev # make startstable for stable branch
   * # announce with forum post, twitter post, patreon post
 
+## Update dependencies
 
 Early in the next release cycle update the dependencies
 
   * Review any pinned packages in go.mod and remove if possible
-  * make update
-  * git status
-  * git add new files
+  * make updatedirect
+  * make
   * git commit -a -v
+  * make update
+  * make
+  * roll back any updates which didn't compile
+  * git commit -a -v --amend
+
+Note that `make update` updates all direct and indirect dependencies
+and there can occasionally be forwards compatibility problems with
+doing that so it may be necessary to roll back dependencies to the
+version specified by `make updatedirect` in order to get rclone to
+build.
 
 ## Making a point release
 
```
backend/amazonclouddrive/amazonclouddrive.go:

```diff
@@ -14,6 +14,7 @@ we ignore assets completely!
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -22,7 +23,6 @@ import (
 	"time"
 
 	acd "github.com/ncw/go-acd"
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
@@ -259,7 +259,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to configure Amazon Drive")
+		return nil, fmt.Errorf("failed to configure Amazon Drive: %w", err)
 	}
 
 	c := acd.NewClient(oAuthClient)
@@ -292,13 +292,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to get endpoints")
+		return nil, fmt.Errorf("failed to get endpoints: %w", err)
 	}
 
 	// Get rootID
 	rootInfo, err := f.getRootInfo(ctx)
 	if err != nil || rootInfo.Id == nil {
-		return nil, errors.Wrap(err, "failed to get root")
+		return nil, fmt.Errorf("failed to get root: %w", err)
 	}
 	f.trueRootID = *rootInfo.Id
 
```
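The dominant change in this and the following backend files is the migration off `github.com/pkg/errors` onto the standard library: `errors.Wrap(err, "msg")` becomes `fmt.Errorf("msg: %w", err)`. A minimal, runnable sketch of the equivalence (the file name and error text here are illustrative, not taken from rclone):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	// Before: return nil, errors.Wrap(err, "failed to configure Amazon Drive")
	// After, as in the hunks above:
	if _, err := os.Open("missing.conf"); err != nil { // hypothetical path
		wrapped := fmt.Errorf("failed to configure Amazon Drive: %w", err)

		// %w keeps the cause on the error chain, so callers can still
		// inspect it with the standard helpers:
		fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true
		var pathErr *fs.PathError
		fmt.Println(errors.As(wrapped, &pathErr)) // true
	}
}
```

Because `%w` (Go 1.13+) records the wrapped error, `errors.Is` and `errors.As` behave the same as with `pkg/errors`, which is what lets these commits drop the dependency without losing error inspectability.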
backend/azureblob/azureblob.go:

```diff
@@ -10,6 +10,7 @@ import (
 	"encoding/base64"
 	"encoding/hex"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -24,7 +25,6 @@ import (
 	"github.com/Azure/azure-pipeline-go/pipeline"
 	"github.com/Azure/azure-storage-blob-go/azblob"
 	"github.com/Azure/go-autorest/autorest/adal"
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
@@ -414,10 +414,10 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
 func checkUploadChunkSize(cs fs.SizeSuffix) error {
 	const minChunkSize = fs.SizeSuffixBase
 	if cs < minChunkSize {
-		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
 	}
 	if cs > maxChunkSize {
-		return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
+		return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
 	}
 	return nil
 }
@@ -459,11 +459,11 @@ const azureStorageEndpoint = "https://storage.azure.com/"
 func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []byte) (azblob.TokenRefresher, error) {
 	var spCredentials servicePrincipalCredentials
 	if err := json.Unmarshal(credentialsData, &spCredentials); err != nil {
-		return nil, errors.Wrap(err, "error parsing credentials from JSON file")
+		return nil, fmt.Errorf("error parsing credentials from JSON file: %w", err)
 	}
 	oauthConfig, err := adal.NewOAuthConfig(azureActiveDirectoryEndpoint, spCredentials.Tenant)
 	if err != nil {
-		return nil, errors.Wrap(err, "error creating oauth config")
+		return nil, fmt.Errorf("error creating oauth config: %w", err)
 	}
 
 	// Create service principal token for Azure Storage.
@@ -473,7 +473,7 @@ func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []by
 		spCredentials.Password,
 		azureStorageEndpoint)
 	if err != nil {
-		return nil, errors.Wrap(err, "error creating service principal token")
+		return nil, fmt.Errorf("error creating service principal token: %w", err)
 	}
 
 	// Wrap token inside a refresher closure.
@@ -526,10 +526,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 
 	err = checkUploadChunkSize(opt.ChunkSize)
 	if err != nil {
-		return nil, errors.Wrap(err, "azure: chunk size")
+		return nil, fmt.Errorf("azure: chunk size: %w", err)
 	}
 	if opt.ListChunkSize > maxListChunkSize {
-		return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
+		return nil, fmt.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
 	}
 	if opt.Endpoint == "" {
 		opt.Endpoint = storageDefaultBaseURL
@@ -538,12 +538,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if opt.AccessTier == "" {
 		opt.AccessTier = string(defaultAccessTier)
 	} else if !validateAccessTier(opt.AccessTier) {
-		return nil, errors.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
+		return nil, fmt.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
 			string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
 	}
 
 	if !validatePublicAccess((opt.PublicAccess)) {
-		return nil, errors.Errorf("Azure Blob: Supported public access level are %s and %s",
+		return nil, fmt.Errorf("Azure Blob: Supported public access level are %s and %s",
 			string(azblob.PublicAccessBlob), string(azblob.PublicAccessContainer))
 	}
 
@@ -585,11 +585,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	case opt.UseEmulator:
 		credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
 		if err != nil {
-			return nil, errors.Wrapf(err, "Failed to parse credentials")
+			return nil, fmt.Errorf("Failed to parse credentials: %w", err)
 		}
 		u, err = url.Parse(emulatorBlobEndpoint)
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+			return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
 		}
 		pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
 		serviceURL = azblob.NewServiceURL(*u, pipeline)
@@ -631,12 +631,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		})
 
 		if err != nil {
-			return nil, errors.Wrapf(err, "Failed to acquire MSI token")
+			return nil, fmt.Errorf("Failed to acquire MSI token: %w", err)
 		}
 
 		u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+			return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
 		}
 		credential := azblob.NewTokenCredential(token.AccessToken, func(credential azblob.TokenCredential) time.Duration {
 			fs.Debugf(f, "Token refresher called.")
@@ -666,19 +666,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	case opt.Account != "" && opt.Key != "":
 		credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
 		if err != nil {
-			return nil, errors.Wrapf(err, "Failed to parse credentials")
+			return nil, fmt.Errorf("Failed to parse credentials: %w", err)
 		}
 
 		u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+			return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
 		}
 		pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
 		serviceURL = azblob.NewServiceURL(*u, pipeline)
 	case opt.SASURL != "":
 		u, err = url.Parse(opt.SASURL)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to parse SAS URL")
+			return nil, fmt.Errorf("failed to parse SAS URL: %w", err)
 		}
 		// use anonymous credentials in case of sas url
 		pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
@@ -698,17 +698,17 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		// Create a standard URL.
 		u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+			return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
 		}
 		// Try loading service principal credentials from file.
 		loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
 		if err != nil {
-			return nil, errors.Wrap(err, "error opening service principal credentials file")
+			return nil, fmt.Errorf("error opening service principal credentials file: %w", err)
 		}
 		// Create a token refresher from service principal credentials.
 		tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, loadedCreds)
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to create a service principal token")
+			return nil, fmt.Errorf("failed to create a service principal token: %w", err)
 		}
 		options := azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}}
 		pipe := f.newPipeline(azblob.NewTokenCredential("", tokenRefresher), options)
@@ -1324,7 +1324,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	}
 	data, err := base64.StdEncoding.DecodeString(o.md5)
 	if err != nil {
-		return "", errors.Wrapf(err, "Failed to decode Content-MD5: %q", o.md5)
+		return "", fmt.Errorf("Failed to decode Content-MD5: %q: %w", o.md5, err)
 	}
 	return hex.EncodeToString(data), nil
 }
```
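On the `Hash` hunk just above: Azure Blob reports `Content-MD5` as base64, while rclone's hash framework wants lowercase hex, hence the decode and re-encode. A standalone sketch (the sample value is the well-known MD5 of the empty string, used here purely as test data):

```go
package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func main() {
	contentMD5 := "1B2M2Y8AsgTpgAmY7PhCfg==" // base64 MD5 of ""
	data, err := base64.StdEncoding.DecodeString(contentMD5)
	if err != nil {
		fmt.Printf("Failed to decode Content-MD5: %q: %v\n", contentMD5, err)
		return
	}
	// Same conversion as Object.Hash above:
	fmt.Println(hex.EncodeToString(data)) // d41d8cd98f00b204e9800998ecf8427e
}
```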
backend/azureblob/azureblob.go (continued):

```diff
@@ -1510,7 +1510,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	var offset int64
 	var count int64
 	if o.AccessTier() == azblob.AccessTierArchive {
-		return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
+		return nil, fmt.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
 	}
 	fs.FixRangeOption(options, o.size)
 	for _, option := range options {
@@ -1536,11 +1536,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		return o.fs.shouldRetry(ctx, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to open for download")
+		return nil, fmt.Errorf("failed to open for download: %w", err)
 	}
 	err = o.decodeMetaDataFromDownloadResponse(downloadResponse)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to decode metadata for download")
+		return nil, fmt.Errorf("failed to decode metadata for download: %w", err)
 	}
 	in = downloadResponse.Body(azblob.RetryReaderOptions{})
 	return in, nil
@@ -1630,7 +1630,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			fs.Debugf(o, "deleting archive tier blob before updating")
 			err = o.Remove(ctx)
 			if err != nil {
-				return errors.Wrap(err, "failed to delete archive blob before updating")
+				return fmt.Errorf("failed to delete archive blob before updating: %w", err)
 			}
 		} else {
 			return errCantUpdateArchiveTierBlobs
@@ -1723,7 +1723,7 @@ func (o *Object) AccessTier() azblob.AccessTierType {
 // SetTier performs changing object tier
 func (o *Object) SetTier(tier string) error {
 	if !validateAccessTier(tier) {
-		return errors.Errorf("Tier %s not supported by Azure Blob Storage", tier)
+		return fmt.Errorf("Tier %s not supported by Azure Blob Storage", tier)
 	}
 
 	// Check if current tier already matches with desired tier
@@ -1739,7 +1739,7 @@ func (o *Object) SetTier(tier string) error {
 	})
 
 	if err != nil {
-		return errors.Wrap(err, "Failed to set Blob Tier")
+		return fmt.Errorf("Failed to set Blob Tier: %w", err)
 	}
 
 	// Set access tier on local object also, this typically
```
backend/azureblob/imds.go:

```diff
@@ -13,7 +13,6 @@ import (
 	"net/http"
 
 	"github.com/Azure/go-autorest/autorest/adal"
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/fshttp"
 )
@@ -95,7 +94,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
 	httpClient := fshttp.NewClient(ctx)
 	resp, err := httpClient.Do(req)
 	if err != nil {
-		return result, errors.Wrap(err, "MSI is not enabled on this VM")
+		return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
 	}
 	defer func() { // resp and Body should not be nil
 		_, err = io.Copy(ioutil.Discard, resp.Body)
@@ -120,7 +119,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
 
 	b, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
-		return result, errors.Wrap(err, "Couldn't read IMDS response")
+		return result, fmt.Errorf("Couldn't read IMDS response: %w", err)
 	}
 	// Remove BOM, if any. azcopy does this so I'm following along.
 	b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
@@ -131,7 +130,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
 	// storage API call.
 	err = json.Unmarshal(b, &result)
 	if err != nil {
-		return result, errors.Wrap(err, "Couldn't unmarshal IMDS response")
+		return result, fmt.Errorf("Couldn't unmarshal IMDS response: %w", err)
 	}
 
 	return result, nil
```
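The IMDS hunks above read the metadata service response, strip a UTF-8 byte-order mark, and then unmarshal. The BOM trim is load-bearing: `encoding/json` rejects a leading BOM as invalid JSON. A self-contained sketch (the JSON payload is invented for illustration):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// A response body with a UTF-8 BOM, as some services emit.
	b := []byte("\xef\xbb\xbf{\"access_token\":\"xyz\"}")

	// Without this trim, json.Unmarshal fails with "invalid character".
	b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))

	var result struct {
		AccessToken string `json:"access_token"`
	}
	if err := json.Unmarshal(b, &result); err != nil {
		fmt.Printf("Couldn't unmarshal IMDS response: %v\n", err)
		return
	}
	fmt.Println(result.AccessToken) // xyz
}
```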
backend/b2/b2.go:

```diff
@@ -9,6 +9,7 @@ import (
 	"bytes"
 	"context"
 	"crypto/sha1"
+	"errors"
 	"fmt"
 	gohash "hash"
 	"io"
@@ -19,7 +20,6 @@ import (
 	"sync"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/b2/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
@@ -366,7 +366,7 @@ func errorHandler(resp *http.Response) error {
 
 func checkUploadChunkSize(cs fs.SizeSuffix) error {
 	if cs < minChunkSize {
-		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
 	}
 	return nil
 }
@@ -381,7 +381,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
 
 func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
 	if cs < opt.ChunkSize {
-		return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
+		return fmt.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
 	}
 	return nil
 }
@@ -414,11 +414,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 	err = checkUploadCutoff(opt, opt.UploadCutoff)
 	if err != nil {
-		return nil, errors.Wrap(err, "b2: upload cutoff")
+		return nil, fmt.Errorf("b2: upload cutoff: %w", err)
 	}
 	err = checkUploadChunkSize(opt.ChunkSize)
 	if err != nil {
-		return nil, errors.Wrap(err, "b2: chunk size")
+		return nil, fmt.Errorf("b2: chunk size: %w", err)
 	}
 	if opt.Account == "" {
 		return nil, errors.New("account not found")
@@ -463,7 +463,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 	err = f.authorizeAccount(ctx)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to authorize account")
+		return nil, fmt.Errorf("failed to authorize account: %w", err)
 	}
 	// If this is a key limited to a single bucket, it must exist already
 	if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
@@ -472,7 +472,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			return nil, errors.New("bucket that application key is restricted to no longer exists")
 		}
 		if allowedBucket != f.rootBucket {
-			return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
+			return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
 		}
 		f.cache.MarkOK(f.rootBucket)
 		f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
@@ -512,7 +512,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
 		return f.shouldRetryNoReauth(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "failed to authenticate")
+		return fmt.Errorf("failed to authenticate: %w", err)
 	}
 	f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
 	return nil
@@ -558,7 +558,7 @@ func (f *Fs) getUploadURL(ctx context.Context, bucket string) (upload *api.GetUp
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to get upload URL")
+		return nil, fmt.Errorf("failed to get upload URL: %w", err)
 	}
 	return upload, nil
 }
@@ -1048,7 +1048,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
 			}
 		}
 	}
-	return errors.Wrap(err, "failed to create bucket")
+	return fmt.Errorf("failed to create bucket: %w", err)
 	}
 	f.setBucketID(bucket, response.ID)
 	f.setBucketType(bucket, response.Type)
@@ -1083,7 +1083,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "failed to delete bucket")
+		return fmt.Errorf("failed to delete bucket: %w", err)
 	}
 	f.clearBucketID(bucket)
 	f.clearBucketType(bucket)
@@ -1124,7 +1124,7 @@ func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
 			return nil
 		}
 	}
-		return errors.Wrapf(err, "failed to hide %q", bucketPath)
+		return fmt.Errorf("failed to hide %q: %w", bucketPath, err)
 	}
 	return nil
 }
@@ -1145,7 +1145,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrapf(err, "failed to delete %q", Name)
+		return fmt.Errorf("failed to delete %q: %w", Name, err)
 	}
 	return nil
 }
@@ -1364,7 +1364,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return "", errors.Wrap(err, "failed to get download authorization")
+		return "", fmt.Errorf("failed to get download authorization: %w", err)
 	}
 	return response.AuthorizationToken, nil
 }
@@ -1669,14 +1669,14 @@ func (file *openFile) Close() (err error) {
 
 	// Check to see we read the correct number of bytes
 	if file.o.Size() != file.bytes {
-		return errors.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
+		return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
 	}
 
 	// Check the SHA1
 	receivedSHA1 := file.o.sha1
 	calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
 	if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
-		return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
+		return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
 	}
 
 	return nil
```
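The `openFile.Close` hunk above verifies a B2 download by byte count and SHA1. A minimal sketch of that pattern, hashing the stream while it is read and comparing on close (the reader and expected digest are stand-ins, not B2 API calls; the digest is SHA1 of "hello"):

```go
package main

import (
	"crypto/sha1"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	body := strings.NewReader("hello") // stand-in for the download body
	receivedSHA1 := "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d"

	hash := sha1.New()
	in := io.TeeReader(body, hash) // hash bytes as the caller reads them
	n, _ := io.Copy(ioutil.Discard, in)

	calculatedSHA1 := fmt.Sprintf("%x", hash.Sum(nil))
	if receivedSHA1 != calculatedSHA1 {
		fmt.Printf("object corrupted on transfer - SHA1 mismatch (want %q got %q)\n",
			receivedSHA1, calculatedSHA1)
		return
	}
	fmt.Printf("read %d bytes, SHA1 ok\n", n)
}
```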
backend/b2/b2.go (continued):

```diff
@@ -1716,7 +1716,7 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
 		if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
 			return nil, nil, fs.ErrorObjectNotFound
 		}
-		return nil, nil, errors.Wrapf(err, "failed to %s for download", method)
+		return nil, nil, fmt.Errorf("failed to %s for download: %w", method, err)
 	}
 
 	// NB resp may be Open here - don't return err != nil without closing
```
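Note the split in `getOrHead` above: the not-found case returns rclone's sentinel `fs.ErrorObjectNotFound` unwrapped, so callers can match it directly, while every other failure is wrapped with `%w`. A sketch of that sentinel-versus-wrapped distinction (the sentinel and message here are local stand-ins, not the real `fs` package):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// Stand-in for rclone's fs.ErrorObjectNotFound sentinel.
var errObjectNotFound = errors.New("object not found")

func classify(resp *http.Response, err error) error {
	// Return the sentinel bare so callers can test for it with errors.Is.
	if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
		return errObjectNotFound
	}
	// Everything else gets wrapped with context, preserving the cause.
	return fmt.Errorf("failed to HEAD for download: %w", err)
}

func main() {
	err := classify(&http.Response{StatusCode: 404}, errors.New("404"))
	fmt.Println(errors.Is(err, errObjectNotFound)) // true
}
```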
backend/b2/upload.go:

```diff
@@ -15,7 +15,6 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/b2/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
@@ -102,7 +101,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 			parts++
 		}
 		if parts > maxParts {
-			return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
+			return nil, fmt.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
 		}
 		sha1SliceSize = parts
 	}
```
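The `newLargeUpload` hunk above caps the number of multipart upload parts, computed by ceiling division of the object size by the chunk size. A sketch of the arithmetic (assuming B2's documented 10,000-part limit; the real constant and `fs.SizeSuffix` types live in the b2 backend):

```go
package main

import "fmt"

const maxParts = 10000 // assumed limit for illustration

// partsFor mirrors the parts calculation in newLargeUpload:
// integer division, plus one part for a trailing partial chunk.
func partsFor(size, chunkSize int64) (int64, error) {
	parts := size / chunkSize
	if size%chunkSize != 0 {
		parts++
	}
	if parts > maxParts {
		return 0, fmt.Errorf("%d bytes makes too many parts %d > %d - increase --b2-chunk-size",
			size, parts, maxParts)
	}
	return parts, nil
}

func main() {
	fmt.Println(partsFor(1<<30, 96*1024*1024)) // 1 GiB in 96 MiB chunks: 11 parts
}
```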
backend/b2/upload.go (continued):

```diff
@@ -185,7 +184,7 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
 			return up.f.shouldRetry(ctx, resp, err)
 		})
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to get upload URL")
+			return nil, fmt.Errorf("failed to get upload URL: %w", err)
 		}
 	} else {
 		upload, up.uploads = up.uploads[0], up.uploads[1:]
@@ -406,7 +405,7 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (e
 		up.size += int64(n)
 		if part > maxParts {
 			up.f.putBuf(buf, false)
-			return errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
+			return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
 		}
 
 		part := part // for the closure
```
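One line in the `getUploadURL` hunk above is worth a note: `upload, up.uploads = up.uploads[0], up.uploads[1:]` pops a pooled upload URL off a slice used as a FIFO queue, requesting a fresh URL only when the pool is empty. A tiny sketch of the idiom (the URL strings are hypothetical):

```go
package main

import "fmt"

func main() {
	uploads := []string{"url-1", "url-2", "url-3"} // hypothetical pooled URLs
	if len(uploads) == 0 {
		fmt.Println("pool empty: would request a fresh upload URL")
		return
	}
	// Pop the head, keep the tail: slice as a FIFO queue.
	var upload string
	upload, uploads = uploads[0], uploads[1:]
	fmt.Println(upload, uploads) // url-1 [url-2 url-3]
}
```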
backend/box/box.go:

```diff
@@ -14,6 +14,7 @@ import (
 	"crypto/rsa"
 	"encoding/json"
 	"encoding/pem"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -26,13 +27,6 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/rclone/rclone/lib/encoder"
-	"github.com/rclone/rclone/lib/env"
-	"github.com/rclone/rclone/lib/jwtutil"
-
-	"github.com/youmark/pkcs8"
-
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/box/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
@@ -43,9 +37,13 @@ import (
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/dircache"
+	"github.com/rclone/rclone/lib/encoder"
+	"github.com/rclone/rclone/lib/env"
+	"github.com/rclone/rclone/lib/jwtutil"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/rest"
+	"github.com/youmark/pkcs8"
 	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/jws"
 )
@@ -93,7 +91,7 @@ func init() {
 		if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
 			err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
 			if err != nil {
-				return nil, errors.Wrap(err, "failed to configure token with jwt authentication")
+				return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
 			}
 		// Else, if not using an access token, use oauth2
 		} else if boxAccessToken == "" || !boxAccessTokenOk {
@@ -167,15 +165,15 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
 	jsonFile = env.ShellExpand(jsonFile)
 	boxConfig, err := getBoxConfig(jsonFile)
 	if err != nil {
-		return errors.Wrap(err, "get box config")
+		return fmt.Errorf("get box config: %w", err)
 	}
 	privateKey, err := getDecryptedPrivateKey(boxConfig)
 	if err != nil {
-		return errors.Wrap(err, "get decrypted private key")
+		return fmt.Errorf("get decrypted private key: %w", err)
 	}
 	claims, err := getClaims(boxConfig, boxSubType)
 	if err != nil {
-		return errors.Wrap(err, "get claims")
+		return fmt.Errorf("get claims: %w", err)
 	}
 	signingHeaders := getSigningHeaders(boxConfig)
 	queryParams := getQueryParams(boxConfig)
@@ -187,11 +185,11 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
 func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
 	file, err := ioutil.ReadFile(configFile)
 	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to read Box config")
+		return nil, fmt.Errorf("box: failed to read Box config: %w", err)
 	}
 	err = json.Unmarshal(file, &boxConfig)
 	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to parse Box config")
+		return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
 	}
 	return boxConfig, nil
 }
@@ -199,7 +197,7 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
 func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
 	val, err := jwtutil.RandomHex(20)
 	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to generate random string for jti")
+		return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
 	}
 
 	claims = &jws.ClaimSet{
@@ -240,12 +238,12 @@ func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err
 
 	block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
 	if len(rest) > 0 {
-		return nil, errors.Wrap(err, "box: extra data included in private key")
+		return nil, fmt.Errorf("box: extra data included in private key: %w", err)
 	}
 
 	rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
 	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to decrypt private key")
+		return nil, fmt.Errorf("box: failed to decrypt private key: %w", err)
 	}
 
 	return rsaKey.(*rsa.PrivateKey), nil
```
@@ -403,7 +401,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
}
|
}
|
||||||
|
|
||||||
if opt.UploadCutoff < minUploadCutoff {
|
if opt.UploadCutoff < minUploadCutoff {
|
||||||
return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
|
return nil, fmt.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
|
||||||
}
|
}
|
||||||
|
|
||||||
root = parsePath(root)
|
root = parsePath(root)
|
||||||
@@ -414,7 +412,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
if opt.AccessToken == "" {
|
if opt.AccessToken == "" {
|
||||||
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
|
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to configure Box")
|
return nil, fmt.Errorf("failed to configure Box: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -613,7 +611,7 @@ OUTER:
|
|||||||
return shouldRetry(ctx, resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return found, errors.Wrap(err, "couldn't list files")
|
return found, fmt.Errorf("couldn't list files: %w", err)
|
||||||
}
|
}
|
||||||
for i := range result.Entries {
|
for i := range result.Entries {
|
||||||
item := &result.Entries[i]
|
item := &result.Entries[i]
|
||||||
@@ -740,14 +738,14 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
|
|||||||
var conflict api.PreUploadCheckConflict
|
var conflict api.PreUploadCheckConflict
|
||||||
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
|
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", errors.Wrap(err, "pre-upload check: JSON decode failed")
|
return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
|
||||||
}
|
}
|
||||||
if conflict.Conflicts.Type != api.ItemTypeFile {
|
if conflict.Conflicts.Type != api.ItemTypeFile {
|
||||||
return "", errors.Wrap(err, "pre-upload check: can't overwrite non file with file")
|
return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
|
||||||
}
|
}
|
||||||
return conflict.Conflicts.ID, nil
|
return conflict.Conflicts.ID, nil
|
||||||
}
|
}
|
||||||
return "", errors.Wrap(err, "pre-upload check")
|
return "", fmt.Errorf("pre-upload check: %w", err)
|
||||||
}
|
}
|
||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
@@ -856,7 +854,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
|||||||
return shouldRetry(ctx, resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "rmdir failed")
|
return fmt.Errorf("rmdir failed: %w", err)
|
||||||
}
|
}
|
||||||
f.dirCache.FlushDir(dir)
|
f.dirCache.FlushDir(dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -900,7 +898,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
srcPath := srcObj.fs.rootSlash() + srcObj.remote
|
srcPath := srcObj.fs.rootSlash() + srcObj.remote
|
||||||
dstPath := f.rootSlash() + remote
|
dstPath := f.rootSlash() + remote
|
||||||
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
|
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
|
||||||
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
|
return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create temporary object
|
// Create temporary object
|
||||||
@@ -984,7 +982,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
|||||||
return shouldRetry(ctx, resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to read user info")
|
return nil, fmt.Errorf("failed to read user info: %w", err)
|
||||||
}
|
}
|
||||||
// FIXME max upload size would be useful to use in Update
|
// FIXME max upload size would be useful to use in Update
|
||||||
usage = &fs.Usage{
|
usage = &fs.Usage{
|
||||||
@@ -1145,7 +1143,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
|||||||
})
|
})
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
if deleteErrors != 0 {
|
if deleteErrors != 0 {
|
||||||
return errors.Errorf("failed to delete %d trash items", deleteErrors)
|
return fmt.Errorf("failed to delete %d trash items", deleteErrors)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1205,7 +1203,7 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
|
|||||||
return fs.ErrorIsDir
|
return fs.ErrorIsDir
|
||||||
}
|
}
|
||||||
if info.Type != api.ItemTypeFile {
|
if info.Type != api.ItemTypeFile {
|
||||||
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
|
return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile)
|
||||||
}
|
}
|
||||||
o.hasMetaData = true
|
o.hasMetaData = true
|
||||||
o.size = int64(info.Size)
|
o.size = int64(info.Size)
|
||||||
@@ -1341,7 +1339,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if result.TotalCount != 1 || len(result.Entries) != 1 {
|
if result.TotalCount != 1 || len(result.Entries) != 1 {
|
||||||
return errors.Errorf("failed to upload %v - not sure why", o)
|
return fmt.Errorf("failed to upload %v - not sure why", o)
|
||||||
}
|
}
|
||||||
return o.setMetaData(&result.Entries[0])
|
return o.setMetaData(&result.Entries[0])
|
||||||
}
|
}
|
||||||
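Note on the pattern: every hunk above applies the same translation — errors.Wrap(err, "msg") from github.com/pkg/errors becomes the standard library's fmt.Errorf("msg: %w", err). The setMetaData hunk shows the one shape that needs care: pkg/errors put the wrapped sentinel first (errors.Wrapf(fs.ErrorNotAFile, ...)), whereas the %w verb conventionally goes last. A minimal sketch of the idiom, assuming Go 1.13+ (errNotAFile is a hypothetical stand-in, not rclone's fs.ErrorNotAFile):

package main

import (
	"errors"
	"fmt"
)

// errNotAFile stands in for a sentinel error such as fs.ErrorNotAFile.
var errNotAFile = errors.New("object not a file")

func check(name string) error {
	// Before: errors.Wrapf(errNotAFile, "%q is a directory", name)
	// After: the message comes first and the wrapped error moves to a
	// trailing %w verb, which records it in the error chain.
	return fmt.Errorf("%q is a directory: %w", name, errNotAFile)
}

func main() {
	err := check("docs")
	fmt.Println(errors.Is(err, errNotAFile)) // true: %w preserves the chain
	fmt.Println(err)                         // "docs" is a directory: object not a file
}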
backend/box/upload.go
@@ -8,6 +8,7 @@ import (
 	"crypto/sha1"
 	"encoding/base64"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -15,7 +16,6 @@ import (
 	"sync"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/box/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
@@ -140,7 +140,7 @@ outer:
 				}
 			}
 		default:
-			return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
+			return nil, fmt.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
 		}
 	}
 	fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
@@ -151,7 +151,7 @@ outer:
 	}
 	err = json.Unmarshal(body, &result)
 	if err != nil {
-		return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
+		return nil, fmt.Errorf("couldn't decode commit response: %q: %w", body, err)
 	}
 	return result, nil
 }
@@ -177,7 +177,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
 	// Create upload session
 	session, err := o.createUploadSession(ctx, leaf, directoryID, size)
 	if err != nil {
-		return errors.Wrap(err, "multipart upload create session failed")
+		return fmt.Errorf("multipart upload create session failed: %w", err)
 	}
 	chunkSize := session.PartSize
 	fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
@@ -222,7 +222,7 @@ outer:
 		// Read the chunk
 		_, err = io.ReadFull(in, buf)
 		if err != nil {
-			err = errors.Wrap(err, "multipart upload failed to read source")
+			err = fmt.Errorf("multipart upload failed to read source: %w", err)
 			break outer
 		}
 
@@ -238,7 +238,7 @@ outer:
 			fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
 			partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
 			if err != nil {
-				err = errors.Wrap(err, "multipart upload failed to upload part")
+				err = fmt.Errorf("multipart upload failed to upload part: %w", err)
 				select {
 				case errs <- err:
 				default:
@@ -266,11 +266,11 @@ outer:
 	// Finalise the upload session
 	result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
 	if err != nil {
-		return errors.Wrap(err, "multipart upload failed to finalize")
+		return fmt.Errorf("multipart upload failed to finalize: %w", err)
 	}
 
 	if result.TotalCount != 1 || len(result.Entries) != 1 {
-		return errors.Errorf("multipart upload failed %v - not sure why", o)
+		return fmt.Errorf("multipart upload failed %v - not sure why", o)
 	}
 	return o.setMetaData(&result.Entries[0])
}
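Where the old code used errors.Errorf (a formatted error with no cause), fmt.Errorf is a drop-in replacement. The distinction that matters is between %v and %w when an error does appear among the format arguments: only %w makes it inspectable later. A hedged sketch of the difference, not taken from rclone:

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	cause := io.ErrUnexpectedEOF

	flat := fmt.Errorf("commit failed: %v", cause)    // formats the text only
	chained := fmt.Errorf("commit failed: %w", cause) // keeps the error chain

	fmt.Println(errors.Is(flat, io.ErrUnexpectedEOF))    // false
	fmt.Println(errors.Is(chained, io.ErrUnexpectedEOF)) // true
}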
48 backend/cache/cache.go (vendored)
@@ -5,6 +5,7 @@ package cache
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -19,7 +20,6 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/crypt"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/cache"
@@ -356,7 +356,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 		return nil, err
 	}
 	if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
-		return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
+		return nil, fmt.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
 			opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
 	}
 
@@ -366,13 +366,13 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 
 	rpath, err := parseRootPath(rootPath)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
+		return nil, fmt.Errorf("failed to clean root path %q: %w", rootPath, err)
 	}
 
 	remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
 	wrappedFs, wrapErr := cache.Get(ctx, remotePath)
 	if wrapErr != nil && wrapErr != fs.ErrorIsFile {
-		return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
+		return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remotePath, wrapErr)
 	}
 	var fsErr error
 	fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -401,7 +401,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 	if opt.PlexToken != "" {
 		f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
+			return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
 		}
 	} else {
 		if opt.PlexPassword != "" && opt.PlexUsername != "" {
@@ -413,7 +413,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 				m.Set("plex_token", token)
 			})
 			if err != nil {
-				return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
+				return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
 			}
 		}
 	}
@@ -434,11 +434,11 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 	}
 	err = os.MkdirAll(dbPath, os.ModePerm)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to create cache directory %v", dbPath)
+		return nil, fmt.Errorf("failed to create cache directory %v: %w", dbPath, err)
 	}
 	err = os.MkdirAll(chunkPath, os.ModePerm)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to create cache directory %v", chunkPath)
+		return nil, fmt.Errorf("failed to create cache directory %v: %w", chunkPath, err)
 	}
 
 	dbPath = filepath.Join(dbPath, name+".db")
@@ -450,7 +450,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 		DbWaitTime: time.Duration(opt.DbWaitTime),
 	})
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to start cache db")
+		return nil, fmt.Errorf("failed to start cache db: %w", err)
 	}
 	// Trap SIGINT and SIGTERM to close the DB handle gracefully
 	c := make(chan os.Signal, 1)
@@ -484,12 +484,12 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 	if f.opt.TempWritePath != "" {
 		err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
+			return nil, fmt.Errorf("failed to create cache directory %v: %w", f.opt.TempWritePath, err)
 		}
 		f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
 		f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
+			return nil, fmt.Errorf("failed to create temp fs: %w", err)
 		}
 		fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
 		fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
@@ -606,7 +606,7 @@ func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err er
 	out = make(rc.Params)
 	m, err := f.Stats()
 	if err != nil {
-		return out, errors.Errorf("error while getting cache stats")
+		return out, fmt.Errorf("error while getting cache stats")
 	}
 	out["status"] = "ok"
 	out["stats"] = m
@@ -633,7 +633,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 	out = make(rc.Params)
 	remoteInt, ok := in["remote"]
 	if !ok {
-		return out, errors.Errorf("remote is needed")
+		return out, fmt.Errorf("remote is needed")
 	}
 	remote := remoteInt.(string)
 	withData := false
@@ -644,7 +644,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 
 	remote = f.unwrapRemote(remote)
 	if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
-		return out, errors.Errorf("%s doesn't exist in cache", remote)
+		return out, fmt.Errorf("%s doesn't exist in cache", remote)
 	}
 
 	co := NewObject(f, remote)
@@ -653,7 +653,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 		cd := NewDirectory(f, remote)
 		err := f.cache.ExpireDir(cd)
 		if err != nil {
-			return out, errors.WithMessage(err, "error expiring directory")
+			return out, fmt.Errorf("error expiring directory: %w", err)
 		}
 		// notify vfs too
 		f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory)
@@ -664,7 +664,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 	// expire the entry
 	err = f.cache.ExpireObject(co, withData)
 	if err != nil {
-		return out, errors.WithMessage(err, "error expiring file")
+		return out, fmt.Errorf("error expiring file: %w", err)
 	}
 	// notify vfs too
 	f.notifyChangeUpstream(co.Remote(), fs.EntryObject)
@@ -685,24 +685,24 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
 		case 1:
 			start, err = strconv.ParseInt(ints[0], 10, 64)
 			if err != nil {
-				return nil, errors.Errorf("invalid range: %q", part)
+				return nil, fmt.Errorf("invalid range: %q", part)
 			}
 			end = start + 1
 		case 2:
 			if ints[0] != "" {
 				start, err = strconv.ParseInt(ints[0], 10, 64)
 				if err != nil {
-					return nil, errors.Errorf("invalid range: %q", part)
+					return nil, fmt.Errorf("invalid range: %q", part)
 				}
 			}
 			if ints[1] != "" {
 				end, err = strconv.ParseInt(ints[1], 10, 64)
 				if err != nil {
-					return nil, errors.Errorf("invalid range: %q", part)
+					return nil, fmt.Errorf("invalid range: %q", part)
 				}
 			}
 		default:
-			return nil, errors.Errorf("invalid range: %q", part)
+			return nil, fmt.Errorf("invalid range: %q", part)
 		}
 		crs = append(crs, chunkRange{start: start, end: end})
 	}
@@ -757,18 +757,18 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
 	delete(in, "chunks")
 	crs, err := parseChunks(s)
 	if err != nil {
-		return nil, errors.Wrap(err, "invalid chunks parameter")
+		return nil, fmt.Errorf("invalid chunks parameter: %w", err)
 	}
 	var files [][2]string
 	for k, v := range in {
 		if !strings.HasPrefix(k, "file") {
-			return nil, errors.Errorf("invalid parameter %s=%s", k, v)
+			return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
 		}
 		switch v := v.(type) {
 		case string:
 			files = append(files, [2]string{v, f.unwrapRemote(v)})
 		default:
-			return nil, errors.Errorf("invalid parameter %s=%s", k, v)
+			return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
 		}
 	}
 	type fileStatus struct {
@@ -1124,7 +1124,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 			case fs.Directory:
 				_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
 			default:
-				return errors.Errorf("Unknown object type %T", entry)
+				return fmt.Errorf("Unknown object type %T", entry)
 			}
 		}
 
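The httpExpireRemote hunks also cover errors.WithMessage(err, "msg"), which annotated an error without adding a stack trace; for callers that match with errors.Is or errors.As, fmt.Errorf("msg: %w", err) behaves the same way, and the wrapping composes across levels. A small sketch, assuming Go 1.13+ (errExpired and the functions are hypothetical, not rclone code):

package main

import (
	"errors"
	"fmt"
)

var errExpired = errors.New("entry expired") // hypothetical sentinel

func expire() error { return errExpired }

func handler() error {
	if err := expire(); err != nil {
		return fmt.Errorf("error expiring directory: %w", err) // first wrap
	}
	return nil
}

func main() {
	err := fmt.Errorf("rc call failed: %w", handler()) // second wrap
	fmt.Println(errors.Is(err, errExpired))            // true through both levels
	fmt.Println(errors.Unwrap(errors.Unwrap(err)) == errExpired) // true
}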
36 backend/cache/cache_internal_test.go (vendored)
@@ -7,6 +7,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/base64"
+	"errors"
 	goflag "flag"
 	"fmt"
 	"io"
@@ -22,7 +23,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/cache"
 	"github.com/rclone/rclone/backend/crypt"
 	_ "github.com/rclone/rclone/backend/drive"
@@ -446,7 +446,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 			return err
 		}
 		if coSize != expectedSize {
-			return errors.Errorf("%v <> %v", coSize, expectedSize)
+			return fmt.Errorf("%v <> %v", coSize, expectedSize)
 		}
 		return nil
 	}, 12, time.Second*10)
@@ -502,7 +502,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
 		}
 		if len(li) != 2 {
 			log.Printf("not expected listing /test: %v", li)
-			return errors.Errorf("not expected listing /test: %v", li)
+			return fmt.Errorf("not expected listing /test: %v", li)
 		}
 
 		li, err = runInstance.list(t, rootFs, "test/one")
@@ -512,7 +512,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
 		}
 		if len(li) != 0 {
 			log.Printf("not expected listing /test/one: %v", li)
-			return errors.Errorf("not expected listing /test/one: %v", li)
+			return fmt.Errorf("not expected listing /test/one: %v", li)
 		}
 
 		li, err = runInstance.list(t, rootFs, "test/second")
@@ -522,21 +522,21 @@ func TestInternalMoveWithNotify(t *testing.T) {
 		}
 		if len(li) != 1 {
 			log.Printf("not expected listing /test/second: %v", li)
-			return errors.Errorf("not expected listing /test/second: %v", li)
+			return fmt.Errorf("not expected listing /test/second: %v", li)
 		}
 		if fi, ok := li[0].(os.FileInfo); ok {
 			if fi.Name() != "data.bin" {
 				log.Printf("not expected name: %v", fi.Name())
-				return errors.Errorf("not expected name: %v", fi.Name())
+				return fmt.Errorf("not expected name: %v", fi.Name())
 			}
 		} else if di, ok := li[0].(fs.DirEntry); ok {
 			if di.Remote() != "test/second/data.bin" {
 				log.Printf("not expected remote: %v", di.Remote())
-				return errors.Errorf("not expected remote: %v", di.Remote())
+				return fmt.Errorf("not expected remote: %v", di.Remote())
 			}
 		} else {
 			log.Printf("unexpected listing: %v", li)
-			return errors.Errorf("unexpected listing: %v", li)
+			return fmt.Errorf("unexpected listing: %v", li)
 		}
 
 		log.Printf("complete listing: %v", li)
@@ -591,17 +591,17 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
 		if !found {
 			log.Printf("not found /test")
-			return errors.Errorf("not found /test")
+			return fmt.Errorf("not found /test")
 		}
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
 		if !found {
 			log.Printf("not found /test/one")
-			return errors.Errorf("not found /test/one")
+			return fmt.Errorf("not found /test/one")
 		}
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
 		if !found {
 			log.Printf("not found /test/one/test2")
-			return errors.Errorf("not found /test/one/test2")
+			return fmt.Errorf("not found /test/one/test2")
 		}
 		li, err := runInstance.list(t, rootFs, "test/one")
 		if err != nil {
@@ -610,21 +610,21 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 		}
 		if len(li) != 1 {
 			log.Printf("not expected listing /test/one: %v", li)
-			return errors.Errorf("not expected listing /test/one: %v", li)
+			return fmt.Errorf("not expected listing /test/one: %v", li)
 		}
 		if fi, ok := li[0].(os.FileInfo); ok {
 			if fi.Name() != "test2" {
 				log.Printf("not expected name: %v", fi.Name())
-				return errors.Errorf("not expected name: %v", fi.Name())
+				return fmt.Errorf("not expected name: %v", fi.Name())
 			}
 		} else if di, ok := li[0].(fs.DirEntry); ok {
 			if di.Remote() != "test/one/test2" {
 				log.Printf("not expected remote: %v", di.Remote())
-				return errors.Errorf("not expected remote: %v", di.Remote())
+				return fmt.Errorf("not expected remote: %v", di.Remote())
 			}
 		} else {
 			log.Printf("unexpected listing: %v", li)
-			return errors.Errorf("unexpected listing: %v", li)
+			return fmt.Errorf("unexpected listing: %v", li)
 		}
 		log.Printf("complete listing /test/one/test2")
 		return nil
@@ -1062,7 +1062,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
 	checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
 
 	if !noLengthCheck && size != int64(len(checkSample)) {
-		return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
+		return checkSample, fmt.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
 	}
 	return checkSample, nil
 }
@@ -1257,7 +1257,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
 		case state = <-buCh:
 			// continue
 		case <-time.After(maxDuration):
-			waitCh <- errors.Errorf("Timed out waiting for background upload: %v", remote)
+			waitCh <- fmt.Errorf("Timed out waiting for background upload: %v", remote)
 			return
 		}
 		checkRemote := state.Remote
@@ -1274,7 +1274,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
 				return
 			}
 		}
-		waitCh <- errors.Errorf("Too many attempts to wait for the background upload: %v", remote)
+		waitCh <- fmt.Errorf("Too many attempts to wait for the background upload: %v", remote)
 	}()
 	return waitCh
}
6 backend/cache/handle.go (vendored)
@@ -5,6 +5,7 @@ package cache
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"path"
@@ -13,7 +14,6 @@ import (
 	"sync"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/operations"
 )
@@ -243,7 +243,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 			return nil, io.ErrUnexpectedEOF
 		}
 
-		return nil, errors.Errorf("chunk not found %v", chunkStart)
+		return nil, fmt.Errorf("chunk not found %v", chunkStart)
 	}
 
 	// first chunk will be aligned with the start
@@ -323,7 +323,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
 		fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
 		r.offset = r.cachedObject.Size() + offset
 	default:
-		err = errors.Errorf("cache: unimplemented seek whence %v", whence)
+		err = fmt.Errorf("cache: unimplemented seek whence %v", whence)
 	}
 
 	chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
14 backend/cache/object.go (vendored)
@@ -5,12 +5,12 @@ package cache
 
 import (
 	"context"
+	"fmt"
 	"io"
 	"path"
 	"sync"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/readers"
@@ -178,10 +178,14 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
 	}
 	if o.isTempFile() {
 		liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
-		err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
+		if err != nil {
+			err = fmt.Errorf("in parent fs %v: %w", o.ParentFs, err)
+		}
 	} else {
 		liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
-		err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
+		if err != nil {
+			err = fmt.Errorf("in cache fs %v: %w", o.CacheFs.Fs, err)
+		}
 	}
 	if err != nil {
 		fs.Errorf(o, "error refreshing object in : %v", err)
@@ -253,7 +257,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		defer o.CacheFs.backgroundRunner.play()
 		// don't allow started uploads
 		if o.isTempFile() && o.tempFileStartedUpload() {
-			return errors.Errorf("%v is currently uploading, can't update", o)
+			return fmt.Errorf("%v is currently uploading, can't update", o)
 		}
 	}
 	fs.Debugf(o, "updating object contents with size %v", src.Size())
@@ -292,7 +296,7 @@ func (o *Object) Remove(ctx context.Context) error {
 		defer o.CacheFs.backgroundRunner.play()
 		// don't allow started uploads
 		if o.isTempFile() && o.tempFileStartedUpload() {
-			return errors.Errorf("%v is currently uploading, can't delete", o)
+			return fmt.Errorf("%v is currently uploading, can't delete", o)
 		}
 	}
 	err := o.Object.Remove(ctx)
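The refreshFromSource hunk above is the one place where the rewrite is not line-for-line: pkg/errors' Wrapf returns nil when the wrapped error is nil, so the old code could wrap unconditionally, while fmt.Errorf always returns a non-nil error and would turn a successful lookup into a failure. That is why the hunk adds a nil guard. A sketch of the trap, using a hypothetical lookup function rather than rclone's:

package main

import "fmt"

func lookup() (string, error) { return "object", nil } // succeeds

func main() {
	v, err := lookup()

	// Buggy translation: fmt.Errorf never returns nil, so this would make
	// err non-nil even though lookup succeeded.
	// err = fmt.Errorf("in parent fs: %w", err)

	// Correct translation: only wrap when there is something to wrap.
	if err != nil {
		err = fmt.Errorf("in parent fs: %w", err)
	}
	fmt.Println(v, err) // object <nil>
}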
4 backend/cache/storage_memory.go (vendored)
@@ -4,12 +4,12 @@
 package cache
 
 import (
+	"fmt"
 	"strconv"
 	"strings"
 	"time"
 
 	cache "github.com/patrickmn/go-cache"
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 )
 
@@ -53,7 +53,7 @@ func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
 		return data, nil
 	}
 
-	return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
+	return nil, fmt.Errorf("couldn't get cached object data at offset %v", offset)
 }
 
 // AddChunk adds a new chunk of a cached object
|
|||||||
83
backend/cache/storage_persistent.go
vendored
83
backend/cache/storage_persistent.go
vendored
@@ -17,7 +17,6 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/walk"
|
"github.com/rclone/rclone/fs/walk"
|
||||||
bolt "go.etcd.io/bbolt"
|
bolt "go.etcd.io/bbolt"
|
||||||
@@ -120,11 +119,11 @@ func (b *Persistent) connect() error {
|
|||||||
|
|
||||||
err = os.MkdirAll(b.dataPath, os.ModePerm)
|
err = os.MkdirAll(b.dataPath, os.ModePerm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
|
return fmt.Errorf("failed to create a data directory %q: %w", b.dataPath, err)
|
||||||
}
|
}
|
||||||
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
|
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
|
return fmt.Errorf("failed to open a cache connection to %q: %w", b.dbPath, err)
|
||||||
}
|
}
|
||||||
if b.features.PurgeDb {
|
if b.features.PurgeDb {
|
||||||
b.Purge()
|
b.Purge()
|
||||||
@@ -176,7 +175,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
|
|||||||
err := b.db.View(func(tx *bolt.Tx) error {
|
err := b.db.View(func(tx *bolt.Tx) error {
|
||||||
bucket := b.getBucket(remote, false, tx)
|
bucket := b.getBucket(remote, false, tx)
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return errors.Errorf("couldn't open bucket (%v)", remote)
|
return fmt.Errorf("couldn't open bucket (%v)", remote)
|
||||||
}
|
}
|
||||||
|
|
||||||
data := bucket.Get([]byte("."))
|
data := bucket.Get([]byte("."))
|
||||||
@@ -184,7 +183,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
|
|||||||
return json.Unmarshal(data, cd)
|
return json.Unmarshal(data, cd)
|
||||||
}
|
}
|
||||||
|
|
||||||
return errors.Errorf("%v not found", remote)
|
return fmt.Errorf("%v not found", remote)
|
||||||
})
|
})
|
||||||
|
|
||||||
return cd, err
|
return cd, err
|
||||||
@@ -209,7 +208,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
|
|||||||
bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
|
bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
|
||||||
}
|
}
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return errors.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
|
return fmt.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, cachedDir := range cachedDirs {
|
for _, cachedDir := range cachedDirs {
|
||||||
@@ -226,7 +225,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
|
|||||||
|
|
||||||
encoded, err := json.Marshal(cachedDir)
|
encoded, err := json.Marshal(cachedDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
|
return fmt.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
|
||||||
}
|
}
|
||||||
err = b.Put([]byte("."), encoded)
|
err = b.Put([]byte("."), encoded)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -244,17 +243,17 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
|
|||||||
err := b.db.View(func(tx *bolt.Tx) error {
|
err := b.db.View(func(tx *bolt.Tx) error {
|
||||||
bucket := b.getBucket(cachedDir.abs(), false, tx)
|
bucket := b.getBucket(cachedDir.abs(), false, tx)
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return errors.Errorf("couldn't open bucket (%v)", cachedDir.abs())
|
return fmt.Errorf("couldn't open bucket (%v)", cachedDir.abs())
|
||||||
}
|
}
|
||||||
|
|
||||||
val := bucket.Get([]byte("."))
|
val := bucket.Get([]byte("."))
|
||||||
if val != nil {
|
if val != nil {
|
||||||
err := json.Unmarshal(val, cachedDir)
|
err := json.Unmarshal(val, cachedDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("error during unmarshalling obj: %v", err)
|
return fmt.Errorf("error during unmarshalling obj: %v", err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
return errors.Errorf("missing cached dir: %v", cachedDir)
|
return fmt.Errorf("missing cached dir: %v", cachedDir)
|
||||||
}
|
}
|
||||||
|
|
||||||
c := bucket.Cursor()
|
c := bucket.Cursor()
|
||||||
@@ -269,7 +268,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
|
|||||||
// we try to find a cached meta for the dir
|
// we try to find a cached meta for the dir
|
||||||
currentBucket := c.Bucket().Bucket(k)
|
currentBucket := c.Bucket().Bucket(k)
|
||||||
if currentBucket == nil {
|
if currentBucket == nil {
|
||||||
return errors.Errorf("couldn't open bucket (%v)", string(k))
|
return fmt.Errorf("couldn't open bucket (%v)", string(k))
|
||||||
}
|
}
|
||||||
|
|
||||||
metaKey := currentBucket.Get([]byte("."))
|
metaKey := currentBucket.Get([]byte("."))
|
||||||
@@ -318,7 +317,7 @@ func (b *Persistent) RemoveDir(fp string) error {
|
|||||||
err = b.db.Update(func(tx *bolt.Tx) error {
|
err = b.db.Update(func(tx *bolt.Tx) error {
|
||||||
bucket := b.getBucket(cleanPath(parentDir), false, tx)
|
bucket := b.getBucket(cleanPath(parentDir), false, tx)
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return errors.Errorf("couldn't open bucket (%v)", fp)
|
return fmt.Errorf("couldn't open bucket (%v)", fp)
|
||||||
}
|
}
|
||||||
// delete the cached dir
|
// delete the cached dir
|
||||||
err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
|
err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
|
||||||
@@ -378,13 +377,13 @@ func (b *Persistent) GetObject(cachedObject *Object) (err error) {
|
|||||||
return b.db.View(func(tx *bolt.Tx) error {
|
return b.db.View(func(tx *bolt.Tx) error {
|
||||||
bucket := b.getBucket(cachedObject.Dir, false, tx)
|
bucket := b.getBucket(cachedObject.Dir, false, tx)
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return errors.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
|
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
|
||||||
}
|
}
|
||||||
val := bucket.Get([]byte(cachedObject.Name))
|
val := bucket.Get([]byte(cachedObject.Name))
|
||||||
if val != nil {
|
if val != nil {
|
||||||
return json.Unmarshal(val, cachedObject)
|
return json.Unmarshal(val, cachedObject)
|
||||||
}
|
}
|
||||||
return errors.Errorf("couldn't find object (%v)", cachedObject.Name)
|
return fmt.Errorf("couldn't find object (%v)", cachedObject.Name)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -393,16 +392,16 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
|
|||||||
return b.db.Update(func(tx *bolt.Tx) error {
|
return b.db.Update(func(tx *bolt.Tx) error {
|
||||||
bucket := b.getBucket(cachedObject.Dir, true, tx)
|
bucket := b.getBucket(cachedObject.Dir, true, tx)
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return errors.Errorf("couldn't open parent bucket for %v", cachedObject)
|
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject)
|
||||||
}
|
}
|
||||||
// cache Object Info
|
// cache Object Info
|
||||||
encoded, err := json.Marshal(cachedObject)
|
encoded, err := json.Marshal(cachedObject)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
|
return fmt.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
|
||||||
}
|
}
|
||||||
err = bucket.Put([]byte(cachedObject.Name), encoded)
|
err = bucket.Put([]byte(cachedObject.Name), encoded)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
|
return fmt.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
@@ -414,7 +413,7 @@ func (b *Persistent) RemoveObject(fp string) error {
|
|||||||
return b.db.Update(func(tx *bolt.Tx) error {
|
return b.db.Update(func(tx *bolt.Tx) error {
|
||||||
bucket := b.getBucket(cleanPath(parentDir), false, tx)
|
bucket := b.getBucket(cleanPath(parentDir), false, tx)
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return errors.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
|
return fmt.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
|
||||||
}
|
}
|
||||||
err := bucket.Delete([]byte(cleanPath(objName)))
|
err := bucket.Delete([]byte(cleanPath(objName)))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -446,7 +445,7 @@ func (b *Persistent) HasEntry(remote string) bool {
|
|||||||
err := b.db.View(func(tx *bolt.Tx) error {
|
err := b.db.View(func(tx *bolt.Tx) error {
|
||||||
bucket := b.getBucket(dir, false, tx)
|
bucket := b.getBucket(dir, false, tx)
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return errors.Errorf("couldn't open parent bucket for %v", remote)
|
return fmt.Errorf("couldn't open parent bucket for %v", remote)
|
||||||
}
|
}
|
||||||
if f := bucket.Bucket([]byte(name)); f != nil {
|
if f := bucket.Bucket([]byte(name)); f != nil {
|
||||||
return nil
|
return nil
|
||||||
@@ -455,7 +454,7 @@ func (b *Persistent) HasEntry(remote string) bool {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return errors.Errorf("couldn't find object (%v)", remote)
|
return fmt.Errorf("couldn't find object (%v)", remote)
|
||||||
})
|
})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return true
|
return true
|
||||||
@@ -555,7 +554,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
|
|||||||
err := b.db.Update(func(tx *bolt.Tx) error {
|
err := b.db.Update(func(tx *bolt.Tx) error {
|
||||||
dataTsBucket := tx.Bucket([]byte(DataTsBucket))
|
dataTsBucket := tx.Bucket([]byte(DataTsBucket))
|
||||||
if dataTsBucket == nil {
|
if dataTsBucket == nil {
|
||||||
return errors.Errorf("Couldn't open (%v) bucket", DataTsBucket)
|
return fmt.Errorf("Couldn't open (%v) bucket", DataTsBucket)
|
||||||
}
|
}
|
||||||
// iterate through ts
|
// iterate through ts
|
||||||
c := dataTsBucket.Cursor()
|
c := dataTsBucket.Cursor()
|
||||||
@@ -733,7 +732,7 @@ func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return errors.Errorf("not found %v-%v", path, offset)
|
return fmt.Errorf("not found %v-%v", path, offset)
|
||||||
})
|
})
|
||||||
|
|
||||||
return t, err
|
return t, err
|
||||||
@@ -773,7 +772,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
|
|||||||
return b.db.Update(func(tx *bolt.Tx) error {
|
return b.db.Update(func(tx *bolt.Tx) error {
|
||||||
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
|
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("couldn't bucket for %v", tempBucket)
|
return fmt.Errorf("couldn't bucket for %v", tempBucket)
|
||||||
}
|
}
|
||||||
tempObj := &tempUploadInfo{
|
tempObj := &tempUploadInfo{
|
||||||
DestPath: destPath,
|
DestPath: destPath,
|
||||||
@@ -784,11 +783,11 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
|
|||||||
// cache Object Info
|
// cache Object Info
|
||||||
encoded, err := json.Marshal(tempObj)
|
encoded, err := json.Marshal(tempObj)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
|
return fmt.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
|
||||||
}
|
}
|
||||||
err = bucket.Put([]byte(destPath), encoded)
|
err = bucket.Put([]byte(destPath), encoded)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
|
return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -803,7 +802,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
|
|||||||
err = b.db.Update(func(tx *bolt.Tx) error {
|
err = b.db.Update(func(tx *bolt.Tx) error {
|
||||||
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
|
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("couldn't bucket for %v", tempBucket)
|
return fmt.Errorf("couldn't bucket for %v", tempBucket)
|
||||||
}
|
}
|
||||||
|
|
||||||
c := bucket.Cursor()
|
c := bucket.Cursor()
|
||||||
@@ -836,7 +835,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return errors.Errorf("no pending upload found")
|
return fmt.Errorf("no pending upload found")
|
||||||
})
|
})
|
||||||
|
|
||||||
return destPath, err
|
return destPath, err
|
||||||
@@ -847,14 +846,14 @@ func (b *Persistent) SearchPendingUpload(remote string) (started bool, err error
|
|||||||
err = b.db.View(func(tx *bolt.Tx) error {
|
err = b.db.View(func(tx *bolt.Tx) error {
|
||||||
bucket := tx.Bucket([]byte(tempBucket))
|
bucket := tx.Bucket([]byte(tempBucket))
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return errors.Errorf("couldn't bucket for %v", tempBucket)
|
return fmt.Errorf("couldn't bucket for %v", tempBucket)
|
||||||
}
|
}
|
||||||
|
|
||||||
var tempObj = &tempUploadInfo{}
|
var tempObj = &tempUploadInfo{}
|
||||||
v := bucket.Get([]byte(remote))
|
v := bucket.Get([]byte(remote))
|
||||||
err = json.Unmarshal(v, tempObj)
|
err = json.Unmarshal(v, tempObj)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("pending upload (%v) not found %v", remote, err)
|
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
started = tempObj.Started
|
started = tempObj.Started
|
||||||
@@ -869,7 +868,7 @@ func (b *Persistent) searchPendingUploadFromDir(dir string) (remotes []string, e
|
|||||||
err = b.db.View(func(tx *bolt.Tx) error {
|
err = b.db.View(func(tx *bolt.Tx) error {
|
||||||
bucket := tx.Bucket([]byte(tempBucket))
|
bucket := tx.Bucket([]byte(tempBucket))
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return errors.Errorf("couldn't bucket for %v", tempBucket)
|
return fmt.Errorf("couldn't bucket for %v", tempBucket)
|
||||||
}
|
}
|
||||||
|
|
||||||
c := bucket.Cursor()
|
c := bucket.Cursor()
|
||||||
@@ -899,22 +898,22 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
|
|||||||
return b.db.Update(func(tx *bolt.Tx) error {
|
return b.db.Update(func(tx *bolt.Tx) error {
|
||||||
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
|
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("couldn't bucket for %v", tempBucket)
|
return fmt.Errorf("couldn't bucket for %v", tempBucket)
|
||||||
}
|
}
|
||||||
var tempObj = &tempUploadInfo{}
|
var tempObj = &tempUploadInfo{}
|
||||||
v := bucket.Get([]byte(remote))
|
v := bucket.Get([]byte(remote))
|
||||||
err = json.Unmarshal(v, tempObj)
|
err = json.Unmarshal(v, tempObj)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("pending upload (%v) not found %v", remote, err)
|
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
|
||||||
}
|
}
|
||||||
tempObj.Started = false
|
tempObj.Started = false
|
||||||
v2, err := json.Marshal(tempObj)
|
v2, err := json.Marshal(tempObj)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("pending upload not updated %v", err)
|
return fmt.Errorf("pending upload not updated %v", err)
|
||||||
}
|
}
|
||||||
err = bucket.Put([]byte(tempObj.DestPath), v2)
|
err = bucket.Put([]byte(tempObj.DestPath), v2)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("pending upload not updated %v", err)
|
return fmt.Errorf("pending upload not updated %v", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
@@ -927,7 +926,7 @@ func (b *Persistent) removePendingUpload(remote string) error {
|
|||||||
return b.db.Update(func(tx *bolt.Tx) error {
|
return b.db.Update(func(tx *bolt.Tx) error {
|
||||||
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
|
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Errorf("couldn't bucket for %v", tempBucket)
|
return fmt.Errorf("couldn't bucket for %v", tempBucket)
|
||||||
}
|
}
|
||||||
return bucket.Delete([]byte(remote))
|
return bucket.Delete([]byte(remote))
|
||||||
})
|
})
|
||||||
@@ -942,17 +941,17 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
 	return b.db.Update(func(tx *bolt.Tx) error {
 		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
 		if err != nil {
-			return errors.Errorf("couldn't bucket for %v", tempBucket)
+			return fmt.Errorf("couldn't bucket for %v", tempBucket)
 		}

 		var tempObj = &tempUploadInfo{}
 		v := bucket.Get([]byte(remote))
 		err = json.Unmarshal(v, tempObj)
 		if err != nil {
-			return errors.Errorf("pending upload (%v) not found %v", remote, err)
+			return fmt.Errorf("pending upload (%v) not found %v", remote, err)
 		}
 		if tempObj.Started {
-			return errors.Errorf("pending upload already started %v", remote)
+			return fmt.Errorf("pending upload already started %v", remote)
 		}
 		err = fn(tempObj)
 		if err != nil {
@@ -970,11 +969,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
 		}
 		v2, err := json.Marshal(tempObj)
 		if err != nil {
-			return errors.Errorf("pending upload not updated %v", err)
+			return fmt.Errorf("pending upload not updated %v", err)
 		}
 		err = bucket.Put([]byte(tempObj.DestPath), v2)
 		if err != nil {
-			return errors.Errorf("pending upload not updated %v", err)
+			return fmt.Errorf("pending upload not updated %v", err)
 		}

 		return nil
@@ -1015,11 +1014,11 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
 		// cache Object Info
 		encoded, err := json.Marshal(tempObj)
 		if err != nil {
-			return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
+			return fmt.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
 		}
 		err = bucket.Put([]byte(destPath), encoded)
 		if err != nil {
-			return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
+			return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
 		}
 		fs.Debugf(cacheFs, "reconciled temporary upload: %v", destPath)
 	}
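These cache-backend hunks are part of the wider move off the archived github.com/pkg/errors package and onto the standard library. Where the old code formatted the cause with %v, fmt.Errorf produces an identical message, so the swap is purely mechanical; a minimal sketch (the cause value is illustrative, not rclone's):

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	cause := errors.New("bucket missing") // illustrative cause
	// before: errors.Errorf("couldn't bucket for %v", cause)  (github.com/pkg/errors)
	// after:  fmt.Errorf("couldn't bucket for %v", cause)     (standard library)
	err := fmt.Errorf("couldn't bucket for %v", cause)
	fmt.Println(err) // couldn't bucket for bucket missing
}
```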
@@ -8,6 +8,7 @@ import (
 	"crypto/sha1"
 	"encoding/hex"
 	"encoding/json"
+	"errors"
 	"fmt"
 	gohash "hash"
 	"io"
@@ -21,7 +22,6 @@ import (
 	"sync"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/cache"
@@ -290,13 +290,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,

 	baseName, basePath, err := fspath.SplitFs(remote)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
+		return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
 	}
 	// Look for a file first
 	remotePath := fspath.JoinRootPath(basePath, rpath)
 	baseFs, err := cache.Get(ctx, baseName+remotePath)
 	if err != fs.ErrorIsFile && err != nil {
-		return nil, errors.Wrapf(err, "failed to make remote %q to wrap", baseName+remotePath)
+		return nil, fmt.Errorf("failed to make remote %q to wrap: %w", baseName+remotePath, err)
 	}
 	if !operations.CanServerSideMove(baseFs) {
 		return nil, errors.New("can't use chunker on a backend which doesn't support server-side move or copy")
@@ -386,7 +386,7 @@ type Fs struct {
 // configure must be called only from NewFs or by unit tests.
 func (f *Fs) configure(nameFormat, metaFormat, hashType, transactionMode string) error {
 	if err := f.setChunkNameFormat(nameFormat); err != nil {
-		return errors.Wrapf(err, "invalid name format '%s'", nameFormat)
+		return fmt.Errorf("invalid name format '%s': %w", nameFormat, err)
 	}
 	if err := f.setMetaFormat(metaFormat); err != nil {
 		return err
@@ -878,7 +878,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // ignores non-chunked objects and skips chunk size checks.
 func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
 	if err := f.forbidChunk(false, remote); err != nil {
-		return nil, errors.Wrap(err, "can't access")
+		return nil, fmt.Errorf("can't access: %w", err)
 	}

 	var (
@@ -927,7 +927,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
 	case fs.ErrorDirNotFound:
 		entries = nil
 	default:
-		return nil, errors.Wrap(err, "can't detect composite file")
+		return nil, fmt.Errorf("can't detect composite file: %w", err)
 	}

 	if f.useNoRename {
@@ -1067,7 +1067,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
 	case ErrMetaTooBig, ErrMetaUnknown:
 		return err // return these errors unwrapped for unit tests
 	default:
-		return errors.Wrap(err, "invalid metadata")
+		return fmt.Errorf("invalid metadata: %w", err)
 	}
 	if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {
 		return errors.New("metadata doesn't match file size")
@@ -1132,7 +1132,7 @@ func (f *Fs) put(

 	// Perform consistency checks
 	if err := f.forbidChunk(src, remote); err != nil {
-		return nil, errors.Wrap(err, action+" refused")
+		return nil, fmt.Errorf("%s refused: %w", action, err)
 	}
 	if target == nil {
 		// Get target object with a quick directory scan
@@ -1146,7 +1146,7 @@ func (f *Fs) put(
 	obj := target.(*Object)
 	if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
 		// refuse to update a file of unsupported format
-		return nil, errors.Wrap(err, "refusing to "+action)
+		return nil, fmt.Errorf("refusing to %s: %w", action, err)
 	}
 }

@@ -1564,7 +1564,7 @@ func (f *Fs) Hashes() hash.Set {
 // Shouldn't return an error if it already exists
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	if err := f.forbidChunk(dir, dir); err != nil {
-		return errors.Wrap(err, "can't mkdir")
+		return fmt.Errorf("can't mkdir: %w", err)
 	}
 	return f.base.Mkdir(ctx, dir)
 }
@@ -1633,7 +1633,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 	if err := o.f.forbidChunk(o, o.Remote()); err != nil {
 		// operations.Move can still call Remove if chunker's Move refuses
 		// to corrupt file in hard mode. Hence, refuse to Remove, too.
-		return errors.Wrap(err, "refuse to corrupt")
+		return fmt.Errorf("refuse to corrupt: %w", err)
 	}
 	if err := o.readMetadata(ctx); err == ErrMetaUnknown {
 		// Proceed but warn user that unexpected things can happen.
@@ -1661,12 +1661,12 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 // copyOrMove implements copy or move
 func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMoveFn, md5, sha1, opName string) (fs.Object, error) {
 	if err := f.forbidChunk(o, remote); err != nil {
-		return nil, errors.Wrapf(err, "can't %s", opName)
+		return nil, fmt.Errorf("can't %s: %w", opName, err)
 	}
 	if err := o.readMetadata(ctx); err != nil {
 		// Refuse to copy/move composite files with invalid or future
 		// metadata format which might involve unsupported chunk types.
-		return nil, errors.Wrapf(err, "can't %s this file", opName)
+		return nil, fmt.Errorf("can't %s this file: %w", opName, err)
 	}
 	if !o.isComposite() {
 		fs.Debugf(o, "%s non-chunked object...", opName)
@@ -2163,7 +2163,7 @@ func (o *Object) UnWrap() fs.Object {
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 	if err := o.readMetadata(ctx); err != nil {
 		// refuse to open unsupported format
-		return nil, errors.Wrap(err, "can't open")
+		return nil, fmt.Errorf("can't open: %w", err)
 	}
 	if !o.isComposite() {
 		return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
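In the chunker hunks the pattern changes from errors.Wrap/Wrapf to fmt.Errorf with the %w verb, the standard wrapping mechanism since Go 1.13. Unlike %v, %w keeps the wrapped error reachable, so callers can still match it with errors.Is or errors.As; a minimal sketch with an illustrative sentinel:

```go
package main

import (
	"errors"
	"fmt"
)

var errForbidden = errors.New("chunk overlaps with existing file") // illustrative

func main() {
	// before: errors.Wrap(err, "can't mkdir")       (github.com/pkg/errors)
	// after:  fmt.Errorf("can't mkdir: %w", err)    (standard library)
	err := fmt.Errorf("can't mkdir: %w", errForbidden)
	fmt.Println(err)                          // can't mkdir: chunk overlaps with existing file
	fmt.Println(errors.Is(err, errForbidden)) // true: the cause survives wrapping
}
```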
@@ -44,6 +44,7 @@ func TestIntegration(t *testing.T) {
 			"UserInfo",
 			"Disconnect",
 		},
+		QuickTestOK: true,
 	}
 	if *fstest.RemoteName == "" {
 		name := "TestChunker"
@@ -10,6 +10,7 @@ import (
 	"encoding/binary"
 	"encoding/hex"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -21,7 +22,6 @@ import (
 	"github.com/buengese/sgzip"
 	"github.com/gabriel-vasile/mimetype"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/chunkedreader"
@@ -143,7 +143,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,

 	wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
+		return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
 	}

 	// Strip trailing slashes if they exist in rpath
@@ -158,7 +158,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		wrappedFs, err = wInfo.NewFs(ctx, wName, remotePath, wConfig)
 	}
 	if err != nil && err != fs.ErrorIsFile {
-		return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
+		return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
 	}

 	// Create the wrapping fs
@@ -304,7 +304,7 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
 		case fs.Directory:
 			f.addDir(&newEntries, x)
 		default:
-			return nil, errors.Errorf("Unknown object type %T", entry)
+			return nil, fmt.Errorf("Unknown object type %T", entry)
 		}
 	}
 	return newEntries, nil
@@ -410,7 +410,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
 	srcHash := hasher.Sums()[ht]
 	dstHash, err := o.Hash(ctx, ht)
 	if err != nil {
-		return errors.Wrap(err, "failed to read destination hash")
+		return fmt.Errorf("failed to read destination hash: %w", err)
 	}
 	if srcHash != "" && dstHash != "" && srcHash != dstHash {
 		// remove object
@@ -418,7 +418,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
 		if err != nil {
 			fs.Errorf(o, "Failed to remove corrupted object: %v", err)
 		}
-		return errors.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
+		return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
 	}
 	return nil
 }
@@ -462,10 +462,10 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
 		_ = os.Remove(tempFile.Name())
 	}()
 	if err != nil {
-		return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file")
+		return nil, fmt.Errorf("Failed to create temporary local FS to spool file: %w", err)
 	}
 	if _, err = io.Copy(tempFile, in); err != nil {
-		return nil, errors.Wrap(err, "Failed to write temporary local file")
+		return nil, fmt.Errorf("Failed to write temporary local file: %w", err)
 	}
 	if _, err = tempFile.Seek(0, 0); err != nil {
 		return nil, err
@@ -714,7 +714,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 	if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) {
 		err = oldObj.(*Object).Object.Remove(ctx)
 		if err != nil {
-			return nil, errors.Wrap(err, "Could remove original object")
+			return nil, fmt.Errorf("Could remove original object: %w", err)
 		}
 	}

@@ -723,7 +723,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 	if compressible {
 		wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
 		if err != nil {
-			return nil, errors.Wrap(err, "Couldn't rename streamed Object.")
+			return nil, fmt.Errorf("Couldn't rename streamed Object.: %w", err)
 		}
 		newObj.Object = wrapObj
 	}
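The rcat hunk above sits in the compress backend's spooling path: the incoming stream is copied to a temporary local file and rewound so it can be read again for the actual upload. A simplified, self-contained sketch of that spool-and-rewind pattern (not the rclone code itself):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"
)

// spool copies in to a temporary file and rewinds it for re-reading.
func spool(in io.Reader) (*os.File, error) {
	tempFile, err := ioutil.TempFile("", "spool")
	if err != nil {
		return nil, fmt.Errorf("failed to create temporary local file: %w", err)
	}
	if _, err = io.Copy(tempFile, in); err != nil {
		return nil, fmt.Errorf("failed to write temporary local file: %w", err)
	}
	// Seek back to the start so the caller can read the spooled data.
	if _, err = tempFile.Seek(0, 0); err != nil {
		return nil, err
	}
	return tempFile, nil
}

func main() {
	f, err := spool(strings.NewReader("some streamed data"))
	if err != nil {
		panic(err)
	}
	defer func() { _ = os.Remove(f.Name()) }()
	b, _ := ioutil.ReadAll(f)
	fmt.Printf("%s\n", b)
}
```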
@@ -16,6 +16,9 @@ import (

 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
+	if *fstest.RemoteName == "" {
+		t.Skip("Skipping as -remote not set")
+	}
 	opt := fstests.Opt{
 		RemoteName: *fstest.RemoteName,
 		NilObject:  (*Object)(nil),
@@ -61,5 +64,6 @@ func TestRemoteGzip(t *testing.T) {
 			{Name: name, Key: "remote", Value: tempdir},
 			{Name: name, Key: "compression_mode", Value: "gzip"},
 		},
+		QuickTestOK: true,
 	})
 }
@@ -7,6 +7,8 @@ import (
 	gocipher "crypto/cipher"
 	"crypto/rand"
 	"encoding/base32"
+	"encoding/base64"
+	"errors"
 	"fmt"
 	"io"
 	"strconv"
@@ -15,7 +17,7 @@ import (
 	"time"
 	"unicode/utf8"

-	"github.com/pkg/errors"
+	"github.com/Max-Sum/base32768"
 	"github.com/rclone/rclone/backend/crypt/pkcs7"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
@@ -94,12 +96,12 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) {
 	case "obfuscate":
 		mode = NameEncryptionObfuscated
 	default:
-		err = errors.Errorf("Unknown file name encryption mode %q", s)
+		err = fmt.Errorf("Unknown file name encryption mode %q", s)
 	}
 	return mode, err
 }

-// String turns mode into a human readable string
+// String turns mode into a human-readable string
 func (mode NameEncryptionMode) String() (out string) {
 	switch mode {
 	case NameEncryptionOff:
@@ -114,6 +116,57 @@ func (mode NameEncryptionMode) String() (out string) {
 	return out
 }

+// fileNameEncoding are the encoding methods dealing with encrypted file names
+type fileNameEncoding interface {
+	EncodeToString(src []byte) string
+	DecodeString(s string) ([]byte, error)
+}
+
+// caseInsensitiveBase32Encoding defines a file name encoding
+// using a modified version of standard base32 as described in
+// RFC4648
+//
+// The standard encoding is modified in two ways
+//  * it becomes lower case (no-one likes upper case filenames!)
+//  * we strip the padding character `=`
+type caseInsensitiveBase32Encoding struct{}
+
+// EncodeToString encodes a string using the modified version of
+// base32 encoding.
+func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
+	encoded := base32.HexEncoding.EncodeToString(src)
+	encoded = strings.TrimRight(encoded, "=")
+	return strings.ToLower(encoded)
+}
+
+// DecodeString decodes a string as encoded by EncodeToString
+func (caseInsensitiveBase32Encoding) DecodeString(s string) ([]byte, error) {
+	if strings.HasSuffix(s, "=") {
+		return nil, ErrorBadBase32Encoding
+	}
+	// First figure out how many padding characters to add
+	roundUpToMultipleOf8 := (len(s) + 7) &^ 7
+	equals := roundUpToMultipleOf8 - len(s)
+	s = strings.ToUpper(s) + "========"[:equals]
+	return base32.HexEncoding.DecodeString(s)
+}
+
+// NewNameEncoding creates a NameEncoding from a string
+func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
+	s = strings.ToLower(s)
+	switch s {
+	case "base32":
+		enc = caseInsensitiveBase32Encoding{}
+	case "base64":
+		enc = base64.RawURLEncoding
+	case "base32768":
+		enc = base32768.SafeEncoding
+	default:
+		err = fmt.Errorf("Unknown file name encoding mode %q", s)
+	}
+	return enc, err
+}
+
 // Cipher defines an encoding and decoding cipher for the crypt backend
 type Cipher struct {
 	dataKey [32]byte // Key for secretbox
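The new caseInsensitiveBase32Encoding keeps the behaviour of the old free functions it replaces: the `=` padding can be stripped on encode because the number of padding characters is fully determined by the length of the remaining text, and decoding upper-cases its input first so it tolerates case folding. A standalone round-trip sketch using only the standard library:

```go
package main

import (
	"encoding/base32"
	"fmt"
	"strings"
)

func encode(src []byte) string {
	encoded := base32.HexEncoding.EncodeToString(src)
	return strings.ToLower(strings.TrimRight(encoded, "="))
}

func decode(s string) ([]byte, error) {
	// Re-add the padding implied by the length, then decode case-insensitively.
	pad := (8 - len(s)%8) % 8
	return base32.HexEncoding.DecodeString(strings.ToUpper(s) + strings.Repeat("=", pad))
}

func main() {
	out := encode([]byte("12345"))
	fmt.Println(out) // 64p36d1l (consistent with the test vectors further down)
	in, err := decode(strings.ToUpper(out)) // survives case folding
	fmt.Println(string(in), err)            // 12345 <nil>
}
```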
@@ -121,15 +174,17 @@ type Cipher struct {
 	nameTweak      [nameCipherBlockSize]byte // used to tweak the name crypto
 	block          gocipher.Block
 	mode           NameEncryptionMode
+	fileNameEnc    fileNameEncoding
 	buffers        sync.Pool // encrypt/decrypt buffers
 	cryptoRand     io.Reader // read crypto random numbers from here
 	dirNameEncrypt bool
 }

 // newCipher initialises the cipher. If salt is "" then it uses a built in salt val
-func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
+func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
 	c := &Cipher{
 		mode:           mode,
+		fileNameEnc:    enc,
 		cryptoRand:     rand.Reader,
 		dirNameEncrypt: dirNameEncrypt,
 	}
@@ -187,30 +242,6 @@ func (c *Cipher) putBlock(buf []byte) {
 	c.buffers.Put(buf)
 }

-// encodeFileName encodes a filename using a modified version of
-// standard base32 as described in RFC4648
-//
-// The standard encoding is modified in two ways
-//  * it becomes lower case (no-one likes upper case filenames!)
-//  * we strip the padding character `=`
-func encodeFileName(in []byte) string {
-	encoded := base32.HexEncoding.EncodeToString(in)
-	encoded = strings.TrimRight(encoded, "=")
-	return strings.ToLower(encoded)
-}
-
-// decodeFileName decodes a filename as encoded by encodeFileName
-func decodeFileName(in string) ([]byte, error) {
-	if strings.HasSuffix(in, "=") {
-		return nil, ErrorBadBase32Encoding
-	}
-	// First figure out how many padding characters to add
-	roundUpToMultipleOf8 := (len(in) + 7) &^ 7
-	equals := roundUpToMultipleOf8 - len(in)
-	in = strings.ToUpper(in) + "========"[:equals]
-	return base32.HexEncoding.DecodeString(in)
-}
-
 // encryptSegment encrypts a path segment
 //
 // This uses EME with AES
@@ -231,7 +262,7 @@ func (c *Cipher) encryptSegment(plaintext string) string {
 	}
 	paddedPlaintext := pkcs7.Pad(nameCipherBlockSize, []byte(plaintext))
 	ciphertext := eme.Transform(c.block, c.nameTweak[:], paddedPlaintext, eme.DirectionEncrypt)
-	return encodeFileName(ciphertext)
+	return c.fileNameEnc.EncodeToString(ciphertext)
 }

 // decryptSegment decrypts a path segment
@@ -239,7 +270,7 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
 	if ciphertext == "" {
 		return "", nil
 	}
-	rawCiphertext, err := decodeFileName(ciphertext)
+	rawCiphertext, err := c.fileNameEnc.DecodeString(ciphertext)
 	if err != nil {
 		return "", err
 	}
@@ -580,7 +611,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
 func (n *nonce) fromReader(in io.Reader) error {
 	read, err := io.ReadFull(in, (*n)[:])
 	if read != fileNonceSize {
-		return errors.Wrap(err, "short read of nonce")
+		return fmt.Errorf("short read of nonce: %w", err)
 	}
 	return nil
 }
@@ -956,7 +987,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
 	// Re-open the underlying object with the offset given
 	rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
 	if err != nil {
-		return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
+		return 0, fh.finish(fmt.Errorf("couldn't reopen file with offset and limit: %w", err))
 	}

 	// Set the file handle
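Why three encodings? Each base32 character carries 5 bits and each base64 character 6 bits, while base32768 packs 15 bits into every UTF-16 code unit. The 16-byte (128-bit) ciphertext of a single EME block therefore needs ceil(128/5) = 26 base32 characters and ceil(128/6) = 22 base64 characters, but only ceil(128/15) = 9 base32768 characters, which are exactly the lengths of the single-block test vectors in the test file below ("p0e52nreeaj0a5ea7s64m4j72s", "yBxRX25ypgUVyj8MSxJnFw", "詮㪗鐮僀伎作㻖㢧⪟"). Shorter encrypted names leave more headroom on backends that cap name or path length.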
@@ -4,13 +4,15 @@ import (
 	"bytes"
 	"context"
 	"encoding/base32"
+	"encoding/base64"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"strings"
 	"testing"

-	"github.com/pkg/errors"
+	"github.com/Max-Sum/base32768"
 	"github.com/rclone/rclone/backend/crypt/pkcs7"
 	"github.com/rclone/rclone/lib/readers"
 	"github.com/stretchr/testify/assert"
@@ -45,11 +47,31 @@ func TestNewNameEncryptionModeString(t *testing.T) {
 	assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
 }

-func TestEncodeFileName(t *testing.T) {
-	for _, test := range []struct {
-		in       string
-		expected string
-	}{
+type EncodingTestCase struct {
+	in       string
+	expected string
+}
+
+func testEncodeFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
+	for _, test := range testCases {
+		enc, err := NewNameEncoding(encoding)
+		assert.NoError(t, err, "There should be no error creating name encoder for base32.")
+		actual := enc.EncodeToString([]byte(test.in))
+		assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
+		recovered, err := enc.DecodeString(test.expected)
+		assert.NoError(t, err)
+		assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
+		if caseInsensitive {
+			in := strings.ToUpper(test.expected)
+			recovered, err = enc.DecodeString(in)
+			assert.NoError(t, err)
+			assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
+		}
+	}
+}
+
+func TestEncodeFileNameBase32(t *testing.T) {
+	testEncodeFileName(t, "base32", []EncodingTestCase{
 		{"", ""},
 		{"1", "64"},
 		{"12", "64p0"},
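The caseInsensitive flag in testEncodeFileName reflects a real difference between the encodings: the base32 decoder upper-cases its input before decoding, so an encrypted name still round-trips after a case-insensitive backend has folded its case, whereas base64 and base32768 offer no such tolerance. The extra ToUpper pass in the helper exercises exactly that property, and it is enabled only for the base32 tests.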
@@ -67,20 +89,56 @@ func TestEncodeFileName(t *testing.T) {
 		{"12345678901234", "64p36d1l6orjge9g64p36d0"},
 		{"123456789012345", "64p36d1l6orjge9g64p36d1l"},
 		{"1234567890123456", "64p36d1l6orjge9g64p36d1l6o"},
-	} {
-		actual := encodeFileName([]byte(test.in))
-		assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
-		recovered, err := decodeFileName(test.expected)
-		assert.NoError(t, err)
-		assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
-		in := strings.ToUpper(test.expected)
-		recovered, err = decodeFileName(in)
-		assert.NoError(t, err)
-		assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
-	}
+	}, true)
 }

-func TestDecodeFileName(t *testing.T) {
+func TestEncodeFileNameBase64(t *testing.T) {
+	testEncodeFileName(t, "base64", []EncodingTestCase{
+		{"", ""},
+		{"1", "MQ"},
+		{"12", "MTI"},
+		{"123", "MTIz"},
+		{"1234", "MTIzNA"},
+		{"12345", "MTIzNDU"},
+		{"123456", "MTIzNDU2"},
+		{"1234567", "MTIzNDU2Nw"},
+		{"12345678", "MTIzNDU2Nzg"},
+		{"123456789", "MTIzNDU2Nzg5"},
+		{"1234567890", "MTIzNDU2Nzg5MA"},
+		{"12345678901", "MTIzNDU2Nzg5MDE"},
+		{"123456789012", "MTIzNDU2Nzg5MDEy"},
+		{"1234567890123", "MTIzNDU2Nzg5MDEyMw"},
+		{"12345678901234", "MTIzNDU2Nzg5MDEyMzQ"},
+		{"123456789012345", "MTIzNDU2Nzg5MDEyMzQ1"},
+		{"1234567890123456", "MTIzNDU2Nzg5MDEyMzQ1Ng"},
+	}, false)
+}
+
+func TestEncodeFileNameBase32768(t *testing.T) {
+	testEncodeFileName(t, "base32768", []EncodingTestCase{
+		{"", ""},
+		{"1", "㼿"},
+		{"12", "㻙ɟ"},
+		{"123", "㻙ⲿ"},
+		{"1234", "㻙ⲍƟ"},
+		{"12345", "㻙ⲍ⍟"},
+		{"123456", "㻙ⲍ⍆ʏ"},
+		{"1234567", "㻙ⲍ⍆觟"},
+		{"12345678", "㻙ⲍ⍆觓ɧ"},
+		{"123456789", "㻙ⲍ⍆觓栯"},
+		{"1234567890", "㻙ⲍ⍆觓栩ɣ"},
+		{"12345678901", "㻙ⲍ⍆觓栩朧"},
+		{"123456789012", "㻙ⲍ⍆觓栩朤ʅ"},
+		{"1234567890123", "㻙ⲍ⍆觓栩朤談"},
+		{"12345678901234", "㻙ⲍ⍆觓栩朤諆ɔ"},
+		{"123456789012345", "㻙ⲍ⍆觓栩朤諆媕"},
+		{"1234567890123456", "㻙ⲍ⍆觓栩朤諆媕䆿"},
+	}, false)
+}
+
+func TestDecodeFileNameBase32(t *testing.T) {
+	enc, err := NewNameEncoding("base32")
+	assert.NoError(t, err, "There should be no error creating name encoder for base32.")
 	// We've tested decoding the valid ones above, now concentrate on the invalid ones
 	for _, test := range []struct {
 		in          string
@@ -90,17 +148,65 @@ func TestDecodeFileName(t *testing.T) {
 		{"!", base32.CorruptInputError(0)},
 		{"hello=hello", base32.CorruptInputError(5)},
 	} {
-		actual, actualErr := decodeFileName(test.in)
+		actual, actualErr := enc.DecodeString(test.in)
 		assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
 	}
 }

-func TestEncryptSegment(t *testing.T) {
-	c, _ := newCipher(NameEncryptionStandard, "", "", true)
-	for _, test := range []struct {
-		in       string
-		expected string
-	}{
+func TestDecodeFileNameBase64(t *testing.T) {
+	enc, err := NewNameEncoding("base64")
+	assert.NoError(t, err, "There should be no error creating name encoder for base32.")
+	// We've tested decoding the valid ones above, now concentrate on the invalid ones
+	for _, test := range []struct {
+		in          string
+		expectedErr error
+	}{
+		{"64=", base64.CorruptInputError(2)},
+		{"!", base64.CorruptInputError(0)},
+		{"Hello=Hello", base64.CorruptInputError(5)},
+	} {
+		actual, actualErr := enc.DecodeString(test.in)
+		assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
+	}
+}
+
+func TestDecodeFileNameBase32768(t *testing.T) {
+	enc, err := NewNameEncoding("base32768")
+	assert.NoError(t, err, "There should be no error creating name encoder for base32.")
+	// We've tested decoding the valid ones above, now concentrate on the invalid ones
+	for _, test := range []struct {
+		in          string
+		expectedErr error
+	}{
+		{"㼿c", base32768.CorruptInputError(1)},
+		{"!", base32768.CorruptInputError(0)},
+		{"㻙ⲿ=㻙ⲿ", base32768.CorruptInputError(2)},
+	} {
+		actual, actualErr := enc.DecodeString(test.in)
+		assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
+	}
+}
+
+func testEncryptSegment(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
+	enc, _ := NewNameEncoding(encoding)
+	c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
+	for _, test := range testCases {
+		actual := c.encryptSegment(test.in)
+		assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
+		recovered, err := c.decryptSegment(test.expected)
+		assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
+		assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
+		if caseInsensitive {
+			in := strings.ToUpper(test.expected)
+			recovered, err = c.decryptSegment(in)
+			assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
+			assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
+		}
+	}
+}
+
+func TestEncryptSegmentBase32(t *testing.T) {
+	testEncryptSegment(t, "base32", []EncodingTestCase{
 		{"", ""},
 		{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
 		{"12", "l42g6771hnv3an9cgc8cr2n1ng"},
@@ -118,26 +224,61 @@ func TestEncryptSegment(t *testing.T) {
 		{"12345678901234", "moq0uqdlqrblrc5pa5u5c7hq9g"},
 		{"123456789012345", "eeam3li4rnommi3a762h5n7meg"},
 		{"1234567890123456", "mijbj0frqf6ms7frcr6bd9h0env53jv96pjaaoirk7forcgpt70g"},
-	} {
-		actual := c.encryptSegment(test.in)
-		assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
-		recovered, err := c.decryptSegment(test.expected)
-		assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
-		assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
-		in := strings.ToUpper(test.expected)
-		recovered, err = c.decryptSegment(in)
-		assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
-		assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
-	}
+	}, true)
 }

-func TestDecryptSegment(t *testing.T) {
+func TestEncryptSegmentBase64(t *testing.T) {
+	testEncryptSegment(t, "base64", []EncodingTestCase{
+		{"", ""},
+		{"1", "yBxRX25ypgUVyj8MSxJnFw"},
+		{"12", "qQUDHOGN_jVdLIMQzYrhvA"},
+		{"123", "1CxFf2Mti1xIPYlGruDh-A"},
+		{"1234", "RL-xOTmsxsG7kuTy2XJUxw"},
+		{"12345", "3FP_GHoeBJdq0yLgaED8IQ"},
+		{"123456", "Xc4T1Gqrs3OVYnrE6dpEWQ"},
+		{"1234567", "uZeEzssOnDWHEOzLqjwpog"},
+		{"12345678", "8noiTP5WkkbEuijsPhOpxQ"},
+		{"123456789", "GeNxgLA0wiaGAKU3U7qL4Q"},
+		{"1234567890", "x1DUhdmqoVWYVBLD3dha-A"},
+		{"12345678901", "iEyP_3BZR6vvv_2WM6NbZw"},
+		{"123456789012", "4OPGvS4SZdjvS568APUaFw"},
+		{"1234567890123", "Y8c5Wr8OhYYUo7fPwdojdg"},
+		{"12345678901234", "tjQPabXW112wuVF8Vh46TA"},
+		{"123456789012345", "c5Vh1kTd8WtIajmFEtz2dA"},
+		{"1234567890123456", "tKa5gfvTzW4d-2bMtqYgdf5Rz-k2ZqViW6HfjbIZ6cE"},
+	}, false)
+}
+
+func TestEncryptSegmentBase32768(t *testing.T) {
+	testEncryptSegment(t, "base32768", []EncodingTestCase{
+		{"", ""},
+		{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
+		{"12", "竢朧䉱虃光塬䟛⣡蓟"},
+		{"123", "遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
+		{"1234", "䢟銮䵵狌㐜燳谒颴詟"},
+		{"12345", "钉Ꞇ㖃蚩憶狫朰杜㜿"},
+		{"123456", "啇ᚵⵕ憗䋫➫➓肤卟"},
+		{"1234567", "茫螓翁連劘樓㶔抉矟"},
+		{"12345678", "龝☳䘊辄岅較络㧩襟"},
+		{"123456789", "ⲱ苀㱆犂媐Ꮤ锇惫靟"},
+		{"1234567890", "計宁憕偵匢皫╛纺ꌟ"},
+		{"12345678901", "檆䨿鑫㪺藝ꡖ勇䦛婟"},
+		{"123456789012", "雑頏䰂䲝淚哚鹡魺⪟"},
+		{"1234567890123", "塃璶繁躸圅㔟䗃肃懟"},
+		{"12345678901234", "腺ᕚ崚鏕鏥讥鼌䑺䲿"},
+		{"123456789012345", "怪绕滻蕶肣但⠥荖惟"},
+		{"1234567890123456", "肳哀旚挶靏鏻㾭䱠慟㪳ꏆ賊兲铧敻塹魀ʟ"},
+	}, false)
+}
+
+func TestDecryptSegmentBase32(t *testing.T) {
 	// We've tested the forwards above, now concentrate on the errors
 	longName := make([]byte, 3328)
 	for i := range longName {
 		longName[i] = 'a'
 	}
-	c, _ := newCipher(NameEncryptionStandard, "", "", true)
+	enc, _ := NewNameEncoding("base32")
+	c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
 	for _, test := range []struct {
 		in          string
 		expectedErr error
@@ -145,118 +286,371 @@ func TestDecryptSegment(t *testing.T) {
 		{"64=", ErrorBadBase32Encoding},
 		{"!", base32.CorruptInputError(0)},
 		{string(longName), ErrorTooLongAfterDecode},
-		{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
-		{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
-		{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
+		{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
+		{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
+		{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
 	} {
 		actual, actualErr := c.decryptSegment(test.in)
 		assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
 	}
 }

-func TestEncryptFileName(t *testing.T) {
+func TestDecryptSegmentBase64(t *testing.T) {
+	// We've tested the forwards above, now concentrate on the errors
+	longName := make([]byte, 2816)
+	for i := range longName {
+		longName[i] = 'a'
+	}
+	enc, _ := NewNameEncoding("base64")
+	c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
+	for _, test := range []struct {
+		in          string
+		expectedErr error
+	}{
+		{"6H=", base64.CorruptInputError(2)},
+		{"!", base64.CorruptInputError(0)},
+		{string(longName), ErrorTooLongAfterDecode},
+		{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
+		{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
+		{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
+	} {
+		actual, actualErr := c.decryptSegment(test.in)
+		assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
+	}
+}
+
+func TestDecryptSegmentBase32768(t *testing.T) {
+	// We've tested the forwards above, now concentrate on the errors
+	longName := strings.Repeat("怪", 1280)
+	enc, _ := NewNameEncoding("base32768")
+	c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
+	for _, test := range []struct {
+		in          string
+		expectedErr error
+	}{
+		{"怪=", base32768.CorruptInputError(1)},
+		{"!", base32768.CorruptInputError(0)},
+		{longName, ErrorTooLongAfterDecode},
+		{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
+		{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
+		{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
+	} {
+		actual, actualErr := c.decryptSegment(test.in)
+		assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
+	}
+}
+
+func testStandardEncryptFileName(t *testing.T, encoding string, testCasesEncryptDir []EncodingTestCase, testCasesNoEncryptDir []EncodingTestCase) {
 	// First standard mode
-	c, _ := newCipher(NameEncryptionStandard, "", "", true)
-	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
-	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
-	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
-	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
-	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
+	enc, _ := NewNameEncoding(encoding)
+	c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
+	for _, test := range testCasesEncryptDir {
+		assert.Equal(t, test.expected, c.EncryptFileName(test.in))
+	}
 	// Standard mode with directory name encryption off
-	c, _ = newCipher(NameEncryptionStandard, "", "", false)
-	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
-	assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
-	assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
-	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
-	assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
-	// Now off mode
-	c, _ = newCipher(NameEncryptionOff, "", "", true)
+	c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
+	for _, test := range testCasesNoEncryptDir {
+		assert.Equal(t, test.expected, c.EncryptFileName(test.in))
+	}
+}
+
+func TestStandardEncryptFileNameBase32(t *testing.T) {
+	testStandardEncryptFileName(t, "base32", []EncodingTestCase{
+		{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
+		{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
+		{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
+		{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
+		{"1/12-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
+	}, []EncodingTestCase{
+		{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
+		{"1/12", "1/l42g6771hnv3an9cgc8cr2n1ng"},
+		{"1/12/123", "1/12/qgm4avr35m5loi1th53ato71v0"},
+		{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
+		{"1/12-v2001-02-03-040506-123", "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
+	})
+}
+
+func TestStandardEncryptFileNameBase64(t *testing.T) {
+	testStandardEncryptFileName(t, "base64", []EncodingTestCase{
+		{"1", "yBxRX25ypgUVyj8MSxJnFw"},
+		{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
+		{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
+		{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
+		{"1/12-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
+	}, []EncodingTestCase{
+		{"1", "yBxRX25ypgUVyj8MSxJnFw"},
+		{"1/12", "1/qQUDHOGN_jVdLIMQzYrhvA"},
+		{"1/12/123", "1/12/1CxFf2Mti1xIPYlGruDh-A"},
+		{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
+		{"1/12-v2001-02-03-040506-123", "1/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
+	})
+}
+
+func TestStandardEncryptFileNameBase32768(t *testing.T) {
+	testStandardEncryptFileName(t, "base32768", []EncodingTestCase{
+		{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
+		{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
+		{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
+		{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
+		{"1/12-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
+	}, []EncodingTestCase{
+		{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
+		{"1/12", "1/竢朧䉱虃光塬䟛⣡蓟"},
+		{"1/12/123", "1/12/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
+		{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
+		{"1/12-v2001-02-03-040506-123", "1/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
+	})
+}
+
+func TestNonStandardEncryptFileName(t *testing.T) {
+	// Off mode
+	c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
 	assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
 	// Obfuscation mode
-	c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
+	c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
 	assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
 	assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
 	assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt"))
 	assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
 	assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
 	// Obfuscation mode with directory name encryption off
-	c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
+	c, _ = newCipher(NameEncryptionObfuscated, "", "", false, nil)
 	assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
 	assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
 	assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
 	assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
 }

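The longName values in the decode-error tests above are chosen to just overshoot the decoded-size limit behind ErrorTooLongAfterDecode: 3328 base32 characters decode to 3328 × 5 / 8 = 2080 bytes, 2816 base64 characters to 2816 × 6 / 8 = 2112 bytes, and 1280 base32768 characters to 1280 × 15 / 8 = 2400 bytes. Each lands a little over 2048 bytes, which appears to be the decoded-size cap the cipher enforces.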
-func TestDecryptFileName(t *testing.T) {
-	for _, test := range []struct {
-		mode           NameEncryptionMode
-		dirNameEncrypt bool
-		in             string
-		expected       string
-		expectedErr    error
-	}{
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
-		{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
-		{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", "1-v2001-02-03-040506-123", nil},
-		{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
-		{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
-		{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
-		{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
-		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
-		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
-		{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
-		{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
-		{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
-		{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
-		{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
-		{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
-	} {
-		c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
-		actual, actualErr := c.DecryptFileName(test.in)
-		what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
-		assert.Equal(t, test.expected, actual, what)
-		assert.Equal(t, test.expectedErr, actualErr, what)
-	}
-}
+func testStandardDecryptFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
+	enc, _ := NewNameEncoding(encoding)
+	for _, test := range testCases {
+		// Test when dirNameEncrypt=true
+		c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
+		actual, actualErr := c.DecryptFileName(test.in)
+		assert.NoError(t, actualErr)
+		assert.Equal(t, test.expected, actual)
+		if caseInsensitive {
+			c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
+			actual, actualErr := c.DecryptFileName(strings.ToUpper(test.in))
+			assert.NoError(t, actualErr)
+			assert.Equal(t, test.expected, actual)
+		}
+		// Add a character should raise ErrorNotAMultipleOfBlocksize
+		actual, actualErr = c.DecryptFileName(enc.EncodeToString([]byte("1")) + test.in)
+		assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
+		assert.Equal(t, "", actual)
+		// Test when dirNameEncrypt=false
+		noDirEncryptIn := test.in
+		if strings.LastIndex(test.expected, "/") != -1 {
+			noDirEncryptIn = test.expected[:strings.LastIndex(test.expected, "/")] + test.in[strings.LastIndex(test.in, "/"):]
+		}
+		c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
+		actual, actualErr = c.DecryptFileName(noDirEncryptIn)
+		assert.NoError(t, actualErr)
+		assert.Equal(t, test.expected, actual)
+	}
+}
+
+func TestStandardDecryptFileNameBase32(t *testing.T) {
+	testStandardDecryptFileName(t, "base32", []EncodingTestCase{
+		{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
+		{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
+		{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
+	}, true)
+}
+
+func TestStandardDecryptFileNameBase64(t *testing.T) {
+	testStandardDecryptFileName(t, "base64", []EncodingTestCase{
+		{"yBxRX25ypgUVyj8MSxJnFw", "1"},
+		{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
+		{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
+	}, false)
+}
+
+func TestStandardDecryptFileNameBase32768(t *testing.T) {
+	testStandardDecryptFileName(t, "base32768", []EncodingTestCase{
+		{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
+		{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
+		{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
+	}, false)
+}
+
+func TestNonStandardDecryptFileName(t *testing.T) {
+	for _, encoding := range []string{"base32", "base64", "base32768"} {
+		enc, _ := NewNameEncoding(encoding)
+		for _, test := range []struct {
+			mode           NameEncryptionMode
+			dirNameEncrypt bool
+			in             string
+			expected       string
+			expectedErr    error
+		}{
+			{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
+			{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
+			{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
+			{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
+			{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
+			{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
+			{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
+			{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
+			{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
+			{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
+			{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
+			{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
+		} {
+			c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
+			actual, actualErr := c.DecryptFileName(test.in)
+			what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
+			assert.Equal(t, test.expected, actual, what)
+			assert.Equal(t, test.expectedErr, actualErr, what)
+		}
+	}
+}

 func TestEncDecMatches(t *testing.T) {
-	for _, test := range []struct {
-		mode NameEncryptionMode
-		in   string
-	}{
-		{NameEncryptionStandard, "1/2/3/4"},
-		{NameEncryptionOff, "1/2/3/4"},
-		{NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"},
-		{NameEncryptionObfuscated, "Avatar The Last Airbender"},
-	} {
-		c, _ := newCipher(test.mode, "", "", true)
-		out, err := c.DecryptFileName(c.EncryptFileName(test.in))
-		what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
-		assert.Equal(t, out, test.in, what)
-		assert.Equal(t, err, nil, what)
+	for _, encoding := range []string{"base32", "base64", "base32768"} {
+		enc, _ := NewNameEncoding(encoding)
+		for _, test := range []struct {
+			mode NameEncryptionMode
+			in   string
+		}{
+			{NameEncryptionStandard, "1/2/3/4"},
+			{NameEncryptionOff, "1/2/3/4"},
+			{NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"},
+			{NameEncryptionObfuscated, "Avatar The Last Airbender"},
+		} {
+			c, _ := newCipher(test.mode, "", "", true, enc)
+			out, err := c.DecryptFileName(c.EncryptFileName(test.in))
+			what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
+			assert.Equal(t, out, test.in, what)
+			assert.Equal(t, err, nil, what)
+		}
 	}
 }
-func TestEncryptDirName(t *testing.T) {
+func testStandardEncryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase) {
+	enc, _ := NewNameEncoding(encoding)
+	c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
 	// First standard mode
-	c, _ := newCipher(NameEncryptionStandard, "", "", true)
-	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptDirName("1"))
-	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptDirName("1/12"))
-	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptDirName("1/12/123"))
-	// Standard mode with dir name encryption off
-	c, _ = newCipher(NameEncryptionStandard, "", "", false)
-	assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
-	assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
-	// Now off mode
-	c, _ = newCipher(NameEncryptionOff, "", "", true)
-	assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
+	for _, test := range testCases {
+		assert.Equal(t, test.expected, c.EncryptDirName(test.in))
+	}
 }
-func TestDecryptDirName(t *testing.T) {
+func TestStandardEncryptDirNameBase32(t *testing.T) {
+	testStandardEncryptDirName(t, "base32", []EncodingTestCase{
+		{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
+		{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
+		{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
+	})
+}
+
+func TestStandardEncryptDirNameBase64(t *testing.T) {
+	testStandardEncryptDirName(t, "base64", []EncodingTestCase{
+		{"1", "yBxRX25ypgUVyj8MSxJnFw"},
+		{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
+		{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
+	})
+}
+
+func TestStandardEncryptDirNameBase32768(t *testing.T) {
+	testStandardEncryptDirName(t, "base32768", []EncodingTestCase{
+		{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
+		{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
+		{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
+	})
+}
+
+func TestNonStandardEncryptDirName(t *testing.T) {
+	for _, encoding := range []string{"base32", "base64", "base32768"} {
+		enc, _ := NewNameEncoding(encoding)
+		c, _ := newCipher(NameEncryptionStandard, "", "", false, enc)
+		assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
+		assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
+		// Now off mode
+		c, _ = newCipher(NameEncryptionOff, "", "", true, enc)
+		assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
+	}
+}
+func testStandardDecryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
+	enc, _ := NewNameEncoding(encoding)
+	for _, test := range testCases {
+		// Test dirNameEncrypt=true
+		c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
+		actual, actualErr := c.DecryptDirName(test.in)
+		assert.Equal(t, test.expected, actual)
+		assert.NoError(t, actualErr)
+		if caseInsensitive {
+			actual, actualErr := c.DecryptDirName(strings.ToUpper(test.in))
+			assert.Equal(t, actual, test.expected)
+			assert.NoError(t, actualErr)
+		}
+		actual, actualErr = c.DecryptDirName(enc.EncodeToString([]byte("1")) + test.in)
+		assert.Equal(t, "", actual)
+		assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
+		// Test dirNameEncrypt=false
+		c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
+		actual, actualErr = c.DecryptDirName(test.in)
+		assert.Equal(t, test.in, actual)
+		assert.NoError(t, actualErr)
+		actual, actualErr = c.DecryptDirName(test.expected)
+		assert.Equal(t, test.expected, actual)
+		assert.NoError(t, actualErr)
+		// Test dirNameEncrypt=false
+	}
+}
+
+/*
+	enc, _ := NewNameEncoding(encoding)
+	for _, test := range []struct {
+		mode           NameEncryptionMode
+		dirNameEncrypt bool
+		in             string
+		expected       string
+		expectedErr    error
+	}{
+		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
+		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
+		{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
+		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
+		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
+		{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
+		{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
+	} {
+		c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
+		actual, actualErr := c.DecryptDirName(test.in)
+		what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
+		assert.Equal(t, test.expected, actual, what)
+		assert.Equal(t, test.expectedErr, actualErr, what)
+	}
+*/
+func TestStandardDecryptDirNameBase32(t *testing.T) {
+	testStandardDecryptDirName(t, "base32", []EncodingTestCase{
+		{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
+		{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
+		{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
+	}, true)
+}
+
+func TestStandardDecryptDirNameBase64(t *testing.T) {
+	testStandardDecryptDirName(t, "base64", []EncodingTestCase{
+		{"yBxRX25ypgUVyj8MSxJnFw", "1"},
+		{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
+		{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
+	}, false)
+}
+
+func TestStandardDecryptDirNameBase32768(t *testing.T) {
+	testStandardDecryptDirName(t, "base32768", []EncodingTestCase{
+		{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
+		{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
+		{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
+	}, false)
+}
+func TestNonStandardDecryptDirName(t *testing.T) {
 	for _, test := range []struct {
 		mode           NameEncryptionMode
 		dirNameEncrypt bool
@@ -264,18 +658,11 @@ func TestDecryptDirName(t *testing.T) {
 		expected       string
 		expectedErr    error
 	}{
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
-		{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
-		{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
-		{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
 		{NameEncryptionOff, true, "1/12/123.bin", "1/12/123.bin", nil},
 		{NameEncryptionOff, true, "1/12/123", "1/12/123", nil},
 		{NameEncryptionOff, true, ".bin", ".bin", nil},
 	} {
-		c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
+		c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, nil)
 		actual, actualErr := c.DecryptDirName(test.in)
 		what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
 		assert.Equal(t, test.expected, actual, what)
@@ -284,7 +671,7 @@ func TestDecryptDirName(t *testing.T) {
 }

 func TestEncryptedSize(t *testing.T) {
-	c, _ := newCipher(NameEncryptionStandard, "", "", true)
+	c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
 	for _, test := range []struct {
 		in       int64
 		expected int64
@@ -308,7 +695,7 @@ func TestEncryptedSize(t *testing.T) {

 func TestDecryptedSize(t *testing.T) {
 	// Test the errors since we tested the reverse above
-	c, _ := newCipher(NameEncryptionStandard, "", "", true)
+	c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
 	for _, test := range []struct {
 		in          int64
 		expectedErr error
@@ -637,7 +1024,7 @@ func (r *randomSource) Read(p []byte) (n int, err error) {
 func (r *randomSource) Write(p []byte) (n int, err error) {
 	for i := range p {
 		if p[i] != r.next() {
-			return 0, errors.Errorf("Error in stream at %d", r.counter)
+			return 0, fmt.Errorf("Error in stream at %d", r.counter)
 		}
 	}
 	return len(p), nil
@@ -679,7 +1066,7 @@ func (z *zeroes) Read(p []byte) (n int, err error) {

 // Test encrypt decrypt with different buffer sizes
 func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
 	assert.NoError(t, err)
 	c.cryptoRand = &zeroes{} // zero out the nonce
 	buf := make([]byte, bufSize)
@@ -749,7 +1136,7 @@ func TestEncryptData(t *testing.T) {
 		{[]byte{1}, file1},
 		{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, file16},
 	} {
-		c, err := newCipher(NameEncryptionStandard, "", "", true)
+		c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
 		assert.NoError(t, err)
 		c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator

@@ -772,7 +1159,7 @@ func TestEncryptData(t *testing.T) {
 }

 func TestNewEncrypter(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
 	assert.NoError(t, err)
 	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator

@@ -788,13 +1175,12 @@ func TestNewEncrypter(t *testing.T) {
 	fh, err = c.newEncrypter(z, nil)
 	assert.Nil(t, fh)
 	assert.Error(t, err, "short read of nonce")
-
 }

 // Test the stream returning 0, io.ErrUnexpectedEOF - this used to
 // cause a fatal loop
 func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
 	assert.NoError(t, err)

 	in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
@@ -823,7 +1209,7 @@ func (c *closeDetector) Close() error {
 }

 func TestNewDecrypter(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
 	assert.NoError(t, err)
 	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator

@@ -866,7 +1252,7 @@ func TestNewDecrypter(t *testing.T) {

 // Test the stream returning 0, io.ErrUnexpectedEOF
 func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
 	assert.NoError(t, err)

 	in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
@@ -882,7 +1268,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
 }

 func TestNewDecrypterSeekLimit(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
 	assert.NoError(t, err)
 	c.cryptoRand = &zeroes{} // nodge the crypto rand generator

@@ -1088,7 +1474,7 @@ func TestDecrypterCalculateUnderlying(t *testing.T) {
 }

 func TestDecrypterRead(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
 	assert.NoError(t, err)

 	// Test truncating the file at each possible point
@@ -1152,7 +1538,7 @@ func TestDecrypterRead(t *testing.T) {
 }

 func TestDecrypterClose(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
 	assert.NoError(t, err)

 	cd := newCloseDetector(bytes.NewBuffer(file16))
@@ -1190,7 +1576,7 @@ func TestDecrypterClose(t *testing.T) {
 }

 func TestPutGetBlock(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
 	assert.NoError(t, err)

 	block := c.getBlock()
@@ -1201,7 +1587,7 @@ func TestPutGetBlock(t *testing.T) {
 }

 func TestKey(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true)
+	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
 	assert.NoError(t, err)

 	// Check zero keys OK
backend/crypt/crypt.go
@@ -3,13 +3,13 @@ package crypt

 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"path"
 	"strings"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/cache"
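This import swap is part of the repository-wide move from github.com/pkg/errors to the standard library. A minimal, self-contained sketch of the equivalent idioms (the names here are illustrative, not from the diff):

    package main

    import (
        "errors"
        "fmt"
    )

    var errNotFound = errors.New("not found")

    func lookup() error {
        // fmt.Errorf with %w wraps the error, preserving the chain
        // the way errors.Wrap did in github.com/pkg/errors.
        return fmt.Errorf("failed to look up key: %w", errNotFound)
    }

    func main() {
        err := lookup()
        // errors.Is walks the wrapped chain, replacing errors.Cause comparisons.
        fmt.Println(errors.Is(err, errNotFound)) // true
    }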
@@ -116,6 +116,29 @@ names, or for debugging purposes.`,
 				Help: "Encrypt file data.",
 			},
 		},
+	}, {
+		Name: "filename_encoding",
+		Help: `How to encode the encrypted filename to text string.
+
+This option could help with shortening the encrypted filename. The
+suitable option would depend on the way your remote counts the filename
+length and whether it is case sensitive.`,
+		Default: "base32",
+		Examples: []fs.OptionExample{
+			{
+				Value: "base32",
+				Help:  "Encode using base32. Suitable for all remotes.",
+			},
+			{
+				Value: "base64",
+				Help:  "Encode using base64. Suitable for case sensitive remotes.",
+			},
+			{
+				Value: "base32768",
+				Help:  "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoints instead of UTF-8 byte length. (E.g. OneDrive)",
+			},
+		},
+		Advanced: true,
 	}},
 	})
 }
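As a usage sketch, the new option slots into a crypt remote's configuration alongside the existing filename_encryption setting. The remote names and paths below are made up for illustration:

    [secret]
    type = crypt
    remote = onedrive:encrypted
    password = ...                  # stored obscured, see obscure.Reveal below
    filename_encryption = standard
    filename_encoding = base32768   # new option; the default remains base32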
@@ -131,18 +154,22 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
 	}
 	password, err := obscure.Reveal(opt.Password)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to decrypt password")
+		return nil, fmt.Errorf("failed to decrypt password: %w", err)
 	}
 	var salt string
 	if opt.Password2 != "" {
 		salt, err = obscure.Reveal(opt.Password2)
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to decrypt password2")
+			return nil, fmt.Errorf("failed to decrypt password2: %w", err)
 		}
 	}
-	cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
+	enc, err := NewNameEncoding(opt.FilenameEncoding)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to make cipher")
+		return nil, err
+	}
+	cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption, enc)
+	if err != nil {
+		return nil, fmt.Errorf("failed to make cipher: %w", err)
 	}
 	return cipher, nil
 }
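For reference, obscure.Reveal above is the inverse of the obscuring applied when a password is written to the config file. A small sketch of the round trip using the real fs/config/obscure package (the literal password is just an example, echoing the one the tests use):

    package main

    import (
        "fmt"

        "github.com/rclone/rclone/fs/config/obscure"
    )

    func main() {
        enc := obscure.MustObscure("potato") // form stored in the config file
        dec, err := obscure.Reveal(enc)      // form newCipherForConfig recovers
        fmt.Println(dec, err)                // "potato" <nil>
    }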
@@ -192,7 +219,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		}
 	}
 	if err != fs.ErrorIsFile && err != nil {
-		return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
+		return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
 	}
 	f := &Fs{
 		Fs: wrappedFs,
@@ -229,6 +256,7 @@ type Options struct {
 	Password2               string `config:"password2"`
 	ServerSideAcrossConfigs bool   `config:"server_side_across_configs"`
 	ShowMapping             bool   `config:"show_mapping"`
+	FilenameEncoding        string `config:"filename_encoding"`
 }

 // Fs represents a wrapped fs.Fs
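Why base32768 helps with remotes that count name length in UTF-16 code units (such as OneDrive, per the help text above): base32 and base64 spend one ASCII character, i.e. one UTF-16 unit, per 5 or 6 bits of ciphertext, while base32768 packs 15 bits into a single BMP character, which is still only one UTF-16 unit. A self-contained check of that accounting, using the encoded names from the test tables above (both encode the same plaintext "1"):

    package main

    import (
        "fmt"
        "unicode/utf16"
    )

    func main() {
        names := []string{
            "p0e52nreeaj0a5ea7s64m4j72s", // base32: 26 UTF-16 code units
            "詮㪗鐮僀伎作㻖㢧⪟",           // base32768: 9 UTF-16 code units
        }
        for _, s := range names {
            fmt.Printf("%q: %d UTF-16 code units\n", s, len(utf16.Encode([]rune(s))))
        }
    }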
@@ -300,7 +328,7 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
 		case fs.Directory:
 			f.addDir(ctx, &newEntries, x)
 		default:
-			return nil, errors.Errorf("Unknown object type %T", entry)
+			return nil, fmt.Errorf("Unknown object type %T", entry)
 		}
 	}
 	return newEntries, nil
@@ -406,7 +434,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 		var dstHash string
 		dstHash, err = o.Hash(ctx, ht)
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to read destination hash")
+			return nil, fmt.Errorf("failed to read destination hash: %w", err)
 		}
 		if srcHash != "" && dstHash != "" {
 			if srcHash != dstHash {
@@ -415,7 +443,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 				if err != nil {
 					fs.Errorf(o, "Failed to remove corrupted object: %v", err)
 				}
-				return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
+				return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
 			}
 			fs.Debugf(src, "%v = %s OK", ht, srcHash)
 		}
@@ -616,24 +644,24 @@ func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Objec
 	// Open the src for input
 	in, err := src.Open(ctx)
 	if err != nil {
-		return "", errors.Wrap(err, "failed to open src")
+		return "", fmt.Errorf("failed to open src: %w", err)
 	}
 	defer fs.CheckClose(in, &err)

 	// Now encrypt the src with the nonce
 	out, err := f.cipher.newEncrypter(in, &nonce)
 	if err != nil {
-		return "", errors.Wrap(err, "failed to make encrypter")
+		return "", fmt.Errorf("failed to make encrypter: %w", err)
 	}

 	// pipe into hash
 	m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
 	if err != nil {
-		return "", errors.Wrap(err, "failed to make hasher")
+		return "", fmt.Errorf("failed to make hasher: %w", err)
 	}
 	_, err = io.Copy(m, out)
 	if err != nil {
-		return "", errors.Wrap(err, "failed to hash data")
+		return "", fmt.Errorf("failed to hash data: %w", err)
 	}

 	return m.Sums()[hashType], nil
@@ -652,12 +680,12 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
 	// use a limited read so we only read the header
 	in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
 	if err != nil {
-		return "", errors.Wrap(err, "failed to open object to read nonce")
+		return "", fmt.Errorf("failed to open object to read nonce: %w", err)
 	}
 	d, err := f.cipher.newDecrypter(in)
 	if err != nil {
 		_ = in.Close()
-		return "", errors.Wrap(err, "failed to open object to read nonce")
+		return "", fmt.Errorf("failed to open object to read nonce: %w", err)
 	}
 	nonce := d.nonce
 	// fs.Debugf(o, "Read nonce % 2x", nonce)
@@ -676,7 +704,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
 	// Close d (and hence in) once we have read the nonce
 	err = d.Close()
 	if err != nil {
-		return "", errors.Wrap(err, "failed to close nonce read")
+		return "", fmt.Errorf("failed to close nonce read: %w", err)
 	}

 	return f.computeHashWithNonce(ctx, nonce, src, hashType)
@@ -795,7 +823,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 	for _, encryptedFileName := range arg {
 		fileName, err := f.DecryptFileName(encryptedFileName)
 		if err != nil {
-			return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
+			return out, fmt.Errorf("failed to decrypt: %s: %w", encryptedFileName, err)
 		}
 		out = append(out, fileName)
 	}
@@ -1021,6 +1049,10 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
 	}
 	// if this is wrapping a local object then we work out the hash
 	if srcObj.Fs().Features().IsLocal {
+		if o.f.opt.NoDataEncryption {
+			// If no encryption, just return the hash of the underlying object
+			return srcObj.Hash(ctx, hash)
+		}
 		// Read the data and encrypt it to calculate the hash
 		fs.Debugf(o, "Computing %v hash of encrypted source", hash)
 		return o.f.computeHashWithNonce(ctx, o.nonce, srcObj, hash)
backend/crypt/crypt_internal_test.go
@@ -77,7 +77,11 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
 	enc, err := f.cipher.newEncrypter(inBuf, nil)
 	require.NoError(t, err)
 	nonce := enc.nonce // read the nonce at the start
-	_, err = io.Copy(&outBuf, enc)
+	if f.opt.NoDataEncryption {
+		_, err = outBuf.WriteString(contents)
+	} else {
+		_, err = io.Copy(&outBuf, enc)
+	}
 	require.NoError(t, err)

 	var oi fs.ObjectInfo = obj
@@ -96,7 +100,12 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
 	assert.NotEqual(t, path, src.Remote())

 	// Test ObjectInfo.Hash
-	wantHash := md5.Sum(outBuf.Bytes())
+	var wantHash [md5.Size]byte
+	if f.opt.NoDataEncryption {
+		wantHash = md5.Sum([]byte(contents))
+	} else {
+		wantHash = md5.Sum(outBuf.Bytes())
+	}
 	gotHash, err := src.Hash(ctx, hash.MD5)
 	require.NoError(t, err)
 	assert.Equal(t, fmt.Sprintf("%x", wantHash), gotHash)
backend/crypt/crypt_test.go
@@ -29,7 +29,7 @@ func TestIntegration(t *testing.T) {
 }

 // TestStandard runs integration tests against the remote
-func TestStandard(t *testing.T) {
+func TestStandardBase32(t *testing.T) {
 	if *fstest.RemoteName != "" {
 		t.Skip("Skipping as -remote set")
 	}
@@ -46,6 +46,51 @@ func TestStandard(t *testing.T) {
 		},
 		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
+	})
+}
+
+func TestStandardBase64(t *testing.T) {
+	if *fstest.RemoteName != "" {
+		t.Skip("Skipping as -remote set")
+	}
+	tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
+	name := "TestCrypt"
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: name + ":",
+		NilObject:  (*crypt.Object)(nil),
+		ExtraConfig: []fstests.ExtraConfigItem{
+			{Name: name, Key: "type", Value: "crypt"},
+			{Name: name, Key: "remote", Value: tempdir},
+			{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
+			{Name: name, Key: "filename_encryption", Value: "standard"},
+			{Name: name, Key: "filename_encoding", Value: "base64"},
+		},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
+		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
+	})
+}
+
+func TestStandardBase32768(t *testing.T) {
+	if *fstest.RemoteName != "" {
+		t.Skip("Skipping as -remote set")
+	}
+	tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
+	name := "TestCrypt"
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: name + ":",
+		NilObject:  (*crypt.Object)(nil),
+		ExtraConfig: []fstests.ExtraConfigItem{
+			{Name: name, Key: "type", Value: "crypt"},
+			{Name: name, Key: "remote", Value: tempdir},
+			{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
+			{Name: name, Key: "filename_encryption", Value: "standard"},
+			{Name: name, Key: "filename_encoding", Value: "base32768"},
+		},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
+		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }
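To exercise just these new integration variants locally, a standard `go test` invocation from the repository root should work; something like:

    go test -v -run 'TestStandardBase' ./backend/crypt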
@@ -67,6 +112,7 @@ func TestOff(t *testing.T) {
 		},
 		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }

@@ -89,6 +135,7 @@ func TestObfuscate(t *testing.T) {
 		SkipBadWindowsCharacters:     true,
 		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }

@@ -112,5 +159,6 @@ func TestNoDataObfuscate(t *testing.T) {
 		SkipBadWindowsCharacters:     true,
 		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }
backend/crypt/pkcs7/pkcs7.go
@@ -4,7 +4,7 @@
 // buffers which are a multiple of an underlying crypto block size.
 package pkcs7

-import "github.com/pkg/errors"
+import "errors"

 // Errors Unpad can return
 var (
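For background, the pkcs7 package implements PKCS#7 padding: a block needing n bytes of padding is filled with n copies of the byte n, so unpadding only has to read the last byte. A minimal sketch of the padding rule, assuming nothing about the package's actual code:

    // pad appends PKCS#7 padding so len(result) is a multiple of blockSize.
    // n is always in 1..blockSize, so even aligned input gets a full pad block.
    func pad(blockSize int, data []byte) []byte {
        n := blockSize - len(data)%blockSize
        for i := 0; i < n; i++ {
            data = append(data, byte(n))
        }
        return data
    }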
backend/drive/drive.go (114 lines changed; mode changed: Executable file → Normal file)
@@ -11,6 +11,7 @@ import (
 	"bytes"
 	"context"
 	"crypto/tls"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -25,7 +26,6 @@ import (
 	"text/template"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/config"
@@ -188,7 +188,7 @@ func init() {
 		opt := new(Options)
 		err := configstruct.Set(m, opt)
 		if err != nil {
-			return nil, errors.Wrap(err, "couldn't parse config into struct")
+			return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
 		}

 		switch config.State {
@@ -226,7 +226,7 @@ func init() {
 		case "teamdrive_config":
 			f, err := newFs(ctx, name, "", m)
 			if err != nil {
-				return nil, errors.Wrap(err, "failed to make Fs to list Shared Drives")
+				return nil, fmt.Errorf("failed to make Fs to list Shared Drives: %w", err)
 			}
 			teamDrives, err := f.listTeamDrives(ctx)
 			if err != nil {
@@ -755,7 +755,7 @@ func (f *Fs) getFile(ctx context.Context, ID string, fields googleapi.Field) (in
 func (f *Fs) getRootID(ctx context.Context) (string, error) {
 	info, err := f.getFile(ctx, "root", "id")
 	if err != nil {
-		return "", errors.Wrap(err, "couldn't find root directory ID")
+		return "", fmt.Errorf("couldn't find root directory ID: %w", err)
 	}
 	return info.Id, nil
 }
@@ -882,7 +882,7 @@ OUTER:
 		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
-		return false, errors.Wrap(err, "couldn't list directory")
+		return false, fmt.Errorf("couldn't list directory: %w", err)
 	}
 	if files.IncompleteSearch {
 		fs.Errorf(f, "search result INCOMPLETE")
@@ -904,7 +904,7 @@ OUTER:
 		}
 		item, err = f.resolveShortcut(ctx, item)
 		if err != nil {
-			return false, errors.Wrap(err, "list")
+			return false, fmt.Errorf("list: %w", err)
 		}
 	}
 	// Check the case of items is correct since
@@ -965,7 +965,7 @@ func fixMimeType(mimeTypeIn string) string {
 		mimeTypeOut = mime.FormatMediaType(mediaType, param)
 	}
 	if mimeTypeOut == "" {
-		panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
+		panic(fmt.Errorf("unable to fix MIME type %q", mimeTypeIn))
 	}
 	return mimeTypeOut
 }
@@ -1000,7 +1000,7 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
 		}
 		mt := mime.TypeByExtension(extension)
 		if mt == "" {
-			return extensions, mimeTypes, errors.Errorf("couldn't find MIME type for extension %q", extension)
+			return extensions, mimeTypes, fmt.Errorf("couldn't find MIME type for extension %q", extension)
 		}
 		if !containsString(extensions, extension) {
 			extensions = append(extensions, extension)
@@ -1027,7 +1027,7 @@ func getServiceAccountClient(ctx context.Context, opt *Options, credentialsData
 	scopes := driveScopes(opt.Scope)
 	conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
 	if err != nil {
-		return nil, errors.Wrap(err, "error processing credentials")
+		return nil, fmt.Errorf("error processing credentials: %w", err)
 	}
 	if opt.Impersonate != "" {
 		conf.Subject = opt.Impersonate
@@ -1044,19 +1044,19 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
 	if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
 		loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
 		if err != nil {
-			return nil, errors.Wrap(err, "error opening service account credentials file")
+			return nil, fmt.Errorf("error opening service account credentials file: %w", err)
 		}
 		opt.ServiceAccountCredentials = string(loadedCreds)
 	}
 	if opt.ServiceAccountCredentials != "" {
 		oAuthClient, err = getServiceAccountClient(ctx, opt, []byte(opt.ServiceAccountCredentials))
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to create oauth client from service account")
+			return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
 		}
 	} else {
 		oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to create oauth client")
+			return nil, fmt.Errorf("failed to create oauth client: %w", err)
 		}
 	}

@@ -1065,10 +1065,10 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm

 func checkUploadChunkSize(cs fs.SizeSuffix) error {
 	if !isPowerOfTwo(int64(cs)) {
-		return errors.Errorf("%v isn't a power of two", cs)
+		return fmt.Errorf("%v isn't a power of two", cs)
 	}
 	if cs < minChunkSize {
-		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
 	}
 	return nil
 }
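The isPowerOfTwo helper called above is not part of this hunk. Assuming it uses the standard bit trick, it would look roughly like this sketch (not necessarily the file's actual implementation):

    // A positive power of two has exactly one bit set,
    // so clearing the lowest set bit with x&(x-1) leaves zero.
    func isPowerOfTwo(x int64) bool {
        return x > 0 && x&(x-1) == 0
    }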
@@ -1106,16 +1106,16 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
 	}
 	err = checkUploadCutoff(opt.UploadCutoff)
 	if err != nil {
-		return nil, errors.Wrap(err, "drive: upload cutoff")
+		return nil, fmt.Errorf("drive: upload cutoff: %w", err)
 	}
 	err = checkUploadChunkSize(opt.ChunkSize)
 	if err != nil {
-		return nil, errors.Wrap(err, "drive: chunk size")
+		return nil, fmt.Errorf("drive: chunk size: %w", err)
 	}

 	oAuthClient, err := createOAuthClient(ctx, opt, name, m)
 	if err != nil {
-		return nil, errors.Wrap(err, "drive: failed when making oauth client")
+		return nil, fmt.Errorf("drive: failed when making oauth client: %w", err)
 	}

 	root, err := parseDrivePath(path)
@@ -1149,13 +1149,13 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
 	f.client = oAuthClient
 	f.svc, err = drive.New(f.client)
 	if err != nil {
-		return nil, errors.Wrap(err, "couldn't create Drive client")
+		return nil, fmt.Errorf("couldn't create Drive client: %w", err)
 	}

 	if f.opt.V2DownloadMinSize >= 0 {
 		f.v2Svc, err = drive_v2.New(f.client)
 		if err != nil {
-			return nil, errors.Wrap(err, "couldn't create Drive v2 client")
+			return nil, fmt.Errorf("couldn't create Drive v2 client: %w", err)
 		}
 	}

@@ -1180,7 +1180,8 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
 		// otherwise look up the actual root ID
 		rootID, err := f.getRootID(ctx)
 		if err != nil {
-			if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
+			var gerr *googleapi.Error
+			if errors.As(err, &gerr) && gerr.Code == 404 {
 				// 404 means that this scope does not have permission to get the
 				// root so just use "root"
 				rootID = "root"
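The change above replaces pkg/errors' errors.Cause plus a type assertion with the standard library's errors.As, which unwraps %w chains while looking for a target type. A standalone sketch of the same pattern (the wrapped message mirrors the one this file now produces; the error value is fabricated for illustration):

    package main

    import (
        "errors"
        "fmt"

        "google.golang.org/api/googleapi"
    )

    func main() {
        // An API error wrapped the way the drive backend now wraps errors.
        err := fmt.Errorf("couldn't find root directory ID: %w",
            &googleapi.Error{Code: 404, Message: "notFound"})

        var gerr *googleapi.Error
        if errors.As(err, &gerr) && gerr.Code == 404 {
            fmt.Println("falling back to root, got HTTP", gerr.Code)
        }
    }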
@@ -1322,7 +1323,7 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
 func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
 	t := linkTemplate(exportMimeType)
 	if t == nil {
-		return nil, errors.Errorf("unsupported link type %s", exportMimeType)
+		return nil, fmt.Errorf("unsupported link type %s", exportMimeType)
 	}
 	xdgIcon := _mimeTypeToXDGLinkIcons[info.MimeType]
 	if xdgIcon == "" {
@@ -1335,7 +1336,7 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
 		info.WebViewLink, info.Name, xdgIcon,
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "executing template failed")
+		return nil, fmt.Errorf("executing template failed: %w", err)
 	}

 	baseObject := f.newBaseObject(remote+extension, info)
@@ -1372,7 +1373,7 @@ func (f *Fs) newObjectWithExportInfo(
 	// will have been resolved so this will do nothing.
 	info, err = f.resolveShortcut(ctx, info)
 	if err != nil {
-		return nil, errors.Wrap(err, "new object")
+		return nil, fmt.Errorf("new object: %w", err)
 	}
 	switch {
 	case info.MimeType == driveFolderType:
@@ -2015,13 +2016,14 @@ func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *dr
 	}
 	newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.fileFields)
 	if err != nil {
-		if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
+		var gerr *googleapi.Error
+		if errors.As(err, &gerr) && gerr.Code == 404 {
 			// 404 means dangling shortcut, so just return the shortcut with the mime type mangled
 			fs.Logf(nil, "Dangling shortcut %q detected", item.Name)
 			item.MimeType = shortcutMimeTypeDangling
 			return item, nil
 		}
-		return nil, errors.Wrap(err, "failed to resolve shortcut")
+		return nil, fmt.Errorf("failed to resolve shortcut: %w", err)
 	}
 	// make sure we use the Name, Parents and Trashed from the original item
 	newItem.Name = item.Name
@@ -2123,10 +2125,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,

 			exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType)
 			if exportExt == "" {
-				return nil, errors.Errorf("No export format found for %q", importMimeType)
+				return nil, fmt.Errorf("No export format found for %q", importMimeType)
 			}
 			if exportExt != srcExt && !f.opt.AllowImportNameChange {
-				return nil, errors.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
+				return nil, fmt.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
 			}
 		}
 	}
@@ -2194,7 +2196,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
 			return false
 		})
 		if err != nil {
-			return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
+			return fmt.Errorf("MergeDirs list failed on %v: %w", srcDir, err)
 		}
 		// move them into place
 		for _, info := range infos {
@@ -2210,14 +2212,14 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
 				return f.shouldRetry(ctx, err)
 			})
 			if err != nil {
-				return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
+				return fmt.Errorf("MergeDirs move failed on %q in %v: %w", info.Name, srcDir, err)
 			}
 		}
 		// rmdir (into trash) the now empty source directory
 		fs.Infof(srcDir, "removing empty directory")
 		err = f.delete(ctx, srcDir.ID(), true)
 		if err != nil {
-			return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
+			return fmt.Errorf("MergeDirs move failed to rmdir %q: %w", srcDir, err)
 		}
 	}
 	return nil
@@ -2280,7 +2282,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 			return err
 		}
 		if found {
-			return errors.Errorf("directory not empty")
+			return fmt.Errorf("directory not empty")
 		}
 	}
 	if root != "" {
@@ -2458,7 +2460,7 @@ func (f *Fs) cleanupTeamDrive(ctx context.Context, dir string, directoryID strin
 		return false
 	})
 	if err != nil {
-		err = errors.Wrap(err, "failed to list directory")
+		err = fmt.Errorf("failed to list directory: %w", err)
 		r.Errors++
 		fs.Errorf(dir, "%v", err)
 	}
@@ -2502,7 +2504,7 @@ func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
 		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "failed to get Shared Drive info")
+		return fmt.Errorf("failed to get Shared Drive info: %w", err)
 	}
 	fs.Debugf(f, "read info from Shared Drive %q", td.Name)
 	return err
@@ -2525,7 +2527,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to get Drive storageQuota")
+		return nil, fmt.Errorf("failed to get Drive storageQuota: %w", err)
 	}
 	q := about.StorageQuota
 	usage := &fs.Usage{
@@ -2849,7 +2851,7 @@ func (f *Fs) Hashes() hash.Set {
 func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
 	chunkSizeInt, err := strconv.ParseInt(chunkSizeString, 10, 64)
 	if err != nil {
-		return errors.Wrap(err, "couldn't convert chunk size to int")
+		return fmt.Errorf("couldn't convert chunk size to int: %w", err)
 	}
 	chunkSize := fs.SizeSuffix(chunkSizeInt)
 	if chunkSize == f.opt.ChunkSize {
@@ -2886,17 +2888,17 @@ func (f *Fs) changeServiceAccountFile(ctx context.Context, file string) (err err
 	f.opt.ServiceAccountCredentials = ""
 	oAuthClient, err := createOAuthClient(ctx, &f.opt, f.name, f.m)
 	if err != nil {
-		return errors.Wrap(err, "drive: failed when making oauth client")
+		return fmt.Errorf("drive: failed when making oauth client: %w", err)
 	}
 	f.client = oAuthClient
 	f.svc, err = drive.New(f.client)
 	if err != nil {
-		return errors.Wrap(err, "couldn't create Drive client")
+		return fmt.Errorf("couldn't create Drive client: %w", err)
 	}
 	if f.opt.V2DownloadMinSize >= 0 {
 		f.v2Svc, err = drive_v2.New(f.client)
 		if err != nil {
-			return errors.Wrap(err, "couldn't create Drive v2 client")
+			return fmt.Errorf("couldn't create Drive v2 client: %w", err)
 		}
 	}
 	return nil
@@ -2925,12 +2927,12 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
 		isDir = true
 	} else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
 		if err != fs.ErrorIsDir {
-			return nil, errors.Wrap(err, "can't find source")
+			return nil, fmt.Errorf("can't find source: %w", err)
 		}
 		// source was a directory
 		srcID, err = srcFs.dirCache.FindDir(ctx, srcPath, false)
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to find source dir")
+			return nil, fmt.Errorf("failed to find source dir: %w", err)
 		}
 		isDir = true
 	} else {
@@ -2947,13 +2949,13 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
|
|||||||
} else if err == fs.ErrorIsDir {
|
} else if err == fs.ErrorIsDir {
|
||||||
err = errors.New("existing directory")
|
err = errors.New("existing directory")
|
||||||
}
|
}
|
||||||
return nil, errors.Wrap(err, "not overwriting shortcut target")
|
return nil, fmt.Errorf("not overwriting shortcut target: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create destination shortcut
|
// Create destination shortcut
|
||||||
createInfo, err := dstFs.createFileInfo(ctx, dstPath, time.Now())
|
createInfo, err := dstFs.createFileInfo(ctx, dstPath, time.Now())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "shortcut destination failed")
|
return nil, fmt.Errorf("shortcut destination failed: %w", err)
|
||||||
}
|
}
|
||||||
createInfo.MimeType = shortcutMimeType
|
createInfo.MimeType = shortcutMimeType
|
||||||
createInfo.ShortcutDetails = &drive.FileShortcutDetails{
|
createInfo.ShortcutDetails = &drive.FileShortcutDetails{
|
||||||
@@ -2970,7 +2972,7 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
|
|||||||
return dstFs.shouldRetry(ctx, err)
|
return dstFs.shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "shortcut creation failed")
|
return nil, fmt.Errorf("shortcut creation failed: %w", err)
|
||||||
}
|
}
|
||||||
if isDir {
|
if isDir {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
@@ -2990,7 +2992,7 @@ func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.Drive, err err
|
|||||||
return defaultFs.shouldRetry(ctx, err)
|
return defaultFs.shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return drives, errors.Wrap(err, "listing Team Drives failed")
|
return drives, fmt.Errorf("listing Team Drives failed: %w", err)
|
||||||
}
|
}
|
||||||
drives = append(drives, teamDrives.Drives...)
|
drives = append(drives, teamDrives.Drives...)
|
||||||
if teamDrives.NextPageToken == "" {
|
if teamDrives.NextPageToken == "" {
|
||||||
@@ -3033,7 +3035,7 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
|
|||||||
return f.shouldRetry(ctx, err)
|
return f.shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err = errors.Wrap(err, "failed to restore")
|
err = fmt.Errorf("failed to restore: %w", err)
|
||||||
r.Errors++
|
r.Errors++
|
||||||
fs.Errorf(remote, "%v", err)
|
fs.Errorf(remote, "%v", err)
|
||||||
} else {
|
} else {
|
||||||
@@ -3050,7 +3052,7 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
|
|||||||
return false
|
return false
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err = errors.Wrap(err, "failed to list directory")
|
err = fmt.Errorf("failed to list directory: %w", err)
|
||||||
r.Errors++
|
r.Errors++
|
||||||
fs.Errorf(dir, "%v", err)
|
fs.Errorf(dir, "%v", err)
|
||||||
}
|
}
|
||||||
@@ -3074,10 +3076,10 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
|
|||||||
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
||||||
info, err := f.getFile(ctx, id, f.fileFields)
|
info, err := f.getFile(ctx, id, f.fileFields)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "couldn't find id")
|
return fmt.Errorf("couldn't find id: %w", err)
|
||||||
}
|
}
|
||||||
if info.MimeType == driveFolderType {
|
if info.MimeType == driveFolderType {
|
||||||
return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
|
return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
|
||||||
}
|
}
|
||||||
info.Name = f.opt.Enc.ToStandardName(info.Name)
|
info.Name = f.opt.Enc.ToStandardName(info.Name)
|
||||||
o, err := f.newObjectWithInfo(ctx, info.Name, info)
|
o, err := f.newObjectWithInfo(ctx, info.Name, info)
|
||||||
@@ -3100,7 +3102,7 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
|||||||
}
|
}
|
||||||
_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
|
_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "copy failed")
|
return fmt.Errorf("copy failed: %w", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -3299,7 +3301,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
|||||||
if ok {
|
if ok {
|
||||||
targetFs, err := cache.Get(ctx, target)
|
targetFs, err := cache.Get(ctx, target)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "couldn't find target")
|
return nil, fmt.Errorf("couldn't find target: %w", err)
|
||||||
}
|
}
|
||||||
dstFs, ok = targetFs.(*Fs)
|
dstFs, ok = targetFs.(*Fs)
|
||||||
if !ok {
|
if !ok {
|
||||||
@@ -3338,7 +3340,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
|||||||
arg = arg[2:]
|
arg = arg[2:]
|
||||||
err = f.copyID(ctx, id, dest)
|
err = f.copyID(ctx, id, dest)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(err, "failed copying %q to %q", id, dest)
|
return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil, nil
|
return nil, nil
|
||||||
@@ -3572,11 +3574,11 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt
|
|||||||
url += "acknowledgeAbuse=true"
|
url += "acknowledgeAbuse=true"
|
||||||
_, res, err = o.httpResponse(ctx, url, "GET", options)
|
_, res, err = o.httpResponse(ctx, url, "GET", options)
|
||||||
} else {
|
} else {
|
||||||
err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
|
err = fmt.Errorf("Use the --drive-acknowledge-abuse flag to download this file: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "open file failed")
|
return nil, fmt.Errorf("open file failed: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return res.Body, nil
|
return res.Body, nil
|
||||||
@@ -3740,14 +3742,14 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
|
|||||||
}
|
}
|
||||||
|
|
||||||
if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
|
if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
|
||||||
return errors.Errorf("can't update google document type without --drive-import-formats")
|
return fmt.Errorf("can't update google document type without --drive-import-formats")
|
||||||
}
|
}
|
||||||
importMimeType = o.fs.findImportFormat(ctx, updateInfo.MimeType)
|
importMimeType = o.fs.findImportFormat(ctx, updateInfo.MimeType)
|
||||||
if importMimeType == "" {
|
if importMimeType == "" {
|
||||||
return errors.Errorf("no import format found for %q", srcMimeType)
|
return fmt.Errorf("no import format found for %q", srcMimeType)
|
||||||
}
|
}
|
||||||
if importMimeType != o.documentMimeType {
|
if importMimeType != o.documentMimeType {
|
||||||
return errors.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
|
return fmt.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
|
||||||
}
|
}
|
||||||
updateInfo.MimeType = importMimeType
|
updateInfo.MimeType = importMimeType
|
||||||
|
|
||||||
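
The change running through these hunks is mechanical: errors.Wrap(err, "msg") from github.com/pkg/errors becomes fmt.Errorf("msg: %w", err), and errors.Wrapf folds its arguments into the format string with err last. The %w verb (standard since Go 1.13) records the original error in the wrap chain, so callers can still match the underlying cause. A minimal sketch of why the two are equivalent for callers, using a hypothetical sentinel error standing in for a real API failure:

    package main

    import (
        "errors"
        "fmt"
    )

    // errNotFound is a hypothetical sentinel used only for this sketch.
    var errNotFound = errors.New("not found")

    func main() {
        // Before: err := errors.Wrap(errNotFound, "failed to get Shared Drive info")
        // After: %w keeps errNotFound reachable in the chain instead of
        // relying on pkg/errors' private causer interface.
        err := fmt.Errorf("failed to get Shared Drive info: %w", errNotFound)
        fmt.Println(errors.Is(err, errNotFound)) // true - the cause survives wrapping
    }

Calls that only format (errors.Errorf) translate to fmt.Errorf without %w, and errors.New is already identical in the standard library, which is why those call sites need an import change only.
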

@@ -4,6 +4,7 @@ import (
 "bytes"
 "context"
 "encoding/json"
+"errors"
 "fmt"
 "io"
 "io/ioutil"
@@ -15,7 +16,6 @@ import (
 "testing"
 "time"

-"github.com/pkg/errors"
 _ "github.com/rclone/rclone/backend/local"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/filter"

@@ -8,13 +8,13 @@ package dropbox

 import (
 "context"
+"errors"
 "fmt"
 "sync"
 "time"

 "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
 "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
-"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/fserrors"
 "github.com/rclone/rclone/lib/atexit"
@@ -66,7 +66,7 @@ type batcherResponse struct {
 func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
 // fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
 if size > maxBatchSize || size < 0 {
-return nil, errors.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
+return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
 }

 async := false
@@ -91,7 +91,7 @@ func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.
 case "off":
 size = 0
 default:
-return nil, errors.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
+return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
 }

 b := &batcher{
@@ -135,7 +135,7 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
 return err != nil, err
 })
 if err != nil {
-return nil, errors.Wrap(err, "batch commit failed")
+return nil, fmt.Errorf("batch commit failed: %w", err)
 }
 return batchStatus, nil
 }
@@ -180,7 +180,7 @@ func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *f
 if err == nil {
 err = errors.New("batch didn't complete")
 }
-return nil, errors.Wrapf(err, "wait for batch failed after %d tries in %v", try, time.Since(startTime))
+return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
 }

 // commit a batch
@@ -216,13 +216,13 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
 case "complete":
 complete = batchStatus.Complete
 default:
-return errors.Errorf("batch returned unknown status %q", batchStatus.Tag)
+return fmt.Errorf("batch returned unknown status %q", batchStatus.Tag)
 }

 // Check we got the right number of entries
 entries := complete.Entries
 if len(entries) != len(results) {
-return errors.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
+return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
 }

 // Report results to clients
@@ -250,7 +250,7 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
 errorTag += "/" + item.Failure.PropertiesError.Tag
 }
 }
-resp.err = errors.Errorf("batch upload failed: %s", errorTag)
+resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
 }
 if !b.async {
 results[i] <- resp
@@ -261,7 +261,7 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF

 // Report an error if any failed in the batch
 if errorTag != "" {
-return errors.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
+return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
 }

 fs.Debugf(b.f, "Committed %s", desc)
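
Beyond the constructor swap, commitBatch keeps its defensive shape: reject an unknown status tag outright, then insist the server returned exactly one entry per queued item before fanning results back out to waiting clients. A standalone sketch of that validation pattern, with hypothetical types in place of the Dropbox SDK's:

    package main

    import "fmt"

    // batchResult is a hypothetical stand-in for the SDK's batch status type.
    type batchResult struct {
        Tag     string
        Entries []string
    }

    // checkBatch mirrors the shape of commitBatch's validation: reject unknown
    // status tags, then insist on one entry per queued request.
    func checkBatch(res batchResult, want int) error {
        if res.Tag != "complete" {
            return fmt.Errorf("batch returned unknown status %q", res.Tag)
        }
        if len(res.Entries) != want {
            return fmt.Errorf("expecting %d items in batch but got %d", want, len(res.Entries))
        }
        return nil
    }

    func main() {
        fmt.Println(checkBatch(batchResult{Tag: "other"}, 2))
        fmt.Println(checkBatch(batchResult{Tag: "complete", Entries: []string{"a"}}, 2))
    }
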

backend/dropbox/dropbox.go (77 changes, Executable file → Normal file)
@@ -23,6 +23,7 @@ of path_display and all will be well.

 import (
 "context"
+"errors"
 "fmt"
 "io"
 "path"
@@ -38,7 +39,6 @@ import (
 "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/sharing"
 "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/team"
 "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/users"
-"github.com/pkg/errors"
 "github.com/rclone/rclone/backend/dropbox/dbhash"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/config"
@@ -363,24 +363,24 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
 if err == nil {
 return false, err
 }
-baseErrString := errors.Cause(err).Error()
+errString := err.Error()
 // First check for specific errors
-if strings.Contains(baseErrString, "insufficient_space") {
+if strings.Contains(errString, "insufficient_space") {
 return false, fserrors.FatalError(err)
-} else if strings.Contains(baseErrString, "malformed_path") {
+} else if strings.Contains(errString, "malformed_path") {
 return false, fserrors.NoRetryError(err)
 }
 // Then handle any official Retry-After header from Dropbox's SDK
 switch e := err.(type) {
 case auth.RateLimitAPIError:
 if e.RateLimitError.RetryAfter > 0 {
-fs.Logf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
+fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
 err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
 }
 return true, err
 }
 // Keep old behavior for backward compatibility
-if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
+if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
 return true, err
 }
 return fserrors.ShouldRetry(err), err
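
Dropping errors.Cause here is the one subtlety in this hunk: pkg/errors' Cause walked its private wrapper chain to reach the underlying message, but once everything is wrapped with %w, the message of the outermost error already contains every wrapped cause, so a plain err.Error() substring test matches the same Dropbox error strings. A small demonstration:

    package main

    import (
        "errors"
        "fmt"
        "strings"
    )

    func main() {
        base := errors.New("insufficient_space")
        wrapped := fmt.Errorf("upload failed: %w", base)
        // With pkg/errors the code matched on errors.Cause(err).Error();
        // with %w wrapping, the full message already contains the cause,
        // so a plain substring check on err.Error() keeps working.
        fmt.Println(strings.Contains(wrapped.Error(), "insufficient_space")) // true
    }
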

@@ -389,10 +389,10 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
 func checkUploadChunkSize(cs fs.SizeSuffix) error {
 const minChunkSize = fs.SizeSuffixBase
 if cs < minChunkSize {
-return errors.Errorf("%s is less than %s", cs, minChunkSize)
+return fmt.Errorf("%s is less than %s", cs, minChunkSize)
 }
 if cs > maxChunkSize {
-return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
+return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
 }
 return nil
 }
@@ -415,7 +415,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 }
 err = checkUploadChunkSize(opt.ChunkSize)
 if err != nil {
-return nil, errors.Wrap(err, "dropbox: chunk size")
+return nil, fmt.Errorf("dropbox: chunk size: %w", err)
 }

 // Convert the old token if it exists. The old token was just
@@ -427,13 +427,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
 err := config.SetValueAndSave(name, config.ConfigToken, newToken)
 if err != nil {
-return nil, errors.Wrap(err, "NewFS convert token")
+return nil, fmt.Errorf("NewFS convert token: %w", err)
 }
 }

 oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, getOauthConfig(m))
 if err != nil {
-return nil, errors.Wrap(err, "failed to configure dropbox")
+return nil, fmt.Errorf("failed to configure dropbox: %w", err)
 }

 ci := fs.GetConfig(ctx)
@@ -474,7 +474,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 memberIds, err := f.team.MembersGetInfo(args)

 if err != nil {
-return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
+return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
 }

 cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
@@ -551,7 +551,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "get current account failed")
+return nil, fmt.Errorf("get current account failed: %w", err)
 }
 switch x := acc.RootInfo.(type) {
 case *common.TeamRootInfo:
@@ -559,22 +559,24 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 case *common.UserRootInfo:
 f.ns = x.RootNamespaceId
 default:
-return nil, errors.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
+return nil, fmt.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
 }
 fs.Debugf(f, "Using root namespace %q", f.ns)
 }
 f.setRoot(root)

 // See if the root is actually an object
-_, err = f.getFileMetadata(ctx, f.slashRoot)
-if err == nil {
-    newRoot := path.Dir(f.root)
-    if newRoot == "." {
-        newRoot = ""
-    }
-    f.setRoot(newRoot)
-    // return an error with an fs which points to the parent
-    return f, fs.ErrorIsFile
+if f.root != "" {
+    _, err = f.getFileMetadata(ctx, f.slashRoot)
+    if err == nil {
+        newRoot := path.Dir(f.root)
+        if newRoot == "." {
+            newRoot = ""
+        }
+        f.setRoot(newRoot)
+        // return an error with an fs which points to the parent
+        return f, fs.ErrorIsFile
+    }
 }
 return f, nil
 }
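
The root-handling hunk is the only behavioural change in NewFs: the new f.root != "" guard skips the is-the-root-actually-a-file probe when the remote points at the namespace root, which can never be a file, saving a metadata call. When the probe does hit a file, the convention (kept unchanged) is to re-root the Fs at the parent directory and return fs.ErrorIsFile. A sketch of just that parent computation:

    package main

    import (
        "fmt"
        "path"
    )

    // parentRoot mirrors the small helper logic in the hunk above: compute
    // the parent of a root that turned out to be a file, mapping "." to ""
    // so a top-level file leaves the Fs rooted at the remote's root.
    func parentRoot(root string) string {
        newRoot := path.Dir(root)
        if newRoot == "." {
            newRoot = ""
        }
        return newRoot
    }

    func main() {
        fmt.Println(parentRoot("dir/file.txt")) // "dir"
        fmt.Println(parentRoot("file.txt"))     // ""
    }
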

@@ -710,7 +712,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "list continue")
+return nil, fmt.Errorf("list continue: %w", err)
 }
 }
 for _, entry := range res.Entries {
@@ -784,7 +786,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "list continue")
+return nil, fmt.Errorf("list continue: %w", err)
 }
 }
 for _, entry := range res.Entries {
@@ -850,6 +852,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 arg := files.ListFolderArg{
 Path: f.opt.Enc.FromStandardPath(root),
 Recursive: false,
+Limit: 1000,
 }
 if root == "/" {
 arg.Path = "" // Specify root folder as empty string
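
The Limit: 1000 addition asks Dropbox for up to 1000 entries per listing page rather than the server's default, which cuts the number of list_folder/continue round trips on large directories. A sketch of the argument as built here, using the same dropbox-sdk-go-unofficial files package as the hunk above:

    package main

    import (
        "fmt"

        "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
    )

    func main() {
        // Limit caps entries per page; the server hands back a cursor for
        // the continue call when a directory has more entries than this.
        arg := files.ListFolderArg{
            Path:      "/photos",
            Recursive: false,
            Limit:     1000,
        }
        fmt.Printf("%+v\n", arg)
    }
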

@@ -877,7 +880,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "list continue")
+return nil, fmt.Errorf("list continue: %w", err)
 }
 }
 for _, entry := range res.Entries {
@@ -989,7 +992,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 // check directory exists
 _, err = f.getDirMetadata(ctx, root)
 if err != nil {
-return errors.Wrap(err, "Rmdir")
+return fmt.Errorf("Rmdir: %w", err)
 }

 root = f.opt.Enc.FromStandardPath(root)
@@ -1007,7 +1010,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return errors.Wrap(err, "Rmdir")
+return fmt.Errorf("Rmdir: %w", err)
 }
 if len(res.Entries) != 0 {
 return errors.New("directory not empty")
@@ -1073,7 +1076,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "copy failed")
+return nil, fmt.Errorf("copy failed: %w", err)
 }

 // Set the metadata
@@ -1083,7 +1086,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 err = dstObj.setMetadataFromEntry(fileInfo)
 if err != nil {
-return nil, errors.Wrap(err, "copy failed")
+return nil, fmt.Errorf("copy failed: %w", err)
 }

 return dstObj, nil
@@ -1134,7 +1137,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "move failed")
+return nil, fmt.Errorf("move failed: %w", err)
 }

 // Set the metadata
@@ -1144,7 +1147,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 err = dstObj.setMetadataFromEntry(fileInfo)
 if err != nil {
-return nil, errors.Wrap(err, "move failed")
+return nil, fmt.Errorf("move failed: %w", err)
 }
 return dstObj, nil
 }
@@ -1252,7 +1255,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return errors.Wrap(err, "MoveDir failed")
+return fmt.Errorf("MoveDir failed: %w", err)
 }

 return nil
@@ -1266,7 +1269,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "about failed")
+return nil, fmt.Errorf("about failed: %w", err)
 }
 var total uint64
 if q.Allocation != nil {
@@ -1406,7 +1409,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return "", errors.Wrap(err, "list continue")
+return "", fmt.Errorf("list continue: %w", err)
 }
 cursor = changeList.Cursor
 var entryType fs.EntryType
@@ -1485,7 +1488,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 }
 err := o.readMetaData(ctx)
 if err != nil {
-return "", errors.Wrap(err, "failed to read hash from metadata")
+return "", fmt.Errorf("failed to read hash from metadata: %w", err)
 }
 return o.hash, nil
 }
@@ -1738,7 +1741,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 }
 remote := o.remotePath()
 if ignoredFiles.MatchString(remote) {
-return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
+return fserrors.NoRetryError(fmt.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
 }
 commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
 commitInfo.Mode.Tag = "overwrite"
@@ -1762,7 +1765,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 })
 }
 if err != nil {
-return errors.Wrap(err, "upload failed")
+return fmt.Errorf("upload failed: %w", err)
 }
 // If we haven't received data back from batch upload then fake it
 //
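
One composition worth noting from the Update hunk: rclone's fserrors wrappers layer over whatever error value they are given, so fserrors.NoRetryError(fmt.Errorf(...)) behaves exactly as it did over errors.Errorf, marking the error permanent for the retry machinery. A hedged sketch, assuming only the fserrors helper already visible in these hunks:

    package main

    import (
        "fmt"

        "github.com/rclone/rclone/fs/fserrors"
    )

    func main() {
        // NoRetryError (seen in the hunk above) marks an error as permanent
        // so rclone's retry machinery gives up immediately; it wraps any
        // error value, including ones built with fmt.Errorf.
        err := fserrors.NoRetryError(fmt.Errorf("file name %q is disallowed - not uploading", ".dropbox"))
        fmt.Println(err)
    }
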

@@ -2,6 +2,8 @@ package fichier

 import (
 "context"
+"errors"
+"fmt"
 "io"
 "net/http"
 "net/url"
@@ -10,7 +12,6 @@ import (
 "strings"
 "time"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/fserrors"
 "github.com/rclone/rclone/lib/rest"
@@ -81,7 +82,7 @@ func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "couldn't read file info")
+return nil, fmt.Errorf("couldn't read file info: %w", err)
 }

 return &file, err
@@ -110,7 +111,7 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
 return doretry || !validToken(&token), err
 })
 if err != nil {
-return nil, errors.Wrap(err, "couldn't list files")
+return nil, fmt.Errorf("couldn't list files: %w", err)
 }

 return &token, nil
@@ -144,7 +145,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "couldn't list files")
+return nil, fmt.Errorf("couldn't list files: %w", err)
 }

 entries = make([]fs.DirEntry, len(sharedFiles))
@@ -173,7 +174,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "couldn't list files")
+return nil, fmt.Errorf("couldn't list files: %w", err)
 }
 for i := range filesList.Items {
 item := &filesList.Items[i]
@@ -201,7 +202,7 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "couldn't list folders")
+return nil, fmt.Errorf("couldn't list folders: %w", err)
 }
 foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name)
 for i := range foldersList.SubFolders {
@@ -295,7 +296,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "couldn't create folder")
+return nil, fmt.Errorf("couldn't create folder: %w", err)
 }

 // fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)
@@ -322,10 +323,10 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "couldn't remove folder")
+return nil, fmt.Errorf("couldn't remove folder: %w", err)
 }
 if response.Status != "OK" {
-return nil, errors.Errorf("can't remove folder: %s", response.Message)
+return nil, fmt.Errorf("can't remove folder: %s", response.Message)
 }

 // fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
@@ -352,7 +353,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
 })

 if err != nil {
-return nil, errors.Wrap(err, "couldn't remove file")
+return nil, fmt.Errorf("couldn't remove file: %w", err)
 }

 // fs.Debugf(f, "Removed file with url `%s`", url)
@@ -379,7 +380,7 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
 })

 if err != nil {
-return nil, errors.Wrap(err, "couldn't copy file")
+return nil, fmt.Errorf("couldn't copy file: %w", err)
 }

 return response, nil
@@ -404,7 +405,7 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
 })

 if err != nil {
-return nil, errors.Wrap(err, "couldn't copy file")
+return nil, fmt.Errorf("couldn't copy file: %w", err)
 }

 return response, nil
@@ -432,7 +433,7 @@ func (f *Fs) renameFile(ctx context.Context, url string, newName string) (respon
 })

 if err != nil {
-return nil, errors.Wrap(err, "couldn't rename file")
+return nil, fmt.Errorf("couldn't rename file: %w", err)
 }

 return response, nil
@@ -453,7 +454,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "didnt got an upload node")
+return nil, fmt.Errorf("didnt got an upload node: %w", err)
 }

 // fs.Debugf(f, "Got Upload node")
@@ -497,7 +498,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
 })

 if err != nil {
-return nil, errors.Wrap(err, "couldn't upload file")
+return nil, fmt.Errorf("couldn't upload file: %w", err)
 }

 // fs.Debugf(f, "Uploaded File `%s`", fileName)
@@ -531,7 +532,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
 })

 if err != nil {
-return nil, errors.Wrap(err, "couldn't finish file upload")
+return nil, fmt.Errorf("couldn't finish file upload: %w", err)
 }

 return response, err

@@ -2,6 +2,7 @@ package fichier

 import (
 "context"
+"errors"
 "fmt"
 "io"
 "net/http"
@@ -9,7 +10,6 @@ import (
 "strings"
 "time"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/config"
 "github.com/rclone/rclone/fs/config/configmap"
@@ -454,10 +454,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 if currentDirectoryID == directoryID {
 resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
 if err != nil {
-return nil, errors.Wrap(err, "couldn't rename file")
+return nil, fmt.Errorf("couldn't rename file: %w", err)
 }
 if resp.Status != "OK" {
-return nil, errors.Errorf("couldn't rename file: %s", resp.Message)
+return nil, fmt.Errorf("couldn't rename file: %s", resp.Message)
 }
 url = resp.URLs[0].URL
 } else {
@@ -467,10 +467,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
 if err != nil {
-return nil, errors.Wrap(err, "couldn't move file")
+return nil, fmt.Errorf("couldn't move file: %w", err)
 }
 if resp.Status != "OK" {
-return nil, errors.Errorf("couldn't move file: %s", resp.Message)
+return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
 }
 url = resp.URLs[0]
 }
@@ -503,10 +503,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
 if err != nil {
-return nil, errors.Wrap(err, "couldn't move file")
+return nil, fmt.Errorf("couldn't move file: %w", err)
 }
 if resp.Status != "OK" {
-return nil, errors.Errorf("couldn't move file: %s", resp.Message)
+return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
 }

 file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)

@@ -2,11 +2,12 @@ package fichier

 import (
 "context"
+"errors"
+"fmt"
 "io"
 "net/http"
 "time"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/hash"
 "github.com/rclone/rclone/lib/rest"
@@ -122,7 +123,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 // Delete duplicate after successful upload
 err = o.Remove(ctx)
 if err != nil {
-return errors.Wrap(err, "failed to remove old version")
+return fmt.Errorf("failed to remove old version: %w", err)
 }

 // Replace guts of old object with new one

@@ -17,6 +17,7 @@ import (
 "bytes"
 "context"
 "encoding/base64"
+"errors"
 "fmt"
 "io"
 "io/ioutil"
@@ -32,7 +33,6 @@ import (
 "github.com/rclone/rclone/lib/encoder"
 "github.com/rclone/rclone/lib/random"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/backend/filefabric/api"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/config"
@@ -267,7 +267,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, rootID string, path string
 "pid": rootID,
 }, &resp, nil)
 if err != nil {
-return nil, errors.Wrap(err, "failed to check path exists")
+return nil, fmt.Errorf("failed to check path exists: %w", err)
 }
 if resp.Exists != "y" {
 return nil, fs.ErrorObjectNotFound
@@ -308,7 +308,7 @@ func (f *Fs) getApplianceInfo(ctx context.Context) error {
 "token": "*",
 }, &applianceInfo, nil)
 if err != nil {
-return errors.Wrap(err, "failed to read appliance version")
+return fmt.Errorf("failed to read appliance version: %w", err)
 }
 f.opt.Version = applianceInfo.SoftwareVersionLabel
 f.m.Set("version", f.opt.Version)
@@ -349,7 +349,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
 "authtoken": f.opt.PermanentToken,
 }, &info, nil)
 if err != nil {
-return "", errors.Wrap(err, "failed to get session token")
+return "", fmt.Errorf("failed to get session token: %w", err)
 }
 refreshed = true
 now = now.Add(tokenLifeTime)
@@ -562,7 +562,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 "fi_name": f.opt.Enc.FromStandardName(leaf),
 }, &info, nil)
 if err != nil {
-return "", errors.Wrap(err, "failed to create directory")
+return "", fmt.Errorf("failed to create directory: %w", err)
 }
 // fmt.Printf("...Id %q\n", *info.Id)
 return info.Item.ID, nil
@@ -595,7 +595,7 @@ OUTER:
 var info api.GetFolderContentsResponse
 _, err = f.rpc(ctx, "getFolderContents", p, &info, nil)
 if err != nil {
-return false, errors.Wrap(err, "failed to list directory")
+return false, fmt.Errorf("failed to list directory: %w", err)
 }
 for i := range info.Items {
 item := &info.Items[i]
@@ -726,7 +726,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) (err error) {
 "completedeletion": "n",
 }, &info, nil)
 if err != nil {
-return errors.Wrap(err, "failed to delete file")
+return fmt.Errorf("failed to delete file: %w", err)
 }
 return nil
 }
@@ -763,7 +763,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 }, &info, nil)
 f.dirCache.FlushDir(dir)
 if err != nil {
-return errors.Wrap(err, "failed to remove directory")
+return fmt.Errorf("failed to remove directory: %w", err)
 }
 return nil
 }
@@ -825,7 +825,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 _, err = f.rpc(ctx, "doCopyFile", p, &info, nil)
 if err != nil {
-return nil, errors.Wrap(err, "failed to copy file")
+return nil, fmt.Errorf("failed to copy file: %w", err)
 }
 err = dstObj.setMetaData(&info.Item)
 if err != nil {
@@ -857,7 +857,7 @@ func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err
 "taskid": taskID,
 }, &info, nil)
 if err != nil {
-return errors.Wrapf(err, "failed to wait for task %s to complete", taskID)
+return fmt.Errorf("failed to wait for task %s to complete: %w", taskID, err)
 }
 if len(info.Tasks) == 0 {
 // task has finished
@@ -890,7 +890,7 @@ func (f *Fs) renameLeaf(ctx context.Context, isDir bool, id string, newLeaf stri
 "fi_name": newLeaf,
 }, &info, nil)
 if err != nil {
-return nil, errors.Wrap(err, "failed to rename leaf")
+return nil, fmt.Errorf("failed to rename leaf: %w", err)
 }
 err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
 if err != nil {
@@ -934,7 +934,7 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire
 "dir_id": newDirectoryID,
 }, &info, nil)
 if err != nil {
-return nil, errors.Wrap(err, "failed to move file to new directory")
+return nil, fmt.Errorf("failed to move file to new directory: %w", err)
 }
 item = &info.Item
 err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
@@ -1037,7 +1037,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
 var info api.EmptyResponse
 _, err = f.rpc(ctx, "emptyTrashInBackground", params{}, &info, nil)
 if err != nil {
-return errors.Wrap(err, "failed to empty trash")
+return fmt.Errorf("failed to empty trash: %w", err)
 }
 return nil
 }
@@ -1164,7 +1164,7 @@ func (o *Object) modifyFile(ctx context.Context, keyValues [][2]string) error {
 "data": data.String(),
 }, &info, nil)
 if err != nil {
-return errors.Wrap(err, "failed to update metadata")
+return fmt.Errorf("failed to update metadata: %w", err)
 }
 return o.setMetaData(&info.Item)
 }
@@ -1247,7 +1247,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 }
 _, err = o.fs.rpc(ctx, "doInitUpload", p, &upload, nil)
 if err != nil {
-return errors.Wrap(err, "failed to initialize upload")
+return fmt.Errorf("failed to initialize upload: %w", err)
 }

 // Cancel the upload if aborted or it fails
@@ -1290,13 +1290,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 return o.fs.shouldRetry(ctx, resp, err, nil, try)
 })
 if err != nil {
-return errors.Wrap(err, "failed to upload")
+return fmt.Errorf("failed to upload: %w", err)
 }
 if uploader.Success != "y" {
-return errors.Errorf("upload failed")
+return fmt.Errorf("upload failed")
 }
 if size > 0 && uploader.FileSize != size {
-return errors.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
+return fmt.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
 }

 // Now finalize the file
@@ -1308,7 +1308,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 }
 _, err = o.fs.rpc(ctx, "doCompleteUpload", p, &finalize, nil)
 if err != nil {
-return errors.Wrap(err, "failed to finalize upload")
+return fmt.Errorf("failed to finalize upload: %w", err)
 }
 finalized = true


@@ -4,6 +4,8 @@ package ftp
 import (
 "context"
 "crypto/tls"
+"errors"
+"fmt"
 "io"
 "net"
 "net/textproto"
@@ -14,7 +16,6 @@ import (
 "time"

 "github.com/jlaffaye/ftp"
-"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/accounting"
 "github.com/rclone/rclone/fs/config"
@@ -60,7 +61,6 @@ func init() {
 Name: "pass",
 Help: "FTP password.",
 IsPassword: true,
-Required: true,
 }, {
 Name: "tls",
 Help: `Use Implicit FTPS (FTP over TLS).
@@ -138,6 +138,14 @@ Enabled by default. Use 0 to disable.`,
 Help: "Maximum time to wait for data connection closing status.",
 Default: fs.Duration(60 * time.Second),
 Advanced: true,
+}, {
+Name: "ask_password",
+Default: false,
+Help: `Allow asking for FTP password when needed.
+
+If this is set and no password is supplied then rclone will ask for a password
+`,
+Advanced: true,
 }, {
 Name: config.ConfigEncoding,
 Help: config.ConfigEncodingHelp,
@@ -178,6 +186,7 @@ type Options struct {
 IdleTimeout fs.Duration `config:"idle_timeout"`
 CloseTimeout fs.Duration `config:"close_timeout"`
 ShutTimeout fs.Duration `config:"shut_timeout"`
+AskPassword bool `config:"ask_password"`
 Enc encoder.MultiEncoder `config:"encoding"`
 }

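
The new ask_password option follows the usual backend pattern: an entry in the init() option table plus a matching Options field, joined by the config struct tag (by rclone's flag-naming convention this would surface as --ftp-ask-password, though that name is derived rather than spelled out here). Note that "pass" also stops being Required, since a missing password can now be prompted for; the NewFs hunk further down consumes the option. A tiny sketch of how the tag carries the key:

    package main

    import (
        "fmt"
        "reflect"
    )

    // Options mirrors the new field: the `config` struct tag is what ties
    // the ask_password config key to this field in rclone's option loading.
    type Options struct {
        AskPassword bool `config:"ask_password"`
    }

    func main() {
        f, _ := reflect.TypeOf(Options{}).FieldByName("AskPassword")
        fmt.Println(f.Tag.Get("config")) // "ask_password"
    }
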

@@ -349,7 +358,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
 return false, nil
 })
 if err != nil {
-err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr)
+err = fmt.Errorf("failed to make FTP connection to %q: %w", f.dialAddr, err)
 }
 return c, err
 }
@@ -396,8 +405,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
 *pc = nil
 if err != nil {
 // If not a regular FTP error code then check the connection
-_, isRegularError := errors.Cause(err).(*textproto.Error)
-if !isRegularError {
+var tpErr *textproto.Error
+if !errors.As(err, &tpErr) {
 nopErr := c.NoOp()
 if nopErr != nil {
 fs.Debugf(f, "Connection failed, closing: %v", nopErr)
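
The putFtpConnection change swaps a type assertion on errors.Cause(err) for errors.As, which walks the standard %w chain instead of pkg/errors' causer chain; since every wrap site in this file now uses %w, a wrapped *textproto.Error still counts as a regular FTP status code. A self-contained illustration:

    package main

    import (
        "errors"
        "fmt"
        "net/textproto"
    )

    func main() {
        base := &textproto.Error{Code: 550, Msg: "No such file"}
        wrapped := fmt.Errorf("list: %w", base) // as the migrated call sites wrap

        var tpErr *textproto.Error
        if errors.As(wrapped, &tpErr) {
            // errors.As unwrapped the chain and found the FTP status,
            // so this still counts as a "regular" FTP error.
            fmt.Println(tpErr.Code) // 550
        }
    }
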
@@ -443,9 +452,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
pass, err := obscure.Reveal(opt.Pass)
|
pass := ""
|
||||||
if err != nil {
|
if opt.AskPassword && opt.Pass == "" {
|
||||||
return nil, errors.Wrap(err, "NewFS decrypt password")
|
pass = config.GetPassword("FTP server password")
|
||||||
|
} else {
|
||||||
|
pass, err = obscure.Reveal(opt.Pass)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("NewFS decrypt password: %w", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
user := opt.User
|
user := opt.User
|
||||||
if user == "" {
|
if user == "" {
|
||||||
@@ -502,7 +516,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
|||||||
// Make a connection and pool it to return errors early
|
// Make a connection and pool it to return errors early
|
||||||
c, err := f.getFtpConnection(ctx)
|
c, err := f.getFtpConnection(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "NewFs")
|
return nil, fmt.Errorf("NewFs: %w", err)
|
||||||
}
|
}
|
||||||
f.fGetTime = c.IsGetTimeSupported()
|
f.fGetTime = c.IsGetTimeSupported()
|
||||||
f.fSetTime = c.IsSetTimeSupported()
|
f.fSetTime = c.IsSetTimeSupported()
|
||||||
@@ -520,7 +534,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
|||||||
}
|
}
|
||||||
_, err := f.NewObject(ctx, remote)
|
_, err := f.NewObject(ctx, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
|
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
|
||||||
// File doesn't exist so return old f
|
// File doesn't exist so return old f
|
||||||
f.root = root
|
f.root = root
|
||||||
return f, nil
|
return f, nil
|
||||||
@@ -599,7 +613,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err

 	c, err := f.getFtpConnection(ctx)
 	if err != nil {
-		return nil, errors.Wrap(err, "findItem")
+		return nil, fmt.Errorf("findItem: %w", err)
 	}
 	files, err := c.List(f.dirFromStandardPath(dir))
 	f.putFtpConnection(&c, err)
@@ -643,7 +657,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
 func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
 	entry, err := f.findItem(ctx, remote)
 	if err != nil {
-		return false, errors.Wrap(err, "dirExists")
+		return false, fmt.Errorf("dirExists: %w", err)
 	}
 	if entry != nil && entry.Type == ftp.EntryTypeFolder {
 		return true, nil
@@ -664,7 +678,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
 	c, err := f.getFtpConnection(ctx)
 	if err != nil {
-		return nil, errors.Wrap(err, "list")
+		return nil, fmt.Errorf("list: %w", err)
 	}

 	var listErr error
@@ -702,7 +716,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	if len(files) == 0 {
 		exists, err := f.dirExists(ctx, dir)
 		if err != nil {
-			return nil, errors.Wrap(err, "list")
+			return nil, fmt.Errorf("list: %w", err)
 		}
 		if !exists {
 			return nil, fs.ErrorDirNotFound
@@ -766,7 +780,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	// fs.Debugf(f, "Trying to put file %s", src.Remote())
 	err := f.mkParentDir(ctx, src.Remote())
 	if err != nil {
-		return nil, errors.Wrap(err, "Put mkParentDir failed")
+		return nil, fmt.Errorf("Put mkParentDir failed: %w", err)
 	}
 	o := &Object{
 		fs: f,
@@ -789,7 +803,7 @@ func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err erro

 	c, err := f.getFtpConnection(ctx)
 	if err != nil {
-		return nil, errors.Wrap(err, "getInfo")
+		return nil, fmt.Errorf("getInfo: %w", err)
 	}
 	files, err := c.List(f.dirFromStandardPath(dir))
 	f.putFtpConnection(&c, err)
@@ -827,7 +841,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
 		}
 		return fs.ErrorIsFile
 	} else if err != fs.ErrorObjectNotFound {
-		return errors.Wrapf(err, "mkdir %q failed", abspath)
+		return fmt.Errorf("mkdir %q failed: %w", abspath, err)
 	}
 	parent := path.Dir(abspath)
 	err = f.mkdir(ctx, parent)
@@ -836,7 +850,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
 	}
 	c, connErr := f.getFtpConnection(ctx)
 	if connErr != nil {
-		return errors.Wrap(connErr, "mkdir")
+		return fmt.Errorf("mkdir: %w", connErr)
 	}
 	err = c.MakeDir(f.dirFromStandardPath(abspath))
 	f.putFtpConnection(&c, err)
@@ -872,7 +886,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	c, err := f.getFtpConnection(ctx)
 	if err != nil {
-		return errors.Wrap(translateErrorFile(err), "Rmdir")
+		return fmt.Errorf("Rmdir: %w", translateErrorFile(err))
 	}
 	err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir)))
 	f.putFtpConnection(&c, err)
@@ -888,11 +902,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 	err := f.mkParentDir(ctx, remote)
 	if err != nil {
-		return nil, errors.Wrap(err, "Move mkParentDir failed")
+		return nil, fmt.Errorf("Move mkParentDir failed: %w", err)
 	}
 	c, err := f.getFtpConnection(ctx)
 	if err != nil {
-		return nil, errors.Wrap(err, "Move")
+		return nil, fmt.Errorf("Move: %w", err)
 	}
 	err = c.Rename(
 		f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
@@ -900,11 +914,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	)
 	f.putFtpConnection(&c, err)
 	if err != nil {
-		return nil, errors.Wrap(err, "Move Rename failed")
+		return nil, fmt.Errorf("Move Rename failed: %w", err)
 	}
 	dstObj, err := f.NewObject(ctx, remote)
 	if err != nil {
-		return nil, errors.Wrap(err, "Move NewObject failed")
+		return nil, fmt.Errorf("Move NewObject failed: %w", err)
 	}
 	return dstObj, nil
 }
@@ -934,19 +948,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		}
 		return fs.ErrorIsFile
 	} else if err != fs.ErrorObjectNotFound {
-		return errors.Wrapf(err, "DirMove getInfo failed")
+		return fmt.Errorf("DirMove getInfo failed: %w", err)
 	}

 	// Make sure the parent directory exists
 	err = f.mkdir(ctx, path.Dir(dstPath))
 	if err != nil {
-		return errors.Wrap(err, "DirMove mkParentDir dst failed")
+		return fmt.Errorf("DirMove mkParentDir dst failed: %w", err)
 	}

 	// Do the move
 	c, err := f.getFtpConnection(ctx)
 	if err != nil {
-		return errors.Wrap(err, "DirMove")
+		return fmt.Errorf("DirMove: %w", err)
 	}
 	err = c.Rename(
 		f.dirFromStandardPath(srcPath),
@@ -954,7 +968,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	)
 	f.putFtpConnection(&c, err)
 	if err != nil {
-		return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
+		return fmt.Errorf("DirMove Rename(%q,%q) failed: %w", srcPath, dstPath, err)
 	}
 	return nil
 }
@@ -1111,12 +1125,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 	}
 	c, err := o.fs.getFtpConnection(ctx)
 	if err != nil {
-		return nil, errors.Wrap(err, "open")
+		return nil, fmt.Errorf("open: %w", err)
 	}
 	fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
 	if err != nil {
 		o.fs.putFtpConnection(&c, err)
-		return nil, errors.Wrap(err, "open")
+		return nil, fmt.Errorf("open: %w", err)
 	}
 	rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
 	return rc, nil
@@ -1146,7 +1160,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}
 	c, err := o.fs.getFtpConnection(ctx)
 	if err != nil {
-		return errors.Wrap(err, "Update")
+		return fmt.Errorf("Update: %w", err)
 	}
 	err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
 	// Ignore error 250 here - send by some servers
@@ -1164,15 +1178,15 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		// recycle connection in advance to let remove() find free token
 		o.fs.putFtpConnection(nil, err)
 		remove()
-		return errors.Wrap(err, "update stor")
+		return fmt.Errorf("update stor: %w", err)
 	}
 	o.fs.putFtpConnection(&c, nil)
 	if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
-		return errors.Wrap(err, "SetModTime")
+		return fmt.Errorf("SetModTime: %w", err)
 	}
 	o.info, err = o.fs.getInfo(ctx, path)
 	if err != nil {
-		return errors.Wrap(err, "update getinfo")
+		return fmt.Errorf("update getinfo: %w", err)
 	}
 	return nil
 }
@@ -1191,7 +1205,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 	} else {
 		c, err := o.fs.getFtpConnection(ctx)
 		if err != nil {
-			return errors.Wrap(err, "Remove")
+			return fmt.Errorf("Remove: %w", err)
 		}
 		err = c.Delete(o.fs.opt.Enc.FromStandardPath(path))
 		o.fs.putFtpConnection(&c, err)

@@ -36,7 +36,7 @@ func (f *Fs) testUploadTimeout(t *testing.T) {
 	const (
 		fileSize    = 100000000             // 100 MiB
 		idleTimeout = 40 * time.Millisecond // small because test server is local
-		maxTime     = 5 * time.Second       // prevent test hangup
+		maxTime     = 10 * time.Second      // prevent test hangup
 	)

 	if testing.Short() {

@@ -16,6 +16,7 @@ import (
 	"context"
 	"encoding/base64"
 	"encoding/hex"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -25,7 +26,6 @@ import (
 	"strings"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
@@ -375,7 +375,7 @@ func (o *Object) split() (bucket, bucketPath string) {
 func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) {
 	conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
 	if err != nil {
-		return nil, errors.Wrap(err, "error processing credentials")
+		return nil, fmt.Errorf("error processing credentials: %w", err)
 	}
 	ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx))
 	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
@@ -408,7 +408,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
 		loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
 		if err != nil {
-			return nil, errors.Wrap(err, "error opening service account credentials file")
+			return nil, fmt.Errorf("error opening service account credentials file: %w", err)
 		}
 		opt.ServiceAccountCredentials = string(loadedCreds)
 	}
@@ -417,7 +417,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	} else if opt.ServiceAccountCredentials != "" {
 		oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
 		if err != nil {
-			return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
+			return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
 		}
 	} else {
 		oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
@@ -425,7 +425,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			ctx := context.Background()
 			oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
 			if err != nil {
-				return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
+				return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
 			}
 		}
 	}
@@ -449,7 +449,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	f.client = oAuthClient
 	f.svc, err = storage.New(f.client)
 	if err != nil {
-		return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
+		return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
 	}

 	if f.rootBucket != "" && f.rootDirectory != "" {
@@ -759,10 +759,10 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
 		return nil
 	} else if gErr, ok := err.(*googleapi.Error); ok {
 		if gErr.Code != http.StatusNotFound {
-			return errors.Wrap(err, "failed to get bucket")
+			return fmt.Errorf("failed to get bucket: %w", err)
 		}
 	} else {
-		return errors.Wrap(err, "failed to get bucket")
+		return fmt.Errorf("failed to get bucket: %w", err)
 	}

 	if f.opt.ProjectNumber == "" {
@@ -1065,7 +1065,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	_, isRanging := req.Header["Range"]
 	if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
 		_ = res.Body.Close() // ignore error
-		return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
+		return nil, fmt.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
 	}
 	return res.Body, nil
 }

@@ -6,6 +6,7 @@ package googlephotos
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -17,7 +18,6 @@ import (
 	"sync"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/googlephotos/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
@@ -85,7 +85,7 @@ func init() {
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
 	if err != nil {
-		return nil, errors.Wrap(err, "couldn't parse config into struct")
+		return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
 	}

 	switch config.State {
@@ -139,7 +139,7 @@ you want to read the media.`,
 			Default: false,
 			Help: `Also view and download archived media.

-By default rclone does not request archived media. Thus, when syncing,
+By default, rclone does not request archived media. Thus, when syncing,
 archived media is not visible in directory listings or transferred.

 Note that media in albums is always visible and synced, no matter
@@ -292,7 +292,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	baseClient := fshttp.NewClient(ctx)
 	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to configure Box")
+		return nil, fmt.Errorf("failed to configure Box: %w", err)
 	}

 	root = strings.Trim(path.Clean(root), "/")
@@ -345,13 +345,13 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return "", errors.Wrap(err, "couldn't read openID config")
+		return "", fmt.Errorf("couldn't read openID config: %w", err)
 	}

 	// Find userinfo endpoint
 	endpoint, ok := openIDconfig[name].(string)
 	if !ok {
-		return "", errors.Errorf("couldn't find %q from openID config", name)
+		return "", fmt.Errorf("couldn't find %q from openID config", name)
 	}

 	return endpoint, nil
@@ -374,7 +374,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "couldn't read user info")
+		return nil, fmt.Errorf("couldn't read user info: %w", err)
 	}
 	return userInfo, nil
 }
@@ -405,7 +405,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "couldn't revoke token")
+		return fmt.Errorf("couldn't revoke token: %w", err)
 	}
 	fs.Infof(f, "res = %+v", res)
 	return nil
@@ -492,7 +492,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "couldn't list albums")
+		return nil, fmt.Errorf("couldn't list albums: %w", err)
 	}
 	newAlbums := result.Albums
 	if shared {
@@ -549,7 +549,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "couldn't list files")
+		return fmt.Errorf("couldn't list files: %w", err)
 	}
 	items := result.MediaItems
 	if len(items) > 0 && items[0].ID == lastID {
@@ -693,7 +693,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "couldn't create album")
+		return nil, fmt.Errorf("couldn't create album: %w", err)
 	}
 	f.albums[false].add(&result)
 	return &result, nil
@@ -879,7 +879,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "couldn't get media item")
+		return fmt.Errorf("couldn't get media item: %w", err)
 	}
 	o.setMetaData(&item)
 	return nil
@@ -1014,7 +1014,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "couldn't upload file")
+		return fmt.Errorf("couldn't upload file: %w", err)
 	}
 	uploadToken := strings.TrimSpace(string(token))
 	if uploadToken == "" {
@@ -1042,14 +1042,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "failed to create media item")
+		return fmt.Errorf("failed to create media item: %w", err)
 	}
 	if len(result.NewMediaItemResults) != 1 {
 		return errors.New("bad response to BatchCreate wrong number of items")
 	}
 	mediaItemResult := result.NewMediaItemResults[0]
 	if mediaItemResult.Status.Code != 0 {
-		return errors.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
+		return fmt.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
 	}
 	o.setMetaData(&mediaItemResult.MediaItem)

@@ -1071,7 +1071,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 	albumTitle, fileName := match[1], match[2]
 	album, ok := o.fs.albums[false].get(albumTitle)
 	if !ok {
-		return errors.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle)
+		return fmt.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle)
 	}
 	opts := rest.Opts{
 		Method: "POST",
@@ -1087,7 +1087,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "couldn't delete item from album")
+		return fmt.Errorf("couldn't delete item from album: %w", err)
 	}
 	return nil
 }

@@ -11,7 +11,6 @@ import (
 	"strings"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/googlephotos/api"
 	"github.com/rclone/rclone/fs"
 )
@@ -270,7 +269,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
 	year := match[1]
 	current, err := time.Parse("2006", year)
 	if err != nil {
-		return nil, errors.Errorf("bad year %q", match[1])
+		return nil, fmt.Errorf("bad year %q", match[1])
 	}
 	currentYear := current.Year()
 	for current.Year() == currentYear {
@@ -284,7 +283,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
 func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
 	year, err := strconv.Atoi(match[1])
 	if err != nil || year < 1000 || year > 3000 {
-		return sf, errors.Errorf("bad year %q", match[1])
+		return sf, fmt.Errorf("bad year %q", match[1])
 	}
 	sf = api.SearchFilter{
 		Filters: &api.Filters{
@@ -300,14 +299,14 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
 	if len(match) >= 3 {
 		month, err := strconv.Atoi(match[2])
 		if err != nil || month < 1 || month > 12 {
-			return sf, errors.Errorf("bad month %q", match[2])
+			return sf, fmt.Errorf("bad month %q", match[2])
 		}
 		sf.Filters.DateFilter.Dates[0].Month = month
 	}
 	if len(match) >= 4 {
 		day, err := strconv.Atoi(match[3])
 		if err != nil || day < 1 || day > 31 {
-			return sf, errors.Errorf("bad day %q", match[3])
+			return sf, fmt.Errorf("bad day %q", match[3])
 		}
 		sf.Filters.DateFilter.Dates[0].Day = day
 	}

@@ -2,9 +2,10 @@ package hasher

 import (
 	"context"
+	"errors"
+	"fmt"
 	"path"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/cache"
@@ -118,18 +119,18 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
 	case fs.ErrorIsFile:
 		// ok
 	case nil:
-		return errors.Errorf("not a file: %s", sumRemote)
+		return fmt.Errorf("not a file: %s", sumRemote)
 	default:
 		return err
 	}

 	sumObj, err := sumFs.NewObject(ctx, path.Base(sumPath))
 	if err != nil {
-		return errors.Wrap(err, "cannot open sum file")
+		return fmt.Errorf("cannot open sum file: %w", err)
 	}
 	hashes, err := operations.ParseSumFile(ctx, sumObj)
 	if err != nil {
-		return errors.Wrap(err, "failed to parse sum file")
+		return fmt.Errorf("failed to parse sum file: %w", err)
 	}

 	if sticky {

@@ -4,6 +4,7 @@ package hasher
 import (
 	"context"
 	"encoding/gob"
+	"errors"
 	"fmt"
 	"io"
 	"path"
@@ -11,7 +12,6 @@ import (
 	"sync"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/config/configmap"
@@ -102,7 +102,7 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
 	remotePath := fspath.JoinRootPath(opt.Remote, rpath)
 	baseFs, err := cache.Get(ctx, remotePath)
 	if err != nil && err != fs.ErrorIsFile {
-		return nil, errors.Wrapf(err, "failed to derive base remote %q", opt.Remote)
+		return nil, fmt.Errorf("failed to derive base remote %q: %w", opt.Remote, err)
 	}

 	f := &Fs{
@@ -127,7 +127,7 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
 	for _, hashName := range opt.Hashes {
 		var ht hash.Type
 		if err := ht.Set(hashName); err != nil {
-			return nil, errors.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String())
+			return nil, fmt.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String())
 		}
 		if !f.slowHashes.Contains(ht) {
 			f.autoHashes.Add(ht)

@@ -25,6 +25,7 @@ func TestIntegration(t *testing.T) {
 			"OpenWriterAt",
 		},
 		UnimplementableObjectMethods: []string{},
+		QuickTestOK:                  true,
 	}
 	if *fstest.RemoteName == "" {
 		tempDir := filepath.Join(os.TempDir(), "rclone-hasher-test")

@@ -4,11 +4,11 @@ import (
 	"bytes"
 	"context"
 	"encoding/gob"
+	"errors"
 	"fmt"
 	"strings"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/operations"
@@ -199,10 +199,10 @@ func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
 		r.Hashes[hashType] = hashVal
 	}
 	if data, err = r.encode(op.key); err != nil {
-		return errors.Wrap(err, "marshal failed")
+		return fmt.Errorf("marshal failed: %w", err)
 	}
 	if err = b.Put([]byte(op.key), data); err != nil {
-		return errors.Wrap(err, "put failed")
+		return fmt.Errorf("put failed: %w", err)
 	}
 	return err
 }
@@ -294,7 +294,7 @@ func (f *Fs) dumpLine(r *hashRecord, path string, include bool, err error) strin
 		if hashVal == "" || err != nil {
 			hashVal = "-"
 		}
-		hashVal = fmt.Sprintf("%-*s", hash.Width(hashType), hashVal)
+		hashVal = fmt.Sprintf("%-*s", hash.Width(hashType, false), hashVal)
 		hashes = append(hashes, hashName+":"+hashVal)
 	}
 	hashesStr := strings.Join(hashes, " ")

@@ -2,13 +2,13 @@ package hasher

 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"path"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/operations"

@@ -263,6 +263,98 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 	return f.client.RemoveAll(realpath)
 }

+// Move src to this remote using server-side move operations.
+//
+// This is stored with the remote path given
+//
+// It returns the destination Object and a possible error
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantMove
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+	srcObj, ok := src.(*Object)
+	if !ok {
+		fs.Debugf(src, "Can't move - not same remote type")
+		return nil, fs.ErrorCantMove
+	}
+
+	// Get the real paths from the remote specs:
+	sourcePath := srcObj.fs.realpath(srcObj.remote)
+	targetPath := f.realpath(remote)
+	fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)
+
+	// Make sure the target folder exists:
+	dirname := path.Dir(targetPath)
+	err := f.client.MkdirAll(dirname, 0755)
+	if err != nil {
+		return nil, err
+	}
+
+	// Do the move
+	// Note that the underlying HDFS library hard-codes Overwrite=True, but this is expected rclone behaviour.
+	err = f.client.Rename(sourcePath, targetPath)
+	if err != nil {
+		return nil, err
+	}
+
+	// Look up the resulting object
+	info, err := f.client.Stat(targetPath)
+	if err != nil {
+		return nil, err
+	}
+
+	// And return it:
+	return &Object{
+		fs:      f,
+		remote:  remote,
+		size:    info.Size(),
+		modTime: info.ModTime(),
+	}, nil
+}
+
+// DirMove moves src, srcRemote to this remote at dstRemote
+// using server-side move operations.
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantDirMove
+//
+// If destination exists then return fs.ErrorDirExists
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
+	srcFs, ok := src.(*Fs)
+	if !ok {
+		return fs.ErrorCantDirMove
+	}
+
+	// Get the real paths from the remote specs:
+	sourcePath := srcFs.realpath(srcRemote)
+	targetPath := f.realpath(dstRemote)
+	fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)
+
+	// Check if the destination exists:
+	info, err := f.client.Stat(targetPath)
+	if err == nil {
+		fs.Debugf(f, "target directory already exits, IsDir = [%t]", info.IsDir())
+		return fs.ErrorDirExists
+	}
+
+	// Make sure the targets parent folder exists:
+	dirname := path.Dir(targetPath)
+	err = f.client.MkdirAll(dirname, 0755)
+	if err != nil {
+		return err
+	}
+
+	// Do the move
+	err = f.client.Rename(sourcePath, targetPath)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
 // About gets quota information from the Fs
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	info, err := f.client.StatFs()
@@ -318,4 +410,6 @@ var (
 	_ fs.Purger      = (*Fs)(nil)
 	_ fs.PutStreamer = (*Fs)(nil)
 	_ fs.Abouter     = (*Fs)(nil)
+	_ fs.Mover       = (*Fs)(nil)
+	_ fs.DirMover    = (*Fs)(nil)
 )

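The two new `var _` lines are Go's compile-time interface check: assigning a typed nil `*Fs` to an `fs.Mover` fails the build if the `Move` method ever stops matching the interface, at zero runtime cost. A minimal sketch of the idiom with stand-in types (`Mover` and `remote` are illustrative, not the rclone definitions):

package main

import "fmt"

// Mover is a stand-in for an optional capability interface like fs.Mover.
type Mover interface {
	Move(src, dst string) error
}

type remote struct{}

// Move satisfies Mover; the assertion below breaks the build if this drifts.
func (remote) Move(src, dst string) error { return nil }

var _ Mover = remote{} // compile-time proof, no runtime cost

func main() {
	fmt.Println("remote satisfies Mover")
}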
@@ -6,6 +6,8 @@ package http

 import (
 	"context"
+	"errors"
+	"fmt"
 	"io"
 	"mime"
 	"net/http"
@@ -16,7 +18,6 @@ import (
 	"sync"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
@@ -49,7 +50,7 @@ Use this to set additional HTTP headers for all transactions.
 The input format is comma separated list of key,value pairs. Standard
 [CSV encoding](https://godoc.org/encoding/csv) may be used.

-For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
+For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.

 You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
 `,
@@ -132,7 +133,7 @@ func statusError(res *http.Response, err error) error {
 	}
 	if res.StatusCode < 200 || res.StatusCode > 299 {
 		_ = res.Body.Close()
-		return errors.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
+		return fmt.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
 	}
 	return nil
 }
@@ -377,15 +378,15 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
 	URL := f.url(dir)
 	u, err := url.Parse(URL)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to readDir")
+		return nil, fmt.Errorf("failed to readDir: %w", err)
 	}
 	if !strings.HasSuffix(URL, "/") {
-		return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
+		return nil, fmt.Errorf("internal error: readDir URL %q didn't end in /", URL)
 	}
 	// Do the request
 	req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
 	if err != nil {
-		return nil, errors.Wrap(err, "readDir failed")
+		return nil, fmt.Errorf("readDir failed: %w", err)
 	}
 	f.addHeaders(req)
 	res, err := f.httpClient.Do(req)
@@ -397,7 +398,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
 	}
 	err = statusError(res, err)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to readDir")
+		return nil, fmt.Errorf("failed to readDir: %w", err)
 	}

 	contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
@@ -405,10 +406,10 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
 	case "text/html":
 		names, err = parse(u, res.Body)
 		if err != nil {
-			return nil, errors.Wrap(err, "readDir")
+			return nil, fmt.Errorf("readDir: %w", err)
 		}
 	default:
-		return nil, errors.Errorf("Can't parse content type %q", contentType)
+		return nil, fmt.Errorf("Can't parse content type %q", contentType)
 	}
 	return names, nil
 }
@@ -428,7 +429,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	}
 	names, err := f.readDir(ctx, dir)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error listing %q", dir)
+		return nil, fmt.Errorf("error listing %q: %w", dir, err)
 	}
 	var (
 		entriesMu sync.Mutex // to protect entries
@@ -540,7 +541,7 @@ func (o *Object) stat(ctx context.Context) error {
 	url := o.url()
 	req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
 	if err != nil {
-		return errors.Wrap(err, "stat failed")
+		return fmt.Errorf("stat failed: %w", err)
 	}
 	o.fs.addHeaders(req)
 	res, err := o.fs.httpClient.Do(req)
@@ -549,7 +550,7 @@ func (o *Object) stat(ctx context.Context) error {
 	}
 	err = statusError(res, err)
 	if err != nil {
-		return errors.Wrap(err, "failed to stat")
+		return fmt.Errorf("failed to stat: %w", err)
 	}
 	t, err := http.ParseTime(res.Header.Get("Last-Modified"))
 	if err != nil {
@@ -562,7 +563,7 @@ func (o *Object) stat(ctx context.Context) error {
 	if o.fs.opt.NoSlash {
 		mediaType, _, err := mime.ParseMediaType(o.contentType)
 		if err != nil {
-			return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
+			return fmt.Errorf("failed to parse Content-Type: %q: %w", o.contentType, err)
 		}
 		if mediaType == "text/html" {
 			return fs.ErrorNotAFile
@@ -588,7 +589,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	url := o.url()
 	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
 	if err != nil {
-		return nil, errors.Wrap(err, "Open failed")
+		return nil, fmt.Errorf("Open failed: %w", err)
 	}

 	// Add optional headers
@@ -601,7 +602,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	res, err := o.fs.httpClient.Do(req)
 	err = statusError(res, err)
 	if err != nil {
-		return nil, errors.Wrap(err, "Open failed")
+		return nil, fmt.Errorf("Open failed: %w", err)
 	}
 	return res.Body, nil
 }

@@ -9,6 +9,7 @@ package hubic
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io/ioutil"
 	"net/http"
@@ -16,7 +17,6 @@ import (
 	"time"

 	swiftLib "github.com/ncw/swift/v2"
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/swift"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/configmap"
@@ -120,7 +120,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
 	if resp.StatusCode < 200 || resp.StatusCode > 299 {
 		body, _ := ioutil.ReadAll(resp.Body)
 		bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
-		return errors.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
+		return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
 	}
 	decoder := json.NewDecoder(resp.Body)
 	var result credentials
@@ -146,7 +146,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
 func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to configure Hubic")
+		return nil, fmt.Errorf("failed to configure Hubic: %w", err)
 	}

 	f := &Fs{
@@ -163,7 +163,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 	err = c.Authenticate(ctx)
 	if err != nil {
-		return nil, errors.Wrap(err, "error authenticating swift connection")
+		return nil, fmt.Errorf("error authenticating swift connection: %w", err)
 	}

 	// Parse config into swift.Options struct

@@ -2,10 +2,9 @@ package api

 import (
 	"encoding/xml"
+	"errors"
 	"fmt"
 	"time"
-
-	"github.com/pkg/errors"
 )

 const (

@@ -7,6 +7,7 @@ import (
 	"encoding/base64"
 	"encoding/hex"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -19,7 +20,6 @@ import (
 	"strings"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/jottacloud/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
@@ -69,6 +69,10 @@ const (
 	teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
 	teliaCloudAuthURL  = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
 	teliaCloudClientID = "desktop"
+
+	tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
+	tele2CloudAuthURL  = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
+	tele2CloudClientID = "desktop"
 )

 // Register with Fs
@@ -131,6 +135,9 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
 		}, {
 			Value: "telia",
 			Help:  "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
+		}, {
+			Value: "tele2",
+			Help:  "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
 		}})
 	case "auth_type_done":
 		// Jump to next state according to config chosen
@@ -146,12 +153,12 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
 		srv := rest.NewClient(fshttp.NewClient(ctx))
 		token, tokenEndpoint, err := doTokenAuth(ctx, srv, loginToken)
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to get oauth token")
+			return nil, fmt.Errorf("failed to get oauth token: %w", err)
 		}
 		m.Set(configTokenURL, tokenEndpoint)
 		err = oauthutil.PutToken(name, m, &token, true)
 		if err != nil {
-			return nil, errors.Wrap(err, "error while saving token")
+			return nil, fmt.Errorf("error while saving token: %w", err)
 		}
 		return fs.ConfigGoto("choose_device")
 	case "legacy": // configure a jottacloud backend using legacy authentication
@@ -168,7 +175,7 @@ machines.`)
 		if config.Result == "true" {
 			deviceRegistration, err := registerDevice(ctx, srv)
 			if err != nil {
-				return nil, errors.Wrap(err, "failed to register device")
+				return nil, fmt.Errorf("failed to register device: %w", err)
 			}
 			m.Set(configClientID, deviceRegistration.ClientID)
 			m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
@@ -216,11 +223,11 @@ machines.`)
 		m.Set("password", "")
 		m.Set("auth_code", "")
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to get oauth token")
+			return nil, fmt.Errorf("failed to get oauth token: %w", err)
 		}
 		err = oauthutil.PutToken(name, m, &token, true)
 		if err != nil {
-			return nil, errors.Wrap(err, "error while saving token")
+			return nil, fmt.Errorf("error while saving token: %w", err)
 		}
 		return fs.ConfigGoto("choose_device")
 	case "telia": // telia cloud config
@@ -238,6 +245,21 @@ machines.`)
 				RedirectURL: oauthutil.RedirectLocalhostURL,
 			},
 		})
+	case "tele2": // tele2 cloud config
+		m.Set("configVersion", fmt.Sprint(configVersion))
+		m.Set(configClientID, tele2CloudClientID)
+		m.Set(configTokenURL, tele2CloudTokenURL)
+		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
+			OAuth2Config: &oauth2.Config{
+				Endpoint: oauth2.Endpoint{
+					AuthURL:  tele2CloudAuthURL,
+					TokenURL: tele2CloudTokenURL,
+				},
+				ClientID:    tele2CloudClientID,
+				Scopes:      []string{"openid", "jotta-default", "offline_access"},
+				RedirectURL: oauthutil.RedirectLocalhostURL,
+			},
+		})
 	case "choose_device":
 		return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", "Use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?")
 	case "choose_device_query":
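Telia and Tele2 are white-label Jottacloud deployments, so their config arms differ only in the endpoint constants. A sketch of how such an arm could be factored into a shared helper (illustrative only; the commit keeps the arms inline, and `whiteLabelConfig` is a hypothetical name):

// whiteLabelConfig builds the oauthutil state transition for a white-label
// Jottacloud provider from its client ID and OpenID Connect endpoints.
func whiteLabelConfig(m configmap.Mapper, clientID, authURL, tokenURL string) (*fs.ConfigOut, error) {
	m.Set("configVersion", fmt.Sprint(configVersion))
	m.Set(configClientID, clientID)
	m.Set(configTokenURL, tokenURL)
	return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
		OAuth2Config: &oauth2.Config{
			Endpoint:    oauth2.Endpoint{AuthURL: authURL, TokenURL: tokenURL},
			ClientID:    clientID,
			Scopes:      []string{"openid", "jotta-default", "offline_access"},
			RedirectURL: oauthutil.RedirectLocalhostURL,
		},
	})
}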
@@ -529,7 +551,7 @@ func getCustomerInfo(ctx context.Context, apiSrv *rest.Client) (info *api.Custom

 	_, err = apiSrv.CallJSON(ctx, &opts, nil, &info)
 	if err != nil {
-		return nil, errors.Wrap(err, "couldn't get customer info")
+		return nil, fmt.Errorf("couldn't get customer info: %w", err)
 	}

 	return info, nil
@@ -544,7 +566,7 @@ func getDriveInfo(ctx context.Context, srv *rest.Client, username string) (info

 	_, err = srv.CallXML(ctx, &opts, nil, &info)
 	if err != nil {
-		return nil, errors.Wrap(err, "couldn't get drive info")
+		return nil, fmt.Errorf("couldn't get drive info: %w", err)
 	}

 	return info, nil
@@ -559,7 +581,7 @@ func getDeviceInfo(ctx context.Context, srv *rest.Client, path string) (info *ap

 	_, err = srv.CallXML(ctx, &opts, nil, &info)
 	if err != nil {
-		return nil, errors.Wrap(err, "couldn't get device info")
+		return nil, fmt.Errorf("couldn't get device info: %w", err)
 	}

 	return info, nil
@@ -597,7 +619,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
 	}

 	if err != nil {
-		return nil, errors.Wrap(err, "read metadata failed")
+		return nil, fmt.Errorf("read metadata failed: %w", err)
 	}
 	if result.XMLName.Local == "folder" {
 		return nil, fs.ErrorIsDir
@@ -720,7 +742,7 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
 	// Create OAuth Client
 	oAuthClient, ts, err = oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
 	if err != nil {
-		return nil, nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
+		return nil, nil, fmt.Errorf("Failed to configure Jottacloud oauth client: %w", err)
 	}
 	return oAuthClient, ts, nil
 }
@@ -786,7 +808,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		}
 		_, err := f.NewObject(context.TODO(), remote)
 		if err != nil {
-			if uErr := errors.Cause(err); uErr == fs.ErrorObjectNotFound || uErr == fs.ErrorNotAFile || uErr == fs.ErrorIsDir {
+			if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorNotAFile) || errors.Is(err, fs.ErrorIsDir) {
 				// File doesn't exist so return old f
 				f.root = root
 				return f, nil
@@ -881,7 +903,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 				return nil, fs.ErrorDirNotFound
 			}
 		}
-		return nil, errors.Wrap(err, "couldn't list files")
+		return nil, fmt.Errorf("couldn't list files: %w", err)
 	}

 	if !f.validFolder(&result) {
@@ -981,7 +1003,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 				return fs.ErrorDirNotFound
 			}
 		}
-		return errors.Wrap(err, "couldn't list files")
+		return fmt.Errorf("couldn't list files: %w", err)
 	}
 	list := walk.NewListRHelper(callback)
 	err = f.listFileDir(ctx, dir, &result, func(entry fs.DirEntry) error {
@@ -1081,7 +1103,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "couldn't purge directory")
+		return fmt.Errorf("couldn't purge directory: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -1148,7 +1170,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
|
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "couldn't copy file")
|
return nil, fmt.Errorf("couldn't copy file: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return f.newObjectWithInfo(ctx, remote, info)
|
return f.newObjectWithInfo(ctx, remote, info)
|
||||||
@@ -1178,7 +1200,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)
|
info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "couldn't move file")
|
return nil, fmt.Errorf("couldn't move file: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return f.newObjectWithInfo(ctx, remote, info)
|
return f.newObjectWithInfo(ctx, remote, info)
|
||||||
@@ -1222,7 +1244,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
|||||||
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
|
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "couldn't move directory")
|
return fmt.Errorf("couldn't move directory: %w", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -1256,13 +1278,13 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
|||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if unlink {
|
if unlink {
|
||||||
return "", errors.Wrap(err, "couldn't remove public link")
|
return "", fmt.Errorf("couldn't remove public link: %w", err)
|
||||||
}
|
}
|
||||||
return "", errors.Wrap(err, "couldn't create public link")
|
return "", fmt.Errorf("couldn't create public link: %w", err)
|
||||||
}
|
}
|
||||||
if unlink {
|
if unlink {
|
||||||
if result.PublicURI != "" {
|
if result.PublicURI != "" {
|
||||||
return "", errors.Errorf("couldn't remove public link - %q", result.PublicURI)
|
return "", fmt.Errorf("couldn't remove public link - %q", result.PublicURI)
|
||||||
}
|
}
|
||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
@@ -1322,7 +1344,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
|||||||
var info api.TrashResponse
|
var info api.TrashResponse
|
||||||
_, err := f.apiSrv.CallJSON(ctx, &opts, nil, &info)
|
_, err := f.apiSrv.CallJSON(ctx, &opts, nil, &info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "couldn't empty trash")
|
return fmt.Errorf("couldn't empty trash: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -1584,7 +1606,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
// if the object exists delete it
|
// if the object exists delete it
|
||||||
err = o.remove(ctx, true)
|
err = o.remove(ctx, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "failed to remove old object")
|
return fmt.Errorf("failed to remove old object: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// if the object does not exist we can just continue but if the error is something different we should report that
|
// if the object does not exist we can just continue but if the error is something different we should report that
|
||||||
@@ -1605,7 +1627,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
md5String, in, cleanup, err = readMD5(in, size, int64(o.fs.opt.MD5MemoryThreshold))
|
md5String, in, cleanup, err = readMD5(in, size, int64(o.fs.opt.MD5MemoryThreshold))
|
||||||
defer cleanup()
|
defer cleanup()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "failed to calculate MD5")
|
return fmt.Errorf("failed to calculate MD5: %w", err)
|
||||||
}
|
}
|
||||||
// Wrap the accounting back onto the stream
|
// Wrap the accounting back onto the stream
|
||||||
in = wrap(in)
|
in = wrap(in)
|
||||||
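
Note: the hunks above all apply the same mechanical rewrite: errors.Wrap(err, "msg") from github.com/pkg/errors becomes fmt.Errorf("msg: %w", err) from the standard library. A minimal, self-contained sketch of the pattern follows; the readConfig helper is hypothetical and not part of this diff.

package main

import (
	"errors"
	"fmt"
	"os"
)

// readConfig stands in for any of the wrapped call sites above.
func readConfig() error {
	err := os.ErrNotExist // pretend the underlying call failed
	// old: return errors.Wrap(err, "failed to read config")
	// new: the %w verb records err as the wrapped cause
	return fmt.Errorf("failed to read config: %w", err)
}

func main() {
	err := readConfig()
	fmt.Println(err)                            // failed to read config: file does not exist
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true: the chain stays inspectable
}

Because %w keeps the original error reachable, call sites that previously compared errors.Cause(err) against sentinels can switch to errors.Is, as the NewFs hunk above does.
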
@@ -5,10 +5,10 @@ package local

 import (
 "context"
+"fmt"
 "os"
 "syscall"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 )

@@ -20,7 +20,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 if os.IsNotExist(err) {
 return nil, fs.ErrorDirNotFound
 }
-return nil, errors.Wrap(err, "failed to read disk usage")
+return nil, fmt.Errorf("failed to read disk usage: %w", err)
 }
 bs := int64(s.Bsize) // nolint: unconvert
 usage := &fs.Usage{
@@ -5,10 +5,10 @@ package local

 import (
 "context"
+"fmt"
 "syscall"
 "unsafe"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 )

@@ -24,7 +24,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
 )
 if e1 != syscall.Errno(0) {
-return nil, errors.Wrap(e1, "failed to read disk usage")
+return nil, fmt.Errorf("failed to read disk usage: %w", e1)
 }
 usage := &fs.Usage{
 Total: fs.NewUsageValue(total), // quota of bytes that can be used
@@ -4,6 +4,7 @@ package local

 import (
 "bytes"
 "context"
+"errors"
 "fmt"
 "io"
 "io/ioutil"

@@ -16,7 +17,6 @@ import (
 "time"
 "unicode/utf8"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/accounting"
 "github.com/rclone/rclone/fs/config"

@@ -432,7 +432,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 fd, err := os.Open(fsDirPath)
 if err != nil {
 isPerm := os.IsPermission(err)
-err = errors.Wrapf(err, "failed to open directory %q", dir)
+err = fmt.Errorf("failed to open directory %q: %w", dir, err)
 fs.Errorf(dir, "%v", err)
 if isPerm {
 _ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err))

@@ -443,7 +443,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 defer func() {
 cerr := fd.Close()
 if cerr != nil && err == nil {
-err = errors.Wrapf(cerr, "failed to close directory %q:", dir)
+err = fmt.Errorf("failed to close directory %q:: %w", dir, cerr)
 }
 }()

@@ -473,7 +473,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 continue
 }
 if fierr != nil {
-err = errors.Wrapf(err, "failed to read directory %q", namepath)
+err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
 fs.Errorf(dir, "%v", fierr)
 _ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
 continue

@@ -483,7 +483,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 }
 }
 if err != nil {
-return nil, errors.Wrap(err, "failed to read directory entry")
+return nil, fmt.Errorf("failed to read directory entry: %w", err)
 }

 for _, fi := range fis {

@@ -496,7 +496,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 fi, err = os.Stat(localPath)
 if os.IsNotExist(err) || isCircularSymlinkError(err) {
 // Skip bad symlinks and circular symlinks
-err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
+err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
 fs.Errorf(newRemote, "Listing error: %v", err)
 err = accounting.Stats(ctx).Error(err)
 continue

@@ -672,7 +672,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 return err
 }
 if !fi.Mode().IsDir() {
-return errors.Errorf("can't purge non directory: %q", dir)
+return fmt.Errorf("can't purge non directory: %q", dir)
 }
 return os.RemoveAll(dir)
 }

@@ -866,12 +866,12 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
 err := o.lstat()
 var changed bool
 if err != nil {
-if os.IsNotExist(errors.Cause(err)) {
+if errors.Is(err, os.ErrNotExist) {
 // If file not found then we assume any accumulated
 // hashes are OK - this will error on Open
 changed = true
 } else {
-return "", errors.Wrap(err, "hash: failed to stat")
+return "", fmt.Errorf("hash: failed to stat: %w", err)
 }
 } else {
 o.fs.objectMetaMu.RLock()

@@ -900,16 +900,16 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
 in = readers.NewLimitedReadCloser(in, o.size)
 }
 if err != nil {
-return "", errors.Wrap(err, "hash: failed to open")
+return "", fmt.Errorf("hash: failed to open: %w", err)
 }
 var hashes map[hash.Type]string
 hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
 closeErr := in.Close()
 if err != nil {
-return "", errors.Wrap(err, "hash: failed to read")
+return "", fmt.Errorf("hash: failed to read: %w", err)
 }
 if closeErr != nil {
-return "", errors.Wrap(closeErr, "hash: failed to close")
+return "", fmt.Errorf("hash: failed to close: %w", closeErr)
 }
 hashValue = hashes[r]
 o.fs.objectMetaMu.Lock()

@@ -990,17 +990,17 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
 // Check if file has the same size and modTime
 fi, err := file.fd.Stat()
 if err != nil {
-return 0, errors.Wrap(err, "can't read status of source file while transferring")
+return 0, fmt.Errorf("can't read status of source file while transferring: %w", err)
 }
 file.o.fs.objectMetaMu.RLock()
 oldtime := file.o.modTime
 oldsize := file.o.size
 file.o.fs.objectMetaMu.RUnlock()
 if oldsize != fi.Size() {
-return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
+return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
 }
 if !oldtime.Equal(fi.ModTime()) {
-return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
+return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
 }
 }
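
The Hash hunk above is the one substantive change in this file: os.IsNotExist(errors.Cause(err)) only unwraps pkg/errors wrapper types, while errors.Is(err, os.ErrNotExist) follows any Unwrap() error chain, including errors wrapped with %w. A rough sketch of why the new form is more robust; the lstatWrapped helper is invented for illustration.

package main

import (
	"errors"
	"fmt"
	"os"
)

// lstatWrapped mirrors the shape of o.lstat() plus the wrapping above.
func lstatWrapped(path string) error {
	if _, err := os.Lstat(path); err != nil {
		return fmt.Errorf("hash: failed to stat: %w", err)
	}
	return nil
}

func main() {
	err := lstatWrapped("/no/such/path")
	// pkg/errors' Cause only unwraps its own wrapper type, so
	// os.IsNotExist(errors.Cause(err)) would miss a fmt.Errorf %w wrapper.
	// errors.Is follows every Unwrap in the chain and matches the sentinel:
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true
}
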
@@ -93,16 +93,16 @@ func TestSymlink(t *testing.T) {
 file2d := fstest.NewItem("symlink.txt", "hello", modTime1)

 // Check with no symlink flags
-fstest.CheckItems(t, r.Flocal, file1)
+r.CheckLocalItems(t, file1)
-fstest.CheckItems(t, r.Fremote)
+r.CheckRemoteItems(t)

 // Set fs into "-L" mode
 f.opt.FollowSymlinks = true
 f.opt.TranslateSymlinks = false
 f.lstat = os.Stat

-fstest.CheckItems(t, r.Flocal, file1, file2d)
+r.CheckLocalItems(t, file1, file2d)
-fstest.CheckItems(t, r.Fremote)
+r.CheckRemoteItems(t)

 // Set fs into "-l" mode
 f.opt.FollowSymlinks = false

@@ -111,7 +111,7 @@ func TestSymlink(t *testing.T) {

 fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2}, nil, fs.ModTimeNotSupported)
 if haveLChtimes {
-fstest.CheckItems(t, r.Flocal, file1, file2)
+r.CheckLocalItems(t, file1, file2)
 }

 // Create a symlink

@@ -119,7 +119,7 @@ func TestSymlink(t *testing.T) {
 file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
 fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
 if haveLChtimes {
-fstest.CheckItems(t, r.Flocal, file1, file2, file3)
+r.CheckLocalItems(t, file1, file2, file3)
 }

 // Check it got the correct contents
@@ -11,7 +11,8 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 fstests.Run(t, &fstests.Opt{
 RemoteName: "",
 NilObject: (*local.Object)(nil),
+QuickTestOK: true,
 })
 }
@@ -6,11 +6,11 @@ import (
 "bufio"
 "bytes"
 "encoding/binary"
+"errors"
 "fmt"
 "io"
 "time"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/lib/readers"
 )

@@ -3,6 +3,7 @@ package mailru
 import (
 "bytes"
 "context"
+"errors"
 "fmt"
 gohash "hash"
 "io"

@@ -40,7 +41,6 @@ import (
 "github.com/rclone/rclone/lib/readers"
 "github.com/rclone/rclone/lib/rest"

-"github.com/pkg/errors"
 "golang.org/x/oauth2"
 )

@@ -269,7 +269,7 @@ func errorHandler(res *http.Response) (err error) {
 }
 serverError.Message = string(data)
 if serverError.Message == "" || strings.HasPrefix(serverError.Message, "{") {
-// Replace empty or JSON response with a human readable text.
+// Replace empty or JSON response with a human-readable text.
 serverError.Message = res.Status
 }
 serverError.Status = res.StatusCode

@@ -438,7 +438,7 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
 err = errors.New("Invalid token")
 }
 if err != nil {
-return errors.Wrap(err, "Failed to authorize")
+return fmt.Errorf("Failed to authorize: %w", err)
 }

 if err = oauthutil.PutToken(f.name, f.m, t, false); err != nil {

@@ -507,7 +507,7 @@ func (f *Fs) reAuthorize(opts *rest.Opts, origErr error) error {
 func (f *Fs) accessToken() (string, error) {
 token, err := f.source.Token()
 if err != nil {
-return "", errors.Wrap(err, "cannot refresh access token")
+return "", fmt.Errorf("cannot refresh access token: %w", err)
 }
 return token.AccessToken, nil
 }

@@ -1196,7 +1196,7 @@ func (f *Fs) purgeWithCheck(ctx context.Context, dir string, check bool, opName

 _, dirSize, err := f.readItemMetaData(ctx, path)
 if err != nil {
-return errors.Wrapf(err, "%s failed", opName)
+return fmt.Errorf("%s failed: %w", opName, err)
 }
 if check && dirSize > 0 {
 return fs.ErrorDirectoryNotEmpty

@@ -1300,7 +1300,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 })

 if err != nil {
-return nil, errors.Wrap(err, "couldn't copy file")
+return nil, fmt.Errorf("couldn't copy file: %w", err)
 }
 if response.Status != 200 {
 return nil, fmt.Errorf("copy failed with code %d", response.Status)

@@ -1684,7 +1684,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 spoolFile, mrHash, err := makeTempFile(ctx, tmpFs, wrapIn, src)
 if err != nil {
-return errors.Wrap(err, "Failed to create spool file")
+return fmt.Errorf("Failed to create spool file: %w", err)
 }
 if o.putByHash(ctx, mrHash, src, "spool") {
 // If put by hash is successful, ignore transitive error

@@ -2318,7 +2318,7 @@ func (p *serverPool) Dispatch(ctx context.Context, current string) (string, erro
 })
 if err != nil || url == "" {
 closeBody(res)
-return "", errors.Wrap(err, "Failed to request file server")
+return "", fmt.Errorf("Failed to request file server: %w", err)
 }

 p.addServer(url, now)
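
As in the other backends, the import hunks here swap github.com/pkg/errors for the stdlib pair errors + fmt. Call sites such as errors.New("Invalid token") compile unchanged because the standard library exports the same constructor; only the wrapping calls needed rewriting. A compressed sketch of the resulting shape; the names are illustrative, not the real mailru code.

package main

import (
	"errors" // stdlib errors now satisfies the errors.New call sites
	"fmt"
)

var errInvalidToken = errors.New("Invalid token")

// authorize mirrors the shape of (*Fs).authorize above.
func authorize(tokenValid bool) error {
	var err error
	if !tokenValid {
		err = errInvalidToken
	}
	if err != nil {
		return fmt.Errorf("Failed to authorize: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(authorize(false)) // Failed to authorize: Invalid token
}
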
@@ -17,6 +17,7 @@ Improvements:

 import (
 "context"
+"errors"
 "fmt"
 "io"
 "path"

@@ -24,7 +25,6 @@ import (
 "sync"
 "time"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/config"
 "github.com/rclone/rclone/fs/config/configmap"

@@ -165,13 +165,6 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
 }
 // Let the mega library handle the low level retries
 return false, err
-/*
-switch errors.Cause(err) {
-case mega.EAGAIN, mega.ERATELIMIT, mega.ETEMPUNAVAIL:
-return true, err
-}
-return fserrors.ShouldRetry(err), err
-*/
 }

 // readMetaDataForPath reads the metadata from the path

@@ -195,7 +188,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 var err error
 opt.Pass, err = obscure.Reveal(opt.Pass)
 if err != nil {
-return nil, errors.Wrap(err, "couldn't decrypt password")
+return nil, fmt.Errorf("couldn't decrypt password: %w", err)
 }
 }
 ci := fs.GetConfig(ctx)

@@ -222,7 +215,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

 err := srv.Login(opt.User, opt.Pass)
 if err != nil {
-return nil, errors.Wrap(err, "couldn't login")
+return nil, fmt.Errorf("couldn't login: %w", err)
 }
 megaCache[opt.User] = srv
 }

@@ -261,7 +254,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

 // splitNodePath splits nodePath into / separated parts, returning nil if it
 // should refer to the root.
-// It also encodes the parts into backend specific encoding
+// It also encodes the parts into backend-specific encoding
 func (f *Fs) splitNodePath(nodePath string) (parts []string) {
 nodePath = path.Clean(nodePath)
 if nodePath == "." || nodePath == "/" {

@@ -350,11 +343,11 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
 break
 }
 if err != mega.ENOENT {
-return nil, errors.Wrap(err, "mkdir lookup failed")
+return nil, fmt.Errorf("mkdir lookup failed: %w", err)
 }
 }
 if err != nil {
-return nil, errors.Wrap(err, "internal error: mkdir called with non existent root node")
+return nil, fmt.Errorf("internal error: mkdir called with non-existent root node: %w", err)
 }
 // i is number of directories to create (may be 0)
 // node is directory to create them from

@@ -365,7 +358,7 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "mkdir create node failed")
+return nil, fmt.Errorf("mkdir create node failed: %w", err)
 }
 }
 return node, nil

@@ -428,7 +421,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
 return false
 })
 if err != nil {
-return errors.Wrap(err, "CleanUp failed to list items in trash")
+return fmt.Errorf("CleanUp failed to list items in trash: %w", err)
 }
 fs.Infof(f, "Deleting %d items from the trash", len(items))
 errors := 0

@@ -489,7 +482,7 @@ type listFn func(*mega.Node) bool
 func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, err error) {
 nodes, err := f.srv.FS.GetChildren(dir)
 if err != nil {
-return false, errors.Wrapf(err, "list failed")
+return false, fmt.Errorf("list failed: %w", err)
 }
 for _, item := range nodes {
 if fn(item) {

@@ -609,7 +602,10 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 return err
 }
 _, err = f.mkdir(ctx, rootNode, dir)
-return errors.Wrap(err, "Mkdir failed")
+if err != nil {
+return fmt.Errorf("Mkdir failed: %w", err)
+}
+return nil
 }

 // deleteNode removes a file or directory, observing useTrash

@@ -639,7 +635,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 if check {
 children, err := f.srv.FS.GetChildren(dirNode)
 if err != nil {
-return errors.Wrap(err, "purgeCheck GetChildren failed")
+return fmt.Errorf("purgeCheck GetChildren failed: %w", err)
 }
 if len(children) > 0 {
 return fs.ErrorDirectoryNotEmpty

@@ -650,7 +646,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {

 err = f.deleteNode(ctx, dirNode)
 if err != nil {
-return errors.Wrap(err, "delete directory node failed")
+return fmt.Errorf("delete directory node failed: %w", err)
 }

 // Remove the root node if we just deleted it

@@ -704,7 +700,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
 dstDirNode, err = dstFs.mkdir(ctx, absRoot, dstParent)
 }
 if err != nil {
-return errors.Wrap(err, "server-side move failed to make dst parent dir")
+return fmt.Errorf("server-side move failed to make dst parent dir: %w", err)
 }

 if srcRemote != "" {

@@ -717,7 +713,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
 srcDirNode, err = f.findDir(absRoot, srcParent)
 }
 if err != nil {
-return errors.Wrap(err, "server-side move failed to lookup src parent dir")
+return fmt.Errorf("server-side move failed to lookup src parent dir: %w", err)
 }

 // move the object into its new directory if required

@@ -728,7 +724,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return errors.Wrap(err, "server-side move failed")
+return fmt.Errorf("server-side move failed: %w", err)
 }
 }

@@ -742,7 +738,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return errors.Wrap(err, "server-side rename failed")
+return fmt.Errorf("server-side rename failed: %w", err)
 }
 }

@@ -812,7 +808,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 if err == nil {
 return fs.ErrorDirExists
 } else if err != fs.ErrorDirNotFound {
-return errors.Wrap(err, "DirMove error while checking dest directory")
+return fmt.Errorf("DirMove error while checking dest directory: %w", err)
 }

 // Do the move

@@ -844,15 +840,15 @@ func (f *Fs) Hashes() hash.Set {
 func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
 root, err := f.findRoot(ctx, false)
 if err != nil {
-return "", errors.Wrap(err, "PublicLink failed to find root node")
+return "", fmt.Errorf("PublicLink failed to find root node: %w", err)
 }
 node, err := f.findNode(root, remote)
 if err != nil {
-return "", errors.Wrap(err, "PublicLink failed to find path")
+return "", fmt.Errorf("PublicLink failed to find path: %w", err)
 }
 link, err = f.srv.Link(node, true)
 if err != nil {
-return "", errors.Wrap(err, "PublicLink failed to create link")
+return "", fmt.Errorf("PublicLink failed to create link: %w", err)
 }
 return link, nil
 }

@@ -867,13 +863,13 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
 dstDir := dirs[0]
 dstDirNode := f.srv.FS.HashLookup(dstDir.ID())
 if dstDirNode == nil {
-return errors.Errorf("MergeDirs failed to find node for: %v", dstDir)
+return fmt.Errorf("MergeDirs failed to find node for: %v", dstDir)
 }
 for _, srcDir := range dirs[1:] {
 // find src directory
 srcDirNode := f.srv.FS.HashLookup(srcDir.ID())
 if srcDirNode == nil {
-return errors.Errorf("MergeDirs failed to find node for: %v", srcDir)
+return fmt.Errorf("MergeDirs failed to find node for: %v", srcDir)
 }

 // list the objects

@@ -883,7 +879,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
 return false
 })
 if err != nil {
-return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
+return fmt.Errorf("MergeDirs list failed on %v: %w", srcDir, err)
 }
 // move them into place
 for _, info := range infos {

@@ -893,14 +889,14 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return errors.Wrapf(err, "MergeDirs move failed on %q in %v", f.opt.Enc.ToStandardName(info.GetName()), srcDir)
+return fmt.Errorf("MergeDirs move failed on %q in %v: %w", f.opt.Enc.ToStandardName(info.GetName()), srcDir, err)
 }
 }
 // rmdir (into trash) the now empty source directory
 fs.Infof(srcDir, "removing empty directory")
 err = f.deleteNode(ctx, srcDirNode)
 if err != nil {
-return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
+return fmt.Errorf("MergeDirs move failed to rmdir %q: %w", srcDir, err)
 }
 }
 return nil

@@ -915,7 +911,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "failed to get Mega Quota")
+return nil, fmt.Errorf("failed to get Mega Quota: %w", err)
 }
 usage := &fs.Usage{
 Total: fs.NewUsageValue(int64(q.Mstrg)), // quota of bytes that can be used

@@ -1076,7 +1072,7 @@ func (oo *openObject) Close() (err error) {
 return shouldRetry(oo.ctx, err)
 })
 if err != nil {
-return errors.Wrap(err, "failed to finish download")
+return fmt.Errorf("failed to finish download: %w", err)
 }
 oo.closed = true
 return nil

@@ -1104,7 +1100,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "open download file failed")
+return nil, fmt.Errorf("open download file failed: %w", err)
 }

 oo := &openObject{

@@ -1133,7 +1129,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 // Create the parent directory
 dirNode, leaf, err := o.fs.mkdirParent(ctx, remote)
 if err != nil {
-return errors.Wrap(err, "update make parent dir failed")
+return fmt.Errorf("update make parent dir failed: %w", err)
 }

 var u *mega.Upload

@@ -1142,7 +1138,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return errors.Wrap(err, "upload file failed to create session")
+return fmt.Errorf("upload file failed to create session: %w", err)
 }

 // Upload the chunks

@@ -1150,12 +1146,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 for id := 0; id < u.Chunks(); id++ {
 _, chunkSize, err := u.ChunkLocation(id)
 if err != nil {
-return errors.Wrap(err, "upload failed to read chunk location")
+return fmt.Errorf("upload failed to read chunk location: %w", err)
 }
 chunk := make([]byte, chunkSize)
 _, err = io.ReadFull(in, chunk)
 if err != nil {
-return errors.Wrap(err, "upload failed to read data")
+return fmt.Errorf("upload failed to read data: %w", err)
 }

 err = o.fs.pacer.Call(func() (bool, error) {

@@ -1163,7 +1159,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return errors.Wrap(err, "upload file failed to upload chunk")
+return fmt.Errorf("upload file failed to upload chunk: %w", err)
 }
 }

@@ -1174,14 +1170,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return errors.Wrap(err, "failed to finish upload")
+return fmt.Errorf("failed to finish upload: %w", err)
 }

 // If the upload succeeded and the original object existed, then delete it
 if o.info != nil {
 err = o.fs.deleteNode(ctx, o.info)
 if err != nil {
-return errors.Wrap(err, "upload failed to remove old version")
+return fmt.Errorf("upload failed to remove old version: %w", err)
 }
 o.info = nil
 }

@@ -1193,7 +1189,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 func (o *Object) Remove(ctx context.Context) error {
 err := o.fs.deleteNode(ctx, o.info)
 if err != nil {
-return errors.Wrap(err, "Remove object failed")
+return fmt.Errorf("Remove object failed: %w", err)
 }
 return nil
 }
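
The Mkdir hunk above is more than style: errors.Wrap(nil, "Mkdir failed") returns nil, so the old one-liner was a valid success path, whereas fmt.Errorf("Mkdir failed: %w", err) returns a non-nil error even when err is nil. That is why the conversion expands the one-liner into an explicit nil check. A small demonstration, with mkdir as a hypothetical stand-in for f.mkdir:

package main

import (
	"errors"
	"fmt"
)

// mkdir is a hypothetical stand-in for f.mkdir in the hunk above.
func mkdir(fail bool) error {
	if fail {
		return errors.New("boom")
	}
	return nil
}

// Wrong: fmt.Errorf always returns a non-nil error, even when err is nil,
// so this reports failure on success.
func mkdirWrong() error {
	err := mkdir(false)
	return fmt.Errorf("Mkdir failed: %w", err)
}

// Right: matches the rewritten hunk - only wrap when err != nil.
func mkdirRight() error {
	err := mkdir(false)
	if err != nil {
		return fmt.Errorf("Mkdir failed: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(mkdirWrong() == nil) // false: a phantom error on the success path
	fmt.Println(mkdirRight() == nil) // true
}

The same expansion appears again in the onedrive waitForJob hunk further down.
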
@@ -14,7 +14,6 @@ import (
 "sync"
 "time"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/config/configmap"
 "github.com/rclone/rclone/fs/config/configstruct"

@@ -586,7 +585,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 bucket, bucketPath := o.split()
 data, err := ioutil.ReadAll(in)
 if err != nil {
-return errors.Wrap(err, "failed to update memory object")
+return fmt.Errorf("failed to update memory object: %w", err)
 }
 o.od = &objectData{
 data: data,
@@ -10,7 +10,8 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 fstests.Run(t, &fstests.Opt{
 RemoteName: ":memory:",
 NilObject: (*Object)(nil),
+QuickTestOK: true,
 })
 }
backend/onedrive/onedrive.go | 52 | Executable file → Normal file

@@ -7,6 +7,7 @@ import (
 "encoding/base64"
 "encoding/hex"
 "encoding/json"
+"errors"
 "fmt"
 "io"
 "net/http"

@@ -18,7 +19,6 @@ import (
 "sync"
 "time"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/backend/onedrive/api"
 "github.com/rclone/rclone/backend/onedrive/quickxorhash"
 "github.com/rclone/rclone/fs"

@@ -141,7 +141,7 @@ Note that the chunks will be buffered into memory.`,
 Name: "expose_onenote_files",
 Help: `Set to make OneNote files show up in directory listings.

-By default rclone will hide OneNote files in directory listings because
+By default, rclone will hide OneNote files in directory listings because
 operations like "Open" and "Update" won't work on them. But this
 behaviour may also prevent you from deleting them. If you want to
 delete OneNote files or otherwise want them to show up in directory

@@ -385,7 +385,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf

 oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
 if err != nil {
-return nil, errors.Wrap(err, "failed to configure OneDrive")
+return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
 }
 srv := rest.NewClient(oAuthClient)

@@ -754,10 +754,10 @@ func errorHandler(resp *http.Response) error {
 func checkUploadChunkSize(cs fs.SizeSuffix) error {
 const minChunkSize = fs.SizeSuffixBase
 if cs%chunkSizeMultiple != 0 {
-return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
+return fmt.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
 }
 if cs < minChunkSize {
-return errors.Errorf("%s is less than %s", cs, minChunkSize)
+return fmt.Errorf("%s is less than %s", cs, minChunkSize)
 }
 return nil
 }

@@ -781,7 +781,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

 err = checkUploadChunkSize(opt.ChunkSize)
 if err != nil {
-return nil, errors.Wrap(err, "onedrive: chunk size")
+return nil, fmt.Errorf("onedrive: chunk size: %w", err)
 }

 if opt.DriveID == "" || opt.DriveType == "" {

@@ -797,7 +797,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 root = parsePath(root)
 oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
 if err != nil {
-return nil, errors.Wrap(err, "failed to configure OneDrive")
+return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
 }

 ci := fs.GetConfig(ctx)

@@ -827,8 +827,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

 // Get rootID
 rootInfo, _, err := f.readMetaDataForPath(ctx, "")
-if err != nil || rootInfo.GetID() == "" {
+if err != nil {
-return nil, errors.Wrap(err, "failed to get root")
+return nil, fmt.Errorf("failed to get root: %w", err)
+}
+if rootInfo.GetID() == "" {
+return nil, errors.New("failed to get root: ID was empty")
 }

 f.dirCache = dircache.New(root, rootInfo.GetID(), f)

@@ -971,7 +974,7 @@ OUTER:
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return found, errors.Wrap(err, "couldn't list files")
+return found, fmt.Errorf("couldn't list files: %w", err)
 }
 if len(result.Value) == 0 {
 break

@@ -1175,7 +1178,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
 var status api.AsyncOperationStatus
 err = json.Unmarshal(body, &status)
 if err != nil {
-return errors.Wrapf(err, "async status result not JSON: %q", body)
+return fmt.Errorf("async status result not JSON: %q: %w", body, err)
 }

 switch status.Status {

@@ -1185,15 +1188,18 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
 }
 fallthrough
 case "deleteFailed":
-return errors.Errorf("%s: async operation returned %q", o.remote, status.Status)
+return fmt.Errorf("%s: async operation returned %q", o.remote, status.Status)
 case "completed":
 err = o.readMetaData(ctx)
-return errors.Wrapf(err, "async operation completed but readMetaData failed")
+if err != nil {
+return fmt.Errorf("async operation completed but readMetaData failed: %w", err)
+}
+return nil
 }

 time.Sleep(1 * time.Second)
 }
-return errors.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
+return fmt.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
 }

 // Copy src to this remote using server-side copy operations.

@@ -1232,7 +1238,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 srcPath := srcObj.rootPath()
 dstPath := f.rootPath(remote)
 if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
-return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
+return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
 }
 }

@@ -1450,7 +1456,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "about failed")
+return nil, fmt.Errorf("about failed: %w", err)
 }
 q := drive.Quota
 // On (some?) Onedrive sharepoints these are all 0 so return unknown in that case

@@ -1501,7 +1507,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 })
 if err != nil {
 if resp != nil && resp.StatusCode == 400 && f.driveType != driveTypePersonal {
-return "", errors.Errorf("%v (is making public links permitted by the org admin?)", err)
+return "", fmt.Errorf("%v (is making public links permitted by the org admin?)", err)
 }
 return "", err
 }

@@ -1886,17 +1892,17 @@ func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err er
 return 0, err
 }
 if len(info.NextExpectedRanges) != 1 {
-return 0, errors.Errorf("bad number of ranges in upload position: %v", info.NextExpectedRanges)
+return 0, fmt.Errorf("bad number of ranges in upload position: %v", info.NextExpectedRanges)
 }
 position := info.NextExpectedRanges[0]
 i := strings.IndexByte(position, '-')
 if i < 0 {
-return 0, errors.Errorf("no '-' in next expected range: %q", position)
+return 0, fmt.Errorf("no '-' in next expected range: %q", position)
 }
 position = position[:i]
 pos, err = strconv.ParseInt(position, 10, 64)
 if err != nil {
-return 0, errors.Wrapf(err, "bad expected range: %q", position)
+return 0, fmt.Errorf("bad expected range: %q: %w", position, err)
 }
 return pos, nil
 }

@@ -1930,14 +1936,14 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
 fs.Debugf(o, "Read position %d, chunk is %d..%d, bytes to skip = %d", pos, start, start+chunkSize, skip)
 switch {
 case skip < 0:
-return false, errors.Wrapf(err, "sent block already (skip %d < 0), can't rewind", skip)
+return false, fmt.Errorf("sent block already (skip %d < 0), can't rewind: %w", skip, err)
 case skip > chunkSize:
-return false, errors.Wrapf(err, "position is in the future (skip %d > chunkSize %d), can't skip forward", skip, chunkSize)
+return false, fmt.Errorf("position is in the future (skip %d > chunkSize %d), can't skip forward: %w", skip, chunkSize, err)
 case skip == chunkSize:
 fs.Debugf(o, "Skipping chunk as already sent (skip %d == chunkSize %d)", skip, chunkSize)
 return false, nil
 }
-return true, errors.Wrapf(err, "retry this chunk skipping %d bytes", skip)
+return true, fmt.Errorf("retry this chunk skipping %d bytes: %w", skip, err)
 }
 if err != nil {
 return shouldRetry(ctx, resp, err)
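
For errors.Wrapf the conversion also has to reorder arguments: the error moves from the first position to the last, matching a trailing : %w verb, as in the getPosition and uploadFragment hunks above. A quick check of the pattern; the values are made up for illustration:

package main

import (
	"errors"
	"fmt"
)

func main() {
	position := "12345-"
	err := errors.New("invalid syntax") // stand-in for the strconv failure

	// old: errors.Wrapf(err, "bad expected range: %q", position)
	// new: the format args keep their order, err goes last for %w
	wrapped := fmt.Errorf("bad expected range: %q: %w", position, err)

	fmt.Println(wrapped)                       // bad expected range: "12345-": invalid syntax
	fmt.Println(errors.Unwrap(wrapped) == err) // true
}
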
|
|||||||
backend/opendrive/opendrive.go

@@ -2,6 +2,7 @@ package opendrive
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -11,7 +12,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
@@ -210,7 +210,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to create session")
+		return nil, fmt.Errorf("failed to create session: %w", err)
 	}
 	fs.Debugf(nil, "Starting OpenDrive session with ID: %s", f.session.SessionID)
 
@@ -362,7 +362,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	srcPath := srcObj.fs.rootSlash() + srcObj.remote
 	dstPath := f.rootSlash() + remote
 	if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
-		return nil, errors.Errorf("Can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
+		return nil, fmt.Errorf("Can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
 	}
 
 	// Create temporary object
@@ -636,7 +636,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 		return o.fs.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to create file")
+		return nil, fmt.Errorf("failed to create file: %w", err)
 	}
 
 	o.id = response.FileID
@@ -719,7 +719,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return "", false, errors.Wrap(err, "failed to get folder list")
+		return "", false, fmt.Errorf("failed to get folder list: %w", err)
 	}
 
 	leaf = f.opt.Enc.FromStandardName(leaf)
@@ -762,7 +762,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to get folder list")
+		return nil, fmt.Errorf("failed to get folder list: %w", err)
 	}
 
 	for _, folder := range folderList.Folders {
@@ -871,7 +871,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		return o.fs.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to open file)")
+		return nil, fmt.Errorf("failed to open file): %w", err)
 	}
 
 	return resp.Body, nil
@@ -919,7 +919,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return o.fs.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "failed to create file")
+		return fmt.Errorf("failed to create file: %w", err)
 	}
 	// resp.Body.Close()
 	// fs.Debugf(nil, "PostOpen: %#v", openResponse)
@@ -963,10 +963,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return o.fs.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "failed to create file")
+		return fmt.Errorf("failed to create file: %w", err)
 	}
 	if reply.TotalWritten != currentChunkSize {
-		return errors.Errorf("failed to create file: incomplete write of %d/%d bytes", reply.TotalWritten, currentChunkSize)
+		return fmt.Errorf("failed to create file: incomplete write of %d/%d bytes", reply.TotalWritten, currentChunkSize)
 	}
 
 	chunkCounter++
@@ -986,7 +986,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return o.fs.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "failed to create file")
+		return fmt.Errorf("failed to create file: %w", err)
 	}
 	// fs.Debugf(nil, "PostClose: %#v", closeResponse)
 
@@ -1038,7 +1038,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 		return o.fs.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "failed to get folder list")
+		return fmt.Errorf("failed to get folder list: %w", err)
 	}
 
 	if len(folderList.Files) == 0 {
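
For reference, the three mechanical rewrite rules applied throughout this compare. The function below is illustrative only, not rclone code; note that errors.Wrapf took the cause as its first argument, while fmt.Errorf takes %w last:

    package main

    import "fmt"

    func demo(err error, position string) {
    	// errors.Wrap(err, "failed to create session")
    	_ = fmt.Errorf("failed to create session: %w", err)

    	// errors.Wrapf(err, "bad expected range: %q", position)
    	// the cause moves from the first argument to the last
    	_ = fmt.Errorf("bad expected range: %q: %w", position, err)

    	// errors.Errorf("no '-' in next expected range: %q", position)
    	// no cause to wrap, so plain fmt.Errorf with no %w is enough
    	_ = fmt.Errorf("no '-' in next expected range: %q", position)
    }

    func main() { demo(fmt.Errorf("boom"), "x-") }
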
backend/pcloud/pcloud.go

@@ -10,6 +10,7 @@ package pcloud
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -18,7 +19,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/pcloud/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
@@ -290,7 +290,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	root = parsePath(root)
 	oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to configure Pcloud")
+		return nil, fmt.Errorf("failed to configure Pcloud: %w", err)
 	}
 	updateTokenURL(oauthConfig, opt.Hostname)
 
@@ -463,7 +463,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return found, errors.Wrap(err, "couldn't list files")
+		return found, fmt.Errorf("couldn't list files: %w", err)
 	}
 	for i := range result.Metadata.Contents {
 		item := &result.Metadata.Contents[i]
@@ -600,7 +600,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "rmdir failed")
+		return fmt.Errorf("rmdir failed: %w", err)
 	}
 	f.dirCache.FlushDir(dir)
 	if err != nil {
@@ -872,7 +872,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "about failed")
+		return nil, fmt.Errorf("about failed: %w", err)
 	}
 	usage = &fs.Usage{
 		Total: fs.NewUsageValue(q.Quota), // quota of bytes that can be used
@@ -952,7 +952,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	if o.md5 == "" && o.sha1 == "" && o.sha256 == "" {
 		err := o.getHashes(ctx)
 		if err != nil {
-			return "", errors.Wrap(err, "failed to get hash")
+			return "", fmt.Errorf("failed to get hash: %w", err)
 		}
 	}
 	return *pHash, nil
@@ -971,7 +971,7 @@ func (o *Object) Size() int64 {
 // setMetaData sets the metadata from info
 func (o *Object) setMetaData(info *api.Item) (err error) {
 	if info.IsFolder {
-		return errors.Wrapf(fs.ErrorNotAFile, "%q is a folder", o.remote)
+		return fmt.Errorf("%q is a folder: %w", o.remote, fs.ErrorNotAFile)
 	}
 	o.hasMetaData = true
 	o.size = info.Size
@@ -1058,7 +1058,7 @@ func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {
 		return "", err
 	}
 	if !result.IsValid() {
-		return "", errors.Errorf("fetched invalid link %+v", result)
+		return "", fmt.Errorf("fetched invalid link %+v", result)
 	}
 	o.link = &result
 	return o.link.URL(), nil
@@ -1146,7 +1146,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	if size == 0 {
 		formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf)
 		if err != nil {
-			return errors.Wrap(err, "failed to make multipart upload for 0 length file")
+			return fmt.Errorf("failed to make multipart upload for 0 length file: %w", err)
 		}
 
 		contentLength := overhead + size
@@ -1177,7 +1177,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return err
 	}
 	if len(result.Items) != 1 {
-		return errors.Errorf("failed to upload %v - not sure why", o)
+		return fmt.Errorf("failed to upload %v - not sure why", o)
 	}
 	o.setHashes(&result.Checksums[0])
 	return o.setMetaData(&result.Items[0])
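
The setMetaData change above is the interesting case: errors.Wrapf(fs.ErrorNotAFile, ...) wrapped a sentinel rather than a received error, so the rewrite puts the sentinel behind %w to keep errors.Is matches working. A sketch with a stand-in sentinel (ErrNotAFile here is ours, standing in for rclone's fs.ErrorNotAFile):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // ErrNotAFile stands in for rclone's fs.ErrorNotAFile sentinel.
    var ErrNotAFile = errors.New("is not a regular file")

    func setMetaData(remote string, isFolder bool) error {
    	if isFolder {
    		// Wrap the sentinel so callers can still detect it.
    		return fmt.Errorf("%q is a folder: %w", remote, ErrNotAFile)
    	}
    	return nil
    }

    func main() {
    	err := setMetaData("docs", true)
    	fmt.Println(errors.Is(err, ErrNotAFile)) // true
    }
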
backend/premiumizeme/premiumizeme.go

@@ -18,6 +18,7 @@ canStream = false
 
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"net"
@@ -27,7 +28,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/premiumizeme/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
@@ -250,7 +250,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if opt.APIKey == "" {
 		client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to configure premiumize.me")
+			return nil, fmt.Errorf("failed to configure premiumize.me: %w", err)
 		}
 	} else {
 		client = fshttp.NewClient(ctx)
@@ -380,10 +380,10 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 	})
 	if err != nil {
 		//fmt.Printf("...Error %v\n", err)
-		return "", errors.Wrap(err, "CreateDir http")
+		return "", fmt.Errorf("CreateDir http: %w", err)
 	}
 	if err = info.AsErr(); err != nil {
-		return "", errors.Wrap(err, "CreateDir")
+		return "", fmt.Errorf("CreateDir: %w", err)
 	}
 	// fmt.Printf("...Id %q\n", *info.Id)
 	return info.ID, nil
@@ -420,10 +420,10 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return newDirID, found, errors.Wrap(err, "couldn't list files")
+		return newDirID, found, fmt.Errorf("couldn't list files: %w", err)
 	}
 	if err = result.AsErr(); err != nil {
-		return newDirID, found, errors.Wrap(err, "error while listing")
+		return newDirID, found, fmt.Errorf("error while listing: %w", err)
 	}
 	newDirID = result.FolderID
 	for i := range result.Content {
@@ -572,7 +572,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 		return true
 	})
 	if err != nil {
-		return errors.Wrap(err, "purgeCheck")
+		return fmt.Errorf("purgeCheck: %w", err)
 	}
 	if found {
 		return fs.ErrorDirectoryNotEmpty
@@ -594,10 +594,10 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "rmdir failed")
+		return fmt.Errorf("rmdir failed: %w", err)
 	}
 	if err = result.AsErr(); err != nil {
-		return errors.Wrap(err, "rmdir")
+		return fmt.Errorf("rmdir: %w", err)
 	}
 	f.dirCache.FlushDir(dir)
 	if err != nil {
@@ -645,7 +645,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
 		tmpLeaf := newLeaf + "." + random.String(8)
 		err = f.renameLeaf(ctx, isFile, id, tmpLeaf)
 		if err != nil {
-			return errors.Wrap(err, "Move rename leaf")
+			return fmt.Errorf("Move rename leaf: %w", err)
 		}
 	}
 
@@ -674,10 +674,10 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "Move http")
+		return fmt.Errorf("Move http: %w", err)
 	}
 	if err = result.AsErr(); err != nil {
-		return errors.Wrap(err, "Move")
+		return fmt.Errorf("Move: %w", err)
 		}
 	}
 
@@ -685,7 +685,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
 	if doRenameLeaf {
 		err = f.renameLeaf(ctx, isFile, id, newLeaf)
 		if err != nil {
-			return errors.Wrap(err, "Move rename leaf")
+			return fmt.Errorf("Move rename leaf: %w", err)
 		}
 	}
 
@@ -783,10 +783,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "CreateDir http")
+		return nil, fmt.Errorf("CreateDir http: %w", err)
 	}
 	if err = info.AsErr(); err != nil {
-		return nil, errors.Wrap(err, "CreateDir")
+		return nil, fmt.Errorf("CreateDir: %w", err)
 	}
 	usage = &fs.Usage{
 		Used: fs.NewUsageValue(int64(info.SpaceUsed)),
@@ -843,7 +843,7 @@ func (o *Object) Size() int64 {
 // setMetaData sets the metadata from info
 func (o *Object) setMetaData(info *api.Item) (err error) {
 	if info.Type != "file" {
-		return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
+		return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile)
 	}
 	o.hasMetaData = true
 	o.size = info.Size
@@ -953,19 +953,19 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		var u *url.URL
 		u, err = url.Parse(info.URL)
 		if err != nil {
-			return true, errors.Wrap(err, "failed to parse download URL")
+			return true, fmt.Errorf("failed to parse download URL: %w", err)
 		}
 		_, err = net.LookupIP(u.Hostname())
 		if err != nil {
-			return true, errors.Wrap(err, "failed to resolve download URL")
+			return true, fmt.Errorf("failed to resolve download URL: %w", err)
 		}
 		return false, nil
 	})
 	if err != nil {
-		return errors.Wrap(err, "upload get URL http")
+		return fmt.Errorf("upload get URL http: %w", err)
 	}
 	if err = info.AsErr(); err != nil {
-		return errors.Wrap(err, "upload get URL")
+		return fmt.Errorf("upload get URL: %w", err)
 	}
 
 	// if file exists then rename it out the way otherwise uploads can fail
@@ -976,7 +976,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		fs.Debugf(o, "Moving old file out the way to %q", newLeaf)
 		err = o.fs.renameLeaf(ctx, true, oldID, newLeaf)
 		if err != nil {
-			return errors.Wrap(err, "upload rename old file")
+			return fmt.Errorf("upload rename old file: %w", err)
 		}
 		defer func() {
 			// on failed upload rename old file back
@@ -984,7 +984,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			fs.Debugf(o, "Renaming old file back (from %q to %q) since upload failed", leaf, newLeaf)
 			newErr := o.fs.renameLeaf(ctx, true, oldID, leaf)
 			if newErr != nil && err == nil {
-				err = errors.Wrap(newErr, "upload renaming old file back")
+				err = fmt.Errorf("upload renaming old file back: %w", newErr)
 			}
 			}
 		}()
@@ -1007,10 +1007,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "upload file http")
+		return fmt.Errorf("upload file http: %w", err)
 	}
 	if err = result.AsErr(); err != nil {
-		return errors.Wrap(err, "upload file")
+		return fmt.Errorf("upload file: %w", err)
 	}
 
 	// on successful upload, remove old file if it exists
@@ -1019,7 +1019,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		fs.Debugf(o, "Removing old file")
 		err := o.fs.remove(ctx, oldID)
 		if err != nil {
-			return errors.Wrap(err, "upload remove old file")
+			return fmt.Errorf("upload remove old file: %w", err)
 		}
 	}
 
@@ -1049,10 +1049,10 @@ func (f *Fs) renameLeaf(ctx context.Context, isFile bool, id string, newLeaf str
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "rename http")
+		return fmt.Errorf("rename http: %w", err)
 	}
 	if err = result.AsErr(); err != nil {
-		return errors.Wrap(err, "rename")
+		return fmt.Errorf("rename: %w", err)
 	}
 	return nil
 }
@@ -1074,10 +1074,10 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "remove http")
+		return fmt.Errorf("remove http: %w", err)
 	}
 	if err = result.AsErr(); err != nil {
-		return errors.Wrap(err, "remove")
+		return fmt.Errorf("remove: %w", err)
 	}
 	return nil
 }
@@ -1086,7 +1086,7 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
 func (o *Object) Remove(ctx context.Context) error {
 	err := o.readMetaData(ctx)
 	if err != nil {
-		return errors.Wrap(err, "Remove: Failed to read metadata")
+		return fmt.Errorf("Remove: Failed to read metadata: %w", err)
 	}
 	return o.fs.remove(ctx, o.id)
 }
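
The deferred rename-back in Update above is the one spot where the migration has to be careful: the closure assigns to the named return value err, so the wrap must happen inside the defer. A reduced sketch of that pattern (names are illustrative; here we simply overwrite err for clarity, whereas rclone only records the rename error when no upload error is already pending):

    package main

    import "fmt"

    func upload() (err error) {
    	// Pretend we moved the old file out of the way before uploading.
    	defer func() {
    		// On failure, try to rename the old file back; if that also
    		// fails, surface the rename error via the named return.
    		if err != nil {
    			if newErr := renameBack(); newErr != nil {
    				err = fmt.Errorf("upload renaming old file back: %w", newErr)
    			}
    		}
    	}()
    	return fmt.Errorf("upload file http: %w", fmt.Errorf("boom"))
    }

    func renameBack() error { return fmt.Errorf("rename failed") }

    func main() {
    	fmt.Println(upload()) // upload renaming old file back: rename failed
    }
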
backend/putio/fs.go

@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/base64"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -13,7 +14,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/putdotio/go-putio/putio"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/configmap"
@@ -80,7 +80,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (f fs.Fs,
 	httpClient := fshttp.NewClient(ctx)
 	oAuthClient, _, err := oauthutil.NewClientWithBaseClient(ctx, name, m, putioConfig, httpClient)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to configure putio")
+		return nil, fmt.Errorf("failed to configure putio: %w", err)
 	}
 	p := &Fs{
 		name: name,
@@ -469,7 +469,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 	// check directory exists
 	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
 	if err != nil {
-		return errors.Wrap(err, "Rmdir")
+		return fmt.Errorf("Rmdir: %w", err)
 	}
 	dirID := atoi(directoryID)
 
@@ -482,7 +482,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 		return shouldRetry(ctx, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "Rmdir")
+		return fmt.Errorf("Rmdir: %w", err)
 	}
 	if len(children) != 0 {
 		return errors.New("directory not empty")
@@ -647,7 +647,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 		return shouldRetry(ctx, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "about failed")
+		return nil, fmt.Errorf("about failed: %w", err)
 	}
 	return &fs.Usage{
 		Total: fs.NewUsageValue(ai.Disk.Size), // quota of bytes that can be used
backend/putio/object.go

@@ -2,6 +2,7 @@ package putio
 
 import (
 	"context"
+	"fmt"
 	"io"
 	"net/http"
 	"net/url"
@@ -9,7 +10,6 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/putdotio/go-putio/putio"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/fserrors"
@@ -82,7 +82,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	}
 	err := o.readEntryAndSetMetadata(ctx)
 	if err != nil {
-		return "", errors.Wrap(err, "failed to read hash from metadata")
+		return "", fmt.Errorf("failed to read hash from metadata: %w", err)
 	}
 	return o.file.CRC32, nil
 }
backend/qingstor/qingstor.go

@@ -8,6 +8,7 @@ package qingstor
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -17,7 +18,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
@@ -285,7 +285,7 @@ func qsServiceConnection(ctx context.Context, opt *Options) (*qs.Service, error)
 
 func checkUploadChunkSize(cs fs.SizeSuffix) error {
 	if cs < minChunkSize {
-		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
 	}
 	return nil
 }
@@ -300,7 +300,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
 
 func checkUploadCutoff(cs fs.SizeSuffix) error {
 	if cs > maxUploadCutoff {
-		return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
+		return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
 	}
 	return nil
 }
@@ -329,11 +329,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 	err = checkUploadChunkSize(opt.ChunkSize)
 	if err != nil {
-		return nil, errors.Wrap(err, "qingstor: chunk size")
+		return nil, fmt.Errorf("qingstor: chunk size: %w", err)
 	}
 	err = checkUploadCutoff(opt.UploadCutoff)
 	if err != nil {
-		return nil, errors.Wrap(err, "qingstor: upload cutoff")
+		return nil, fmt.Errorf("qingstor: upload cutoff: %w", err)
 	}
 	svc, err := qsServiceConnection(ctx, opt)
 	if err != nil {
@@ -884,7 +884,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) {
 		var resp *qs.ListMultipartUploadsOutput
 		resp, err = bucketInit.ListMultipartUploads(&req)
 		if err != nil {
-			return errors.Wrap(err, "clean up bucket list multipart uploads")
+			return fmt.Errorf("clean up bucket list multipart uploads: %w", err)
 		}
 		for _, upload := range resp.Uploads {
 			if upload.Created != nil && upload.Key != nil && upload.UploadID != nil {
@@ -896,7 +896,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) {
 				}
 				_, abortErr := bucketInit.AbortMultipartUpload(*upload.Key, &req)
 				if abortErr != nil {
-					err = errors.Wrapf(abortErr, "failed to remove multipart upload for %q", *upload.Key)
+					err = fmt.Errorf("failed to remove multipart upload for %q: %w", *upload.Key, abortErr)
					fs.Errorf(f, "%v", err)
 				}
 			} else {
backend/qingstor/upload.go

@@ -8,13 +8,13 @@ package qingstor
 import (
 	"bytes"
 	"crypto/md5"
+	"errors"
 	"fmt"
 	"hash"
 	"io"
 	"sort"
 	"sync"
 
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/lib/atexit"
 	qs "github.com/yunify/qingstor-sdk-go/v3/service"
@@ -175,7 +175,7 @@ func (u *uploader) upload() error {
 	u.init()
 
 	if u.cfg.partSize < minMultiPartSize {
-		return errors.Errorf("part size must be at least %d bytes", minMultiPartSize)
+		return fmt.Errorf("part size must be at least %d bytes", minMultiPartSize)
 	}
 
 	// Do one read to determine if we have more than one part
@@ -184,7 +184,7 @@ func (u *uploader) upload() error {
 		fs.Debugf(u, "Uploading as single part object to QingStor")
 		return u.singlePartUpload(reader, u.readerPos)
 	} else if err != nil {
-		return errors.Errorf("read upload data failed: %s", err)
+		return fmt.Errorf("read upload data failed: %s", err)
 	}
 
 	fs.Debugf(u, "Uploading as multi-part object to QingStor")
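
One detail worth noticing in the qingstor hunk above: "read upload data failed: %s" formats the cause with %s rather than %w, so the message is preserved but the error chain is not. A sketch of the difference:

    package main

    import (
    	"errors"
    	"fmt"
    	"io"
    )

    func main() {
    	// %w keeps the cause in the chain; %s flattens it to text.
    	wrapped := fmt.Errorf("read upload data failed: %w", io.ErrUnexpectedEOF)
    	flattened := fmt.Errorf("read upload data failed: %s", io.ErrUnexpectedEOF)

    	fmt.Println(errors.Is(wrapped, io.ErrUnexpectedEOF))   // true
    	fmt.Println(errors.Is(flattened, io.ErrUnexpectedEOF)) // false
    }
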
backend/s3/s3.go (425 changed lines)
@@ -9,6 +9,7 @@ import (
 	"encoding/base64"
 	"encoding/hex"
 	"encoding/xml"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -33,7 +34,6 @@ import (
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/ncw/swift/v2"
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
@@ -58,12 +58,14 @@ import (
 func init() {
 	fs.Register(&fs.RegInfo{
 		Name:        "s3",
-		Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, SeaweedFS, and Tencent COS",
+		Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, RackCorp, SeaweedFS, and Tencent COS",
 		NewFs:       NewFs,
 		CommandHelp: commandHelp,
 		Options: []fs.Option{{
 			Name: fs.ConfigProvider,
 			Help: "Choose your S3 provider.",
+			// NB if you add a new provider here, then add it in the
+			// setQuirks function and set the correct quirks
 			Examples: []fs.OptionExample{{
 				Value: "AWS",
 				Help:  "Amazon Web Services (AWS) S3",
@@ -88,6 +90,9 @@ func init() {
 			}, {
 				Value: "Netease",
 				Help:  "Netease Object Storage (NOS)",
+			}, {
+				Value: "RackCorp",
+				Help:  "RackCorp Object Storage",
 			}, {
 				Value: "Scaleway",
 				Help:  "Scaleway Object Storage",
@@ -207,6 +212,68 @@ func init() {
 				Value: "us-gov-west-1",
 				Help:  "AWS GovCloud (US) Region.\nNeeds location constraint us-gov-west-1.",
 			}},
+		}, {
+			Name:     "region",
+			Help:     "region - the location where your bucket will be created and your data stored.\n",
+			Provider: "RackCorp",
+			Examples: []fs.OptionExample{{
+				Value: "global",
+				Help:  "Global CDN (All locations) Region",
+			}, {
+				Value: "au",
+				Help:  "Australia (All states)",
+			}, {
+				Value: "au-nsw",
+				Help:  "NSW (Australia) Region",
+			}, {
+				Value: "au-qld",
+				Help:  "QLD (Australia) Region",
+			}, {
+				Value: "au-vic",
+				Help:  "VIC (Australia) Region",
+			}, {
+				Value: "au-wa",
+				Help:  "Perth (Australia) Region",
+			}, {
+				Value: "ph",
+				Help:  "Manila (Philippines) Region",
+			}, {
+				Value: "th",
+				Help:  "Bangkok (Thailand) Region",
+			}, {
+				Value: "hk",
+				Help:  "HK (Hong Kong) Region",
+			}, {
+				Value: "mn",
+				Help:  "Ulaanbaatar (Mongolia) Region",
+			}, {
+				Value: "kg",
+				Help:  "Bishkek (Kyrgyzstan) Region",
+			}, {
+				Value: "id",
+				Help:  "Jakarta (Indonesia) Region",
+			}, {
+				Value: "jp",
+				Help:  "Tokyo (Japan) Region",
+			}, {
+				Value: "sg",
+				Help:  "SG (Singapore) Region",
+			}, {
+				Value: "de",
+				Help:  "Frankfurt (Germany) Region",
+			}, {
+				Value: "us",
+				Help:  "USA (AnyCast) Region",
+			}, {
+				Value: "us-east-1",
+				Help:  "New York (USA) Region",
+			}, {
+				Value: "us-west-1",
+				Help:  "Freemont (USA) Region",
+			}, {
+				Value: "nz",
+				Help:  "Auckland (New Zealand) Region",
+			}},
 		}, {
 			Name: "region",
 			Help: "Region to connect to.",
@@ -221,7 +288,7 @@ func init() {
 		}, {
 			Name:     "region",
 			Help:     "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
-			Provider: "!AWS,Alibaba,Scaleway,TencentCOS",
+			Provider: "!AWS,Alibaba,RackCorp,Scaleway,TencentCOS",
 			Examples: []fs.OptionExample{{
 				Value: "",
 				Help:  "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -593,10 +660,73 @@ func init() {
 				Value: "cos.accelerate.myqcloud.com",
 				Help:  "Use Tencent COS Accelerate Endpoint",
 			}},
+		}, {
+			// RackCorp endpoints: https://www.rackcorp.com/storage/s3storage
+			Name:     "endpoint",
+			Help:     "Endpoint for RackCorp Object Storage.",
+			Provider: "RackCorp",
+			Examples: []fs.OptionExample{{
+				Value: "s3.rackcorp.com",
+				Help:  "Global (AnyCast) Endpoint",
+			}, {
+				Value: "au.s3.rackcorp.com",
+				Help:  "Australia (Anycast) Endpoint",
+			}, {
+				Value: "au-nsw.s3.rackcorp.com",
+				Help:  "Sydney (Australia) Endpoint",
+			}, {
+				Value: "au-qld.s3.rackcorp.com",
+				Help:  "Brisbane (Australia) Endpoint",
+			}, {
+				Value: "au-vic.s3.rackcorp.com",
+				Help:  "Melbourne (Australia) Endpoint",
+			}, {
+				Value: "au-wa.s3.rackcorp.com",
+				Help:  "Perth (Australia) Endpoint",
+			}, {
+				Value: "ph.s3.rackcorp.com",
+				Help:  "Manila (Philippines) Endpoint",
+			}, {
+				Value: "th.s3.rackcorp.com",
+				Help:  "Bangkok (Thailand) Endpoint",
+			}, {
+				Value: "hk.s3.rackcorp.com",
+				Help:  "HK (Hong Kong) Endpoint",
+			}, {
+				Value: "mn.s3.rackcorp.com",
+				Help:  "Ulaanbaatar (Mongolia) Endpoint",
+			}, {
+				Value: "kg.s3.rackcorp.com",
+				Help:  "Bishkek (Kyrgyzstan) Endpoint",
+			}, {
+				Value: "id.s3.rackcorp.com",
+				Help:  "Jakarta (Indonesia) Endpoint",
+			}, {
+				Value: "jp.s3.rackcorp.com",
+				Help:  "Tokyo (Japan) Endpoint",
+			}, {
+				Value: "sg.s3.rackcorp.com",
+				Help:  "SG (Singapore) Endpoint",
+			}, {
+				Value: "de.s3.rackcorp.com",
+				Help:  "Frankfurt (Germany) Endpoint",
+			}, {
+				Value: "us.s3.rackcorp.com",
+				Help:  "USA (AnyCast) Endpoint",
+			}, {
+				Value: "us-east-1.s3.rackcorp.com",
+				Help:  "New York (USA) Endpoint",
+			}, {
+				Value: "us-west-1.s3.rackcorp.com",
+				Help:  "Freemont (USA) Endpoint",
+			}, {
+				Value: "nz.s3.rackcorp.com",
+				Help:  "Auckland (New Zealand) Endpoint",
+			}},
 		}, {
 			Name:     "endpoint",
 			Help:     "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
-			Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath",
+			Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath,RackCorp",
 			Examples: []fs.OptionExample{{
 				Value: "objects-us-east-1.dream.io",
 				Help:  "Dream Objects endpoint",
@@ -815,10 +945,72 @@ func init() {
 				Value: "tor01-flex",
 				Help:  "Toronto Flex",
 			}},
+		}, {
+			Name:     "location_constraint",
+			Help:     "Location constraint - the location where your bucket will be located and your data stored.\n",
+			Provider: "RackCorp",
+			Examples: []fs.OptionExample{{
+				Value: "global",
+				Help:  "Global CDN Region",
+			}, {
+				Value: "au",
+				Help:  "Australia (All locations)",
+			}, {
+				Value: "au-nsw",
+				Help:  "NSW (Australia) Region",
+			}, {
+				Value: "au-qld",
+				Help:  "QLD (Australia) Region",
+			}, {
+				Value: "au-vic",
+				Help:  "VIC (Australia) Region",
+			}, {
+				Value: "au-wa",
+				Help:  "Perth (Australia) Region",
+			}, {
+				Value: "ph",
+				Help:  "Manila (Philippines) Region",
+			}, {
+				Value: "th",
+				Help:  "Bangkok (Thailand) Region",
+			}, {
+				Value: "hk",
+				Help:  "HK (Hong Kong) Region",
+			}, {
+				Value: "mn",
+				Help:  "Ulaanbaatar (Mongolia) Region",
+			}, {
+				Value: "kg",
+				Help:  "Bishkek (Kyrgyzstan) Region",
+			}, {
+				Value: "id",
+				Help:  "Jakarta (Indonesia) Region",
+			}, {
+				Value: "jp",
+				Help:  "Tokyo (Japan) Region",
+			}, {
+				Value: "sg",
+				Help:  "SG (Singapore) Region",
+			}, {
+				Value: "de",
+				Help:  "Frankfurt (Germany) Region",
+			}, {
+				Value: "us",
+				Help:  "USA (AnyCast) Region",
+			}, {
+				Value: "us-east-1",
+				Help:  "New York (USA) Region",
+			}, {
+				Value: "us-west-1",
+				Help:  "Freemont (USA) Region",
+			}, {
+				Value: "nz",
+				Help:  "Auckland (New Zealand) Region",
+			}},
 		}, {
 			Name:     "location_constraint",
 			Help:     "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
-			Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath,TencentCOS",
+			Provider: "!AWS,IBMCOS,Alibaba,RackCorp,Scaleway,StackPath,TencentCOS",
 		}, {
 			Name: "acl",
 			Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -1203,6 +1395,34 @@ In Ceph, this can be increased with the "rgw list buckets max chunk" option.
`,
 			Default:  1000,
 			Advanced: true,
+		}, {
+			Name: "list_version",
+			Help: `Version of ListObjects to use: 1,2 or 0 for auto.
+
+When S3 originally launched it only provided the ListObjects call to
+enumerate objects in a bucket.
+
+However in May 2016 the ListObjectsV2 call was introduced. This is
+much higher performance and should be used if at all possible.
+
+If set to the default, 0, rclone will guess according to the provider
+set which list objects method to call. If it guesses wrong, then it
+may be set manually here.
+`,
+			Default:  0,
+			Advanced: true,
+		}, {
+			Name: "list_url_encode",
+			Help: `Whether to url encode listings: true/false/unset
+
+Some providers support URL encoding listings and where this is
+available this is more reliable when using control characters in file
+names. If this is set to unset (the default) then rclone will choose
+according to the provider setting what to apply, but you can override
+rclone's choice here.
+`,
+			Default:  fs.Tristate{},
+			Advanced: true,
 		}, {
 			Name: "no_check_bucket",
 			Help: `If set, don't attempt to check the bucket exists or create it.
@@ -1357,6 +1577,8 @@ type Options struct {
 	UseAccelerateEndpoint bool        `config:"use_accelerate_endpoint"`
 	LeavePartsOnError     bool        `config:"leave_parts_on_error"`
 	ListChunk             int64       `config:"list_chunk"`
+	ListVersion           int         `config:"list_version"`
+	ListURLEncode         fs.Tristate `config:"list_url_encode"`
 	NoCheckBucket         bool        `config:"no_check_bucket"`
 	NoHead                bool        `config:"no_head"`
 	NoHeadObject          bool        `config:"no_head_object"`
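
The new list_url_encode option uses fs.Tristate rather than bool so that "not set by the user" is distinguishable from "set to false": setQuirks (added further down) only fills in the provider default while the value is still invalid. A reduced sketch of that three-state pattern, independent of rclone's actual types:

    package main

    import "fmt"

    // Tristate mirrors the shape of rclone's fs.Tristate: a value plus a
    // flag recording whether the value was actually set.
    type Tristate struct {
    	Value bool
    	Valid bool
    }

    func applyProviderDefault(opt *Tristate, providerDefault bool) {
    	if !opt.Valid {
    		opt.Valid = true
    		opt.Value = providerDefault
    	}
    }

    func main() {
    	var unset Tristate                         // user left it alone
    	set := Tristate{Value: false, Valid: true} // user forced false

    	applyProviderDefault(&unset, true)
    	applyProviderDefault(&set, true)

    	fmt.Println(unset.Value) // true  - provider default applied
    	fmt.Println(set.Value)   // false - user choice preserved
    }
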
@@ -1525,7 +1747,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
 	// start a new AWS session
 	awsSession, err := session.NewSession()
 	if err != nil {
-		return nil, nil, errors.Wrap(err, "NewSession")
+		return nil, nil, fmt.Errorf("NewSession: %w", err)
 	}
 
 	// first provider to supply a credential set "wins"
@@ -1573,12 +1795,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
 	if opt.Region == "" {
 		opt.Region = "us-east-1"
 	}
-	if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.Provider == "TencentCOS" || opt.UseAccelerateEndpoint {
-		opt.ForcePathStyle = false
-	}
-	if opt.Provider == "Scaleway" && opt.MaxUploadParts > 1000 {
-		opt.MaxUploadParts = 1000
-	}
+	setQuirks(opt)
 	awsConfig := aws.NewConfig().
 		WithMaxRetries(ci.LowLevelRetries).
 		WithCredentials(cred).
@@ -1634,7 +1851,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
 
 func checkUploadChunkSize(cs fs.SizeSuffix) error {
 	if cs < minChunkSize {
-		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
 	}
 	return nil
 }
@@ -1649,7 +1866,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
 
 func checkUploadCutoff(cs fs.SizeSuffix) error {
 	if cs > maxUploadCutoff {
-		return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
+		return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
 	}
 	return nil
 }
@@ -1662,6 +1879,92 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
 	return
 }
 
+// Set the provider quirks
+//
+// There should be no testing against opt.Provider anywhere in the
+// code except in here to localise the setting of the quirks.
+//
+// These should be differences from AWS S3
+func setQuirks(opt *Options) {
+	var (
+		listObjectsV2     = true
+		virtualHostStyle  = true
+		urlEncodeListings = true
+	)
+	switch opt.Provider {
+	case "AWS":
+		// No quirks
+	case "Alibaba":
+		// No quirks
+	case "Ceph":
+		listObjectsV2 = false
+		virtualHostStyle = false
+		urlEncodeListings = false
+	case "DigitalOcean":
+		urlEncodeListings = false
+	case "Dreamhost":
+		urlEncodeListings = false
+	case "IBMCOS":
+		listObjectsV2 = false // untested
+		virtualHostStyle = false
+		urlEncodeListings = false
+	case "Minio":
+		virtualHostStyle = false
+	case "Netease":
+		listObjectsV2 = false // untested
+		urlEncodeListings = false
+	case "RackCorp":
+		// No quirks
+	case "Scaleway":
+		// Scaleway can only have 1000 parts in an upload
+		if opt.MaxUploadParts > 1000 {
+			opt.MaxUploadParts = 1000
+		}
+		urlEncodeListings = false
+	case "SeaweedFS":
+		listObjectsV2 = false // untested
+		virtualHostStyle = false
+		urlEncodeListings = false
+	case "StackPath":
+		listObjectsV2 = false // untested
+		virtualHostStyle = false
+		urlEncodeListings = false
+	case "TencentCOS":
+		listObjectsV2 = false // untested
+	case "Wasabi":
+		// No quirks
+	case "Other":
+		listObjectsV2 = false
+		virtualHostStyle = false
+		urlEncodeListings = false
+	default:
+		fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
+		listObjectsV2 = false
+		virtualHostStyle = false
+		urlEncodeListings = false
+	}
+
+	// Path Style vs Virtual Host style
+	if virtualHostStyle || opt.UseAccelerateEndpoint {
+		opt.ForcePathStyle = false
+	}
+
+	// Set to see if we need to URL encode listings
+	if !opt.ListURLEncode.Valid {
+		opt.ListURLEncode.Valid = true
+		opt.ListURLEncode.Value = urlEncodeListings
+	}
+
+	// Set the correct list version if not manually set
+	if opt.ListVersion == 0 {
+		if listObjectsV2 {
+			opt.ListVersion = 2
+		} else {
+			opt.ListVersion = 1
+		}
+	}
+}
+
 // setRoot changes the root of the Fs
 func (f *Fs) setRoot(root string) {
 	f.root = parsePath(root)
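
setQuirks centralises every opt.Provider comparison in one switch, so the listing code can consult pre-resolved fields instead of provider names. The tail of it resolves the list version: a user-set value wins, otherwise the provider quirk decides. A stand-alone sketch of just that resolution step (a simplified re-implementation, not the rclone function; the --s3-list-version flag name is derived from the config option name):

    package main

    import "fmt"

    // resolveListVersion mirrors the tail of setQuirks: a user-set value
    // (1 or 2) wins, otherwise the provider quirk decides.
    func resolveListVersion(userSet int, providerSupportsV2 bool) int {
    	if userSet != 0 {
    		return userSet
    	}
    	if providerSupportsV2 {
    		return 2
    	}
    	return 1
    }

    func main() {
    	fmt.Println(resolveListVersion(0, true))  // AWS default: 2
    	fmt.Println(resolveListVersion(0, false)) // Ceph default: 1
    	fmt.Println(resolveListVersion(1, true))  // user override to v1
    }
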
@@ -1678,11 +1981,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
}
|
}
|
||||||
err = checkUploadChunkSize(opt.ChunkSize)
|
err = checkUploadChunkSize(opt.ChunkSize)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "s3: chunk size")
|
return nil, fmt.Errorf("s3: chunk size: %w", err)
|
||||||
}
|
}
|
||||||
err = checkUploadCutoff(opt.UploadCutoff)
|
err = checkUploadCutoff(opt.UploadCutoff)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "s3: upload cutoff")
|
return nil, fmt.Errorf("s3: upload cutoff: %w", err)
|
||||||
}
|
}
|
||||||
if opt.ACL == "" {
|
if opt.ACL == "" {
|
||||||
opt.ACL = "private"
|
opt.ACL = "private"
|
||||||
@@ -1820,13 +2123,13 @@ func (f *Fs) getBucketLocation(ctx context.Context, bucket string) (string, erro
|
|||||||
func (f *Fs) updateRegionForBucket(ctx context.Context, bucket string) error {
|
func (f *Fs) updateRegionForBucket(ctx context.Context, bucket string) error {
|
||||||
region, err := f.getBucketLocation(ctx, bucket)
|
region, err := f.getBucketLocation(ctx, bucket)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "reading bucket location failed")
|
return fmt.Errorf("reading bucket location failed: %w", err)
|
||||||
}
|
}
|
||||||
if aws.StringValue(f.c.Config.Endpoint) != "" {
|
if aws.StringValue(f.c.Config.Endpoint) != "" {
|
||||||
return errors.Errorf("can't set region to %q as endpoint is set", region)
|
return fmt.Errorf("can't set region to %q as endpoint is set", region)
|
||||||
}
|
}
|
||||||
if aws.StringValue(f.c.Config.Region) == region {
|
if aws.StringValue(f.c.Config.Region) == region {
|
||||||
return errors.Errorf("region is already %q - not updating", region)
|
return fmt.Errorf("region is already %q - not updating", region)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make a new session with the new region
|
// Make a new session with the new region
|
||||||
@@ -1834,7 +2137,7 @@ func (f *Fs) updateRegionForBucket(ctx context.Context, bucket string) error {
 	f.opt.Region = region
 	c, ses, err := s3Connection(f.ctx, &f.opt, f.srv)
 	if err != nil {
-		return errors.Wrap(err, "creating new session failed")
+		return fmt.Errorf("creating new session failed: %w", err)
 	}
 	f.c = c
 	f.ses = ses
@@ -1853,6 +2156,7 @@ type listFn func(remote string, object *s3.Object, isDirectory bool) error
 //
 // Set recurse to read sub directories
 func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
+	v1 := f.opt.ListVersion == 1
 	if prefix != "" {
 		prefix += "/"
 	}
@@ -1863,7 +2167,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 	if !recurse {
 		delimiter = "/"
 	}
-	var marker *string
+	var continuationToken, startAfter *string
 	// URL encode the listings so we can use control characters in object names
 	// See: https://github.com/aws/aws-sdk-go/issues/1914
 	//
@@ -1879,15 +2183,16 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 	//
 	// So we enable only on providers we know supports it properly, all others can retry when a
 	// XML Syntax error is detected.
-	var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio" || f.opt.Provider == "TencentCOS")
+	urlEncodeListings := f.opt.ListURLEncode.Value
 	for {
 		// FIXME need to implement ALL loop
-		req := s3.ListObjectsInput{
-			Bucket:    &bucket,
-			Delimiter: &delimiter,
-			Prefix:    &directory,
-			MaxKeys:   &f.opt.ListChunk,
-			Marker:    marker,
+		req := s3.ListObjectsV2Input{
+			Bucket:            &bucket,
+			ContinuationToken: continuationToken,
+			Delimiter:         &delimiter,
+			Prefix:            &directory,
+			MaxKeys:           &f.opt.ListChunk,
+			StartAfter:        startAfter,
 		}
 		if urlEncodeListings {
 			req.EncodingType = aws.String(s3.EncodingTypeUrl)
@@ -1895,10 +2200,28 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 		if f.opt.RequesterPays {
 			req.RequestPayer = aws.String(s3.RequestPayerRequester)
 		}
-		var resp *s3.ListObjectsOutput
+		var resp *s3.ListObjectsV2Output
 		var err error
 		err = f.pacer.Call(func() (bool, error) {
-			resp, err = f.c.ListObjectsWithContext(ctx, &req)
+			if v1 {
+				// Convert v2 req into v1 req
+				var reqv1 s3.ListObjectsInput
+				structs.SetFrom(&reqv1, &req)
+				reqv1.Marker = continuationToken
+				if startAfter != nil {
+					reqv1.Marker = startAfter
+				}
+				var respv1 *s3.ListObjectsOutput
+				respv1, err = f.c.ListObjectsWithContext(ctx, &reqv1)
+				if err == nil && respv1 != nil {
+					// convert v1 resp into v2 resp
+					resp = new(s3.ListObjectsV2Output)
+					structs.SetFrom(resp, respv1)
+					resp.NextContinuationToken = respv1.NextMarker
+				}
+			} else {
+				resp, err = f.c.ListObjectsV2WithContext(ctx, &req)
+			}
 			if err != nil && !urlEncodeListings {
 				if awsErr, ok := err.(awserr.RequestFailure); ok {
 					if origErr := awsErr.OrigErr(); origErr != nil {
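The v1 fallback above copies matching fields between the v1 and v2 request/response structs with rclone's structs.SetFrom helper. A rough standalone sketch of that reflection pattern (the types and the copyMatchingFields helper are invented for illustration, not the rclone implementation):

package main

import (
	"fmt"
	"reflect"
)

type ListV2 struct {
	Bucket  *string
	MaxKeys *int64
}

type ListV1 struct {
	Bucket  *string
	MaxKeys *int64
	Marker  *string
}

// copyMatchingFields copies every field that exists in both structs with
// the same name and an assignable type - roughly the idea behind SetFrom.
func copyMatchingFields(dst, src interface{}) {
	d := reflect.ValueOf(dst).Elem()
	s := reflect.ValueOf(src).Elem()
	for i := 0; i < s.NumField(); i++ {
		name := s.Type().Field(i).Name
		if f := d.FieldByName(name); f.IsValid() && f.CanSet() &&
			s.Field(i).Type().AssignableTo(f.Type()) {
			f.Set(s.Field(i))
		}
	}
}

func main() {
	bucket, max := "mybucket", int64(1000)
	v2 := ListV2{Bucket: &bucket, MaxKeys: &max}
	var v1 ListV1
	copyMatchingFields(&v1, &v2)
	fmt.Println(*v1.Bucket, *v1.MaxKeys) // mybucket 1000
}

Fields with no counterpart (like Marker here) are simply left alone, which is why the diff sets reqv1.Marker and NextContinuationToken by hand after the copy.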
@@ -1996,19 +2319,21 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 		if !aws.BoolValue(resp.IsTruncated) {
 			break
 		}
-		// Use NextMarker if set, otherwise use last Key
-		if resp.NextMarker == nil || *resp.NextMarker == "" {
+		// Use NextContinuationToken if set, otherwise use last Key for StartAfter
+		if resp.NextContinuationToken == nil || *resp.NextContinuationToken == "" {
 			if len(resp.Contents) == 0 {
-				return errors.New("s3 protocol error: received listing with IsTruncated set, no NextMarker and no Contents")
+				return errors.New("s3 protocol error: received listing with IsTruncated set, no NextContinuationToken/NextMarker and no Contents")
 			}
-			marker = resp.Contents[len(resp.Contents)-1].Key
+			continuationToken = nil
+			startAfter = resp.Contents[len(resp.Contents)-1].Key
 		} else {
-			marker = resp.NextMarker
+			continuationToken = resp.NextContinuationToken
+			startAfter = nil
 		}
-		if urlEncodeListings {
-			*marker, err = url.QueryUnescape(*marker)
+		if startAfter != nil && urlEncodeListings {
+			*startAfter, err = url.QueryUnescape(*startAfter)
 			if err != nil {
-				return errors.Wrapf(err, "failed to URL decode NextMarker %q", *marker)
+				return fmt.Errorf("failed to URL decode StartAfter/NextMarker %q: %w", *continuationToken, err)
 			}
 		}
 	}
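For reference, a compact sketch of the ListObjectsV2 pagination protocol used above, written directly against aws-sdk-go without rclone's pacer/retry wrapper (the bucket name is a placeholder):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	ses := session.Must(session.NewSession())
	svc := s3.New(ses)
	var token *string
	for {
		resp, err := svc.ListObjectsV2WithContext(context.Background(), &s3.ListObjectsV2Input{
			Bucket:            aws.String("example-bucket"), // illustrative name
			MaxKeys:           aws.Int64(1000),
			ContinuationToken: token,
		})
		if err != nil {
			log.Fatalf("listing failed: %v", err)
		}
		for _, obj := range resp.Contents {
			fmt.Println(aws.StringValue(obj.Key))
		}
		// IsTruncated plus NextContinuationToken drive the next page;
		// v1 used Marker/NextMarker (or the last returned Key) instead,
		// which is why the rclone code keeps both token and startAfter.
		if !aws.BoolValue(resp.IsTruncated) {
			break
		}
		token = resp.NextContinuationToken
	}
}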
@@ -2594,7 +2919,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 		if lifetime := opt["lifetime"]; lifetime != "" {
 			ilifetime, err := strconv.ParseInt(lifetime, 10, 64)
 			if err != nil {
-				return nil, errors.Wrap(err, "bad lifetime")
+				return nil, fmt.Errorf("bad lifetime: %w", err)
 			}
 			req.RestoreRequest.Days = &ilifetime
 		}
@@ -2653,7 +2978,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 		if opt["max-age"] != "" {
 			maxAge, err = fs.ParseDuration(opt["max-age"])
 			if err != nil {
-				return nil, errors.Wrap(err, "bad max-age")
+				return nil, fmt.Errorf("bad max-age: %w", err)
 			}
 		}
 		return nil, f.cleanUp(ctx, maxAge)
@@ -2687,7 +3012,7 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uplo
 			return f.shouldRetry(ctx, err)
 		})
 		if err != nil {
-			return nil, errors.Wrapf(err, "list multipart uploads bucket %q key %q", bucket, key)
+			return nil, fmt.Errorf("list multipart uploads bucket %q key %q: %w", bucket, key, err)
 		}
 		uploads = append(uploads, resp.Uploads...)
 		if !aws.BoolValue(resp.IsTruncated) {
@@ -2745,7 +3070,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Durat
 			}
 			_, abortErr := f.c.AbortMultipartUpload(&req)
 			if abortErr != nil {
-				err = errors.Wrapf(abortErr, "failed to remove %s", what)
+				err = fmt.Errorf("failed to remove %s: %w", what, abortErr)
 				fs.Errorf(f, "%v", err)
 			}
 		} else {
@@ -3085,7 +3410,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	})
 	if err, ok := err.(awserr.RequestFailure); ok {
 		if err.Code() == "InvalidObjectState" {
-			return nil, errors.Errorf("Object in GLACIER, restore first: bucket=%q, key=%q", bucket, bucketPath)
+			return nil, fmt.Errorf("Object in GLACIER, restore first: bucket=%q, key=%q", bucket, bucketPath)
 		}
 	}
 	if err != nil {
@@ -3163,7 +3488,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "multipart upload failed to initialise")
+		return fmt.Errorf("multipart upload failed to initialise: %w", err)
 	}
 	uid := cout.UploadId

@@ -3223,7 +3548,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 			finished = true
 		} else if err != nil {
 			free()
-			return errors.Wrap(err, "multipart upload failed to read source")
+			return fmt.Errorf("multipart upload failed to read source: %w", err)
 		}
 		buf = buf[:n]

@@ -3270,7 +3595,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 				return false, nil
 			})
 			if err != nil {
-				return errors.Wrap(err, "multipart upload failed to upload part")
+				return fmt.Errorf("multipart upload failed to upload part: %w", err)
 			}
 			return nil
 		})
@@ -3298,7 +3623,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "multipart upload failed to finalise")
+		return fmt.Errorf("multipart upload failed to finalise: %w", err)
 	}
 	return nil
 }
@@ -3424,7 +3749,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	// PutObject so we'll use this work-around.
 	url, headers, err := putObj.PresignRequest(15 * time.Minute)
 	if err != nil {
-		return errors.Wrap(err, "s3 upload: sign request")
+		return fmt.Errorf("s3 upload: sign request: %w", err)
 	}

 	if o.fs.opt.V2Auth && headers == nil {
@@ -3439,7 +3764,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	// create the vanilla http request
 	httpReq, err := http.NewRequestWithContext(ctx, "PUT", url, in)
 	if err != nil {
-		return errors.Wrap(err, "s3 upload: new request")
+		return fmt.Errorf("s3 upload: new request: %w", err)
 	}

 	// set the headers we signed and the length
@@ -3459,7 +3784,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		if resp.StatusCode >= 200 && resp.StatusCode < 299 {
 			return false, nil
 		}
-		err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
+		err = fmt.Errorf("s3 upload: %s: %s", resp.Status, body)
 		return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 	})
 	if err != nil {

@@ -2,6 +2,7 @@ package seafile

 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -13,7 +14,6 @@ import (
 	"time"

 	"github.com/coreos/go-semver/semver"
-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/seafile/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
@@ -171,14 +171,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		var err error
 		opt.Password, err = obscure.Reveal(opt.Password)
 		if err != nil {
-			return nil, errors.Wrap(err, "couldn't decrypt user password")
+			return nil, fmt.Errorf("couldn't decrypt user password: %w", err)
 		}
 	}
 	if opt.LibraryKey != "" {
 		var err error
 		opt.LibraryKey, err = obscure.Reveal(opt.LibraryKey)
 		if err != nil {
-			return nil, errors.Wrap(err, "couldn't decrypt library password")
+			return nil, fmt.Errorf("couldn't decrypt library password: %w", err)
 		}
 	}

@@ -282,7 +282,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		}
 		_, err := f.NewObject(ctx, remote)
 		if err != nil {
-			if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
+			if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorNotAFile) {
 				// File doesn't exist so return the original f
 				f.rootDirectory = rootDirectory
 				return f, nil
@@ -305,7 +305,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf

 	u, err := url.Parse(serverURL)
 	if err != nil {
-		return nil, errors.Errorf("invalid server URL %s", serverURL)
+		return nil, fmt.Errorf("invalid server URL %s", serverURL)
 	}

 	is2faEnabled, _ := m.Get(config2FA)
@@ -886,7 +886,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	// 1- rename source
 	err = srcFs.renameDir(ctx, srcLibraryID, srcPath, tempName)
 	if err != nil {
-		return errors.Wrap(err, "Cannot rename source directory to a temporary name")
+		return fmt.Errorf("Cannot rename source directory to a temporary name: %w", err)
 	}

 	// 2- move source to destination
@@ -900,7 +900,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	// 3- rename destination back to source name
 	err = f.renameDir(ctx, dstLibraryID, path.Join(dstDir, tempName), dstName)
 	if err != nil {
-		return errors.Wrap(err, "Cannot rename temporary directory to destination name")
+		return fmt.Errorf("Cannot rename temporary directory to destination name: %w", err)
 	}

 	return nil

@@ -3,6 +3,7 @@ package seafile
 import (
 	"bytes"
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -11,7 +12,6 @@ import (
 	"path"
 	"strings"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/seafile/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/lib/readers"
@@ -61,7 +61,7 @@ func getAuthorizationToken(ctx context.Context, srv *rest.Client, user, password
 	_, err := srv.CallJSON(ctx, &opts, &request, &result)
 	if err != nil {
 		// This is only going to be http errors here
-		return "", errors.Wrap(err, "failed to authenticate")
+		return "", fmt.Errorf("failed to authenticate: %w", err)
 	}
 	if result.Errors != nil && len(result.Errors) > 0 {
 		return "", errors.New(strings.Join(result.Errors, ", "))
@@ -94,7 +94,7 @@ func (f *Fs) getServerInfo(ctx context.Context) (account *api.ServerInfo, err er
 				return nil, fs.ErrorPermissionDenied
 			}
 		}
-		return nil, errors.Wrap(err, "failed to get server info")
+		return nil, fmt.Errorf("failed to get server info: %w", err)
 	}
 	return &result, nil
 }
@@ -120,7 +120,7 @@ func (f *Fs) getUserAccountInfo(ctx context.Context) (account *api.AccountInfo,
 				return nil, fs.ErrorPermissionDenied
 			}
 		}
-		return nil, errors.Wrap(err, "failed to get account info")
+		return nil, fmt.Errorf("failed to get account info: %w", err)
 	}
 	return &result, nil
 }
@@ -147,7 +147,7 @@ func (f *Fs) getLibraries(ctx context.Context) ([]api.Library, error) {
 				return nil, fs.ErrorPermissionDenied
 			}
 		}
-		return nil, errors.Wrap(err, "failed to get libraries")
+		return nil, fmt.Errorf("failed to get libraries: %w", err)
 	}
 	return result, nil
 }
@@ -178,7 +178,7 @@ func (f *Fs) createLibrary(ctx context.Context, libraryName, password string) (l
 				return nil, fs.ErrorPermissionDenied
 			}
 		}
-		return nil, errors.Wrap(err, "failed to create library")
+		return nil, fmt.Errorf("failed to create library: %w", err)
 	}
 	return result, nil
 }
@@ -205,7 +205,7 @@ func (f *Fs) deleteLibrary(ctx context.Context, libraryID string) error {
 				return fs.ErrorPermissionDenied
 			}
 		}
-		return errors.Wrap(err, "failed to delete library")
+		return fmt.Errorf("failed to delete library: %w", err)
 	}
 	return nil
 }
@@ -240,7 +240,7 @@ func (f *Fs) decryptLibrary(ctx context.Context, libraryID, password string) err
 				return nil
 			}
 		}
-		return errors.Wrap(err, "failed to decrypt library")
+		return fmt.Errorf("failed to decrypt library: %w", err)
 	}
 	return nil
 }
@@ -286,7 +286,7 @@ func (f *Fs) getDirectoryEntriesAPIv21(ctx context.Context, libraryID, dirPath s
 				return nil, fs.ErrorPermissionDenied
 			}
 		}
-		return nil, errors.Wrap(err, "failed to get directory contents")
+		return nil, fmt.Errorf("failed to get directory contents: %w", err)
 	}

 	// Clean up encoded names
@@ -327,7 +327,7 @@ func (f *Fs) getDirectoryDetails(ctx context.Context, libraryID, dirPath string)
 				return nil, fs.ErrorDirNotFound
 			}
 		}
-		return nil, errors.Wrap(err, "failed to get directory details")
+		return nil, fmt.Errorf("failed to get directory details: %w", err)
 	}
 	result.Name = f.opt.Enc.ToStandardName(result.Name)
 	result.Path = f.opt.Enc.ToStandardPath(result.Path)
@@ -366,7 +366,7 @@ func (f *Fs) createDir(ctx context.Context, libraryID, dirPath string) error {
 				return fs.ErrorPermissionDenied
 			}
 		}
-		return errors.Wrap(err, "failed to create directory")
+		return fmt.Errorf("failed to create directory: %w", err)
 	}
 	return nil
 }
@@ -406,7 +406,7 @@ func (f *Fs) renameDir(ctx context.Context, libraryID, dirPath, newName string)
 				return fs.ErrorPermissionDenied
 			}
 		}
-		return errors.Wrap(err, "failed to rename directory")
+		return fmt.Errorf("failed to rename directory: %w", err)
 	}
 	return nil
 }
@@ -449,7 +449,7 @@ func (f *Fs) moveDir(ctx context.Context, srcLibraryID, srcDir, srcName, dstLibr
 				return fs.ErrorObjectNotFound
 			}
 		}
-		return errors.Wrap(err, fmt.Sprintf("failed to move directory '%s' from '%s' to '%s'", srcName, srcDir, dstPath))
+		return fmt.Errorf("failed to move directory '%s' from '%s' to '%s': %w", srcName, srcDir, dstPath, err)
 	}

 	return nil
@@ -482,7 +482,7 @@ func (f *Fs) deleteDir(ctx context.Context, libraryID, filePath string) error {
 				return fs.ErrorPermissionDenied
 			}
 		}
-		return errors.Wrap(err, "failed to delete directory")
+		return fmt.Errorf("failed to delete directory: %w", err)
 	}
 	return nil
 }
@@ -516,7 +516,7 @@ func (f *Fs) getFileDetails(ctx context.Context, libraryID, filePath string) (*a
 				return nil, fs.ErrorPermissionDenied
 			}
 		}
-		return nil, errors.Wrap(err, "failed to get file details")
+		return nil, fmt.Errorf("failed to get file details: %w", err)
 	}
 	result.Name = f.opt.Enc.ToStandardName(result.Name)
 	result.Parent = f.opt.Enc.ToStandardPath(result.Parent)
@@ -542,7 +542,7 @@ func (f *Fs) deleteFile(ctx context.Context, libraryID, filePath string) error {
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "failed to delete file")
+		return fmt.Errorf("failed to delete file: %w", err)
 	}
 	return nil
 }
@@ -573,7 +573,7 @@ func (f *Fs) getDownloadLink(ctx context.Context, libraryID, filePath string) (s
 				return "", fs.ErrorObjectNotFound
 			}
 		}
-		return "", errors.Wrap(err, "failed to get download link")
+		return "", fmt.Errorf("failed to get download link: %w", err)
 	}
 	return result, nil
 }
@@ -667,7 +667,7 @@ func (f *Fs) getUploadLink(ctx context.Context, libraryID string) (string, error
 				return "", fs.ErrorPermissionDenied
 			}
 		}
-		return "", errors.Wrap(err, "failed to get upload link")
+		return "", fmt.Errorf("failed to get upload link: %w", err)
 	}
 	return result, nil
 }
@@ -684,7 +684,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
 	}
 	formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename))
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to make multipart upload")
+		return nil, fmt.Errorf("failed to make multipart upload: %w", err)
 	}

 	opts := rest.Opts{
@@ -711,7 +711,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
 				return nil, ErrorInternalDuringUpload
 			}
 		}
-		return nil, errors.Wrap(err, "failed to upload file")
+		return nil, fmt.Errorf("failed to upload file: %w", err)
 	}
 	if len(result) > 0 {
 		result[0].Parent = f.opt.Enc.ToStandardPath(result[0].Parent)
@@ -750,7 +750,7 @@ func (f *Fs) listShareLinks(ctx context.Context, libraryID, remote string) ([]ap
 				return nil, fs.ErrorObjectNotFound
 			}
 		}
-		return nil, errors.Wrap(err, "failed to list shared links")
+		return nil, fmt.Errorf("failed to list shared links: %w", err)
 	}
 	return result, nil
 }
@@ -788,7 +788,7 @@ func (f *Fs) createShareLink(ctx context.Context, libraryID, remote string) (*ap
 				return nil, fs.ErrorObjectNotFound
 			}
 		}
-		return nil, errors.Wrap(err, "failed to create a shared link")
+		return nil, fmt.Errorf("failed to create a shared link: %w", err)
 	}
 	return result, nil
 }
@@ -830,7 +830,7 @@ func (f *Fs) copyFile(ctx context.Context, srcLibraryID, srcPath, dstLibraryID,
 				return nil, fs.ErrorObjectNotFound
 			}
 		}
-		return nil, errors.Wrap(err, fmt.Sprintf("failed to copy file %s:'%s' to %s:'%s'", srcLibraryID, srcPath, dstLibraryID, dstPath))
+		return nil, fmt.Errorf("failed to copy file %s:'%s' to %s:'%s': %w", srcLibraryID, srcPath, dstLibraryID, dstPath, err)
 	}
 	return f.decodeFileInfo(result), nil
 }
@@ -872,7 +872,7 @@ func (f *Fs) moveFile(ctx context.Context, srcLibraryID, srcPath, dstLibraryID,
 				return nil, fs.ErrorObjectNotFound
 			}
 		}
-		return nil, errors.Wrap(err, fmt.Sprintf("failed to move file %s:'%s' to %s:'%s'", srcLibraryID, srcPath, dstLibraryID, dstPath))
+		return nil, fmt.Errorf("failed to move file %s:'%s' to %s:'%s': %w", srcLibraryID, srcPath, dstLibraryID, dstPath, err)
 	}
 	return f.decodeFileInfo(result), nil
 }
@@ -912,7 +912,7 @@ func (f *Fs) renameFile(ctx context.Context, libraryID, filePath, newname string
 				return nil, fs.ErrorObjectNotFound
 			}
 		}
-		return nil, errors.Wrap(err, fmt.Sprintf("failed to rename file '%s' to '%s'", filePath, newname))
+		return nil, fmt.Errorf("failed to rename file '%s' to '%s': %w", filePath, newname, err)
 	}
 	return f.decodeFileInfo(result), nil
 }
@@ -949,7 +949,7 @@ func (f *Fs) emptyLibraryTrash(ctx context.Context, libraryID string) error {
 				return fs.ErrorObjectNotFound
 			}
 		}
-		return errors.Wrap(err, "failed empty the library trash")
+		return fmt.Errorf("failed empty the library trash: %w", err)
 	}
 	return nil
 }
@@ -991,7 +991,7 @@ func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath st
 				return nil, fs.ErrorPermissionDenied
 			}
 		}
-		return nil, errors.Wrap(err, "failed to get directory contents")
+		return nil, fmt.Errorf("failed to get directory contents: %w", err)
 	}

 	// Clean up encoded names
@@ -1038,7 +1038,7 @@ func (f *Fs) copyFileAPIv2(ctx context.Context, srcLibraryID, srcPath, dstLibrar
 				return nil, fs.ErrorPermissionDenied
 			}
 		}
-		return nil, errors.Wrap(err, fmt.Sprintf("failed to copy file %s:'%s' to %s:'%s'", srcLibraryID, srcPath, dstLibraryID, dstPath))
+		return nil, fmt.Errorf("failed to copy file %s:'%s' to %s:'%s': %w", srcLibraryID, srcPath, dstLibraryID, dstPath, err)
 	}
 	err = rest.DecodeJSON(resp, &result)
 	if err != nil {
@@ -1090,7 +1090,7 @@ func (f *Fs) renameFileAPIv2(ctx context.Context, libraryID, filePath, newname s
 				return fs.ErrorObjectNotFound
 			}
 		}
-		return errors.Wrap(err, "failed to rename file")
+		return fmt.Errorf("failed to rename file: %w", err)
 	}
 	return nil
 }

@@ -8,6 +8,7 @@ package sftp
 import (
 	"bytes"
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -20,7 +21,6 @@ import (
 	"sync/atomic"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/pkg/sftp"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
@@ -152,11 +152,11 @@ different. This issue affects among others Synology NAS boxes.

 Shared folders can be found in directories representing volumes

-    rclone sync /home/local/directory remote:/directory --ssh-path-override /volume2/directory
+    rclone sync /home/local/directory remote:/directory --sftp-path-override /volume2/directory

 Home directory can be found in a shared folder called "home"

-    rclone sync /home/local/directory remote:/home/directory --ssh-path-override /volume1/homes/USER/directory`,
+    rclone sync /home/local/directory remote:/home/directory --sftp-path-override /volume1/homes/USER/directory`,
 			Advanced: true,
 		}, {
 			Name: "set_modtime",
@@ -384,12 +384,12 @@ func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
 	}
 	c.sshClient, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port, f.config)
 	if err != nil {
-		return nil, errors.Wrap(err, "couldn't connect SSH")
+		return nil, fmt.Errorf("couldn't connect SSH: %w", err)
 	}
 	c.sftpClient, err = f.newSftpClient(c.sshClient)
 	if err != nil {
 		_ = c.sshClient.Close()
-		return nil, errors.Wrap(err, "couldn't initialise SFTP")
+		return nil, fmt.Errorf("couldn't initialise SFTP: %w", err)
 	}
 	go c.wait()
 	return c, nil
@@ -468,16 +468,16 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
 	*pc = nil
 	if err != nil {
 		// work out if this is an expected error
-		underlyingErr := errors.Cause(err)
 		isRegularError := false
-		switch underlyingErr {
-		case os.ErrNotExist:
+		var statusErr *sftp.StatusError
+		var pathErr *os.PathError
+		switch {
+		case errors.Is(err, os.ErrNotExist):
+			isRegularError = true
+		case errors.As(err, &statusErr):
+			isRegularError = true
+		case errors.As(err, &pathErr):
 			isRegularError = true
-		default:
-			switch underlyingErr.(type) {
-			case *sftp.StatusError, *os.PathError:
-				isRegularError = true
-			}
 		}
 		// If not a regular SFTP error code then check the connection
 		if !isRegularError {
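The hunk above swaps pkg/errors' Cause-based type switch for the stdlib's errors.Is/errors.As, which inspect every error in the wrapped chain rather than a single unwrapped cause. A standalone sketch of the same classification pattern (MyStatusError is a made-up stand-in for *sftp.StatusError):

package main

import (
	"errors"
	"fmt"
	"os"
)

// MyStatusError is a made-up error type standing in for *sftp.StatusError.
type MyStatusError struct{ Code int }

func (e *MyStatusError) Error() string { return fmt.Sprintf("status %d", e.Code) }

func isRegular(err error) bool {
	var statusErr *MyStatusError
	var pathErr *os.PathError
	switch {
	case errors.Is(err, os.ErrNotExist):
		return true // matches the sentinel even when it is wrapped
	case errors.As(err, &statusErr):
		return true // matches a typed error anywhere in the chain
	case errors.As(err, &pathErr):
		return true
	}
	return false
}

func main() {
	wrapped := fmt.Errorf("reading dir: %w", &MyStatusError{Code: 4})
	fmt.Println(isRegular(wrapped))                                // true
	fmt.Println(isRegular(fmt.Errorf("open: %w", os.ErrNotExist))) // true
	fmt.Println(isRegular(errors.New("connection reset")))         // false
}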
@@ -561,7 +561,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if opt.KnownHostsFile != "" {
 		hostcallback, err := knownhosts.New(env.ShellExpand(opt.KnownHostsFile))
 		if err != nil {
-			return nil, errors.Wrap(err, "couldn't parse known_hosts_file")
+			return nil, fmt.Errorf("couldn't parse known_hosts_file: %w", err)
 		}
 		sshConfig.HostKeyCallback = hostcallback
 	}
@@ -579,20 +579,20 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if (opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {
 		sshAgentClient, _, err := sshagent.New()
 		if err != nil {
-			return nil, errors.Wrap(err, "couldn't connect to ssh-agent")
+			return nil, fmt.Errorf("couldn't connect to ssh-agent: %w", err)
 		}
 		signers, err := sshAgentClient.Signers()
 		if err != nil {
-			return nil, errors.Wrap(err, "couldn't read ssh agent signers")
+			return nil, fmt.Errorf("couldn't read ssh agent signers: %w", err)
 		}
 		if keyFile != "" {
 			pubBytes, err := ioutil.ReadFile(keyFile + ".pub")
 			if err != nil {
-				return nil, errors.Wrap(err, "failed to read public key file")
+				return nil, fmt.Errorf("failed to read public key file: %w", err)
 			}
 			pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes)
 			if err != nil {
-				return nil, errors.Wrap(err, "failed to parse public key file")
+				return nil, fmt.Errorf("failed to parse public key file: %w", err)
 			}
 			pubM := pub.Marshal()
 			found := false
@@ -617,13 +617,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		if opt.KeyPem == "" {
 			key, err = ioutil.ReadFile(keyFile)
 			if err != nil {
-				return nil, errors.Wrap(err, "failed to read private key file")
+				return nil, fmt.Errorf("failed to read private key file: %w", err)
 			}
 		} else {
 			// wrap in quotes because the config is a coming as a literal without them.
 			opt.KeyPem, err = strconv.Unquote("\"" + opt.KeyPem + "\"")
 			if err != nil {
-				return nil, errors.Wrap(err, "pem key not formatted properly")
+				return nil, fmt.Errorf("pem key not formatted properly: %w", err)
 			}
 			key = []byte(opt.KeyPem)
 		}
@@ -641,19 +641,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(clearpass))
 		}
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to parse private key file")
+			return nil, fmt.Errorf("failed to parse private key file: %w", err)
 		}

 		// If a public key has been specified then use that
 		if pubkeyFile != "" {
 			certfile, err := ioutil.ReadFile(pubkeyFile)
 			if err != nil {
-				return nil, errors.Wrap(err, "unable to read cert file")
+				return nil, fmt.Errorf("unable to read cert file: %w", err)
 			}

 			pk, _, _, _, err := ssh.ParseAuthorizedKey(certfile)
 			if err != nil {
-				return nil, errors.Wrap(err, "unable to parse cert file")
+				return nil, fmt.Errorf("unable to parse cert file: %w", err)
 			}

 			// And the signer for this, which includes the private key signer
@@ -669,7 +669,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			}
 			pubsigner, err := ssh.NewCertSigner(cert, signer)
 			if err != nil {
-				return nil, errors.Wrap(err, "error generating cert signer")
+				return nil, fmt.Errorf("error generating cert signer: %w", err)
 			}
 			sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(pubsigner))
 		} else {
@@ -759,7 +759,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
 	// Make a connection and pool it to return errors early
 	c, err := f.getSftpConnection(ctx)
 	if err != nil {
-		return nil, errors.Wrap(err, "NewFs")
+		return nil, fmt.Errorf("NewFs: %w", err)
 	}
 	cwd, err := c.sftpClient.Getwd()
 	f.putSftpConnection(&c, nil)
@@ -840,7 +840,7 @@ func (f *Fs) dirExists(ctx context.Context, dir string) (bool, error) {
 	}
 	c, err := f.getSftpConnection(ctx)
 	if err != nil {
-		return false, errors.Wrap(err, "dirExists")
+		return false, fmt.Errorf("dirExists: %w", err)
 	}
 	info, err := c.sftpClient.Stat(dir)
 	f.putSftpConnection(&c, err)
@@ -848,7 +848,7 @@ func (f *Fs) dirExists(ctx context.Context, dir string) (bool, error) {
 		if os.IsNotExist(err) {
 			return false, nil
 		}
-		return false, errors.Wrap(err, "dirExists stat failed")
+		return false, fmt.Errorf("dirExists stat failed: %w", err)
 	}
 	if !info.IsDir() {
 		return false, fs.ErrorIsFile
@@ -869,7 +869,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	root := path.Join(f.absRoot, dir)
 	ok, err := f.dirExists(ctx, root)
 	if err != nil {
-		return nil, errors.Wrap(err, "List failed")
+		return nil, fmt.Errorf("List failed: %w", err)
 	}
 	if !ok {
 		return nil, fs.ErrorDirNotFound
@@ -880,12 +880,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	}
 	c, err := f.getSftpConnection(ctx)
 	if err != nil {
-		return nil, errors.Wrap(err, "List")
+		return nil, fmt.Errorf("List: %w", err)
 	}
 	infos, err := c.sftpClient.ReadDir(sftpDir)
 	f.putSftpConnection(&c, err)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error listing %q", dir)
+		return nil, fmt.Errorf("error listing %q: %w", dir, err)
 	}
 	for _, info := range infos {
 		remote := path.Join(dir, info.Name())
@@ -924,7 +924,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	err := f.mkParentDir(ctx, src.Remote())
 	if err != nil {
-		return nil, errors.Wrap(err, "Put mkParentDir failed")
+		return nil, fmt.Errorf("Put mkParentDir failed: %w", err)
 	}
 	// Temporary object under construction
 	o := &Object{
@@ -959,7 +959,7 @@ func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
 	}
 	ok, err := f.dirExists(ctx, dirPath)
 	if err != nil {
-		return errors.Wrap(err, "mkdir dirExists failed")
+		return fmt.Errorf("mkdir dirExists failed: %w", err)
 	}
 	if ok {
 		return nil
@@ -971,12 +971,12 @@ func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
 	}
 	c, err := f.getSftpConnection(ctx)
 	if err != nil {
-		return errors.Wrap(err, "mkdir")
+		return fmt.Errorf("mkdir: %w", err)
 	}
 	err = c.sftpClient.Mkdir(dirPath)
 	f.putSftpConnection(&c, err)
 	if err != nil {
-		return errors.Wrapf(err, "mkdir %q failed", dirPath)
+		return fmt.Errorf("mkdir %q failed: %w", dirPath, err)
 	}
 	return nil
 }
@@ -993,7 +993,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	// delete recursively with RemoveDirectory
 	entries, err := f.List(ctx, dir)
 	if err != nil {
-		return errors.Wrap(err, "Rmdir")
+		return fmt.Errorf("Rmdir: %w", err)
 	}
 	if len(entries) != 0 {
 		return fs.ErrorDirectoryNotEmpty
@@ -1002,7 +1002,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	root := path.Join(f.absRoot, dir)
 	c, err := f.getSftpConnection(ctx)
 	if err != nil {
-		return errors.Wrap(err, "Rmdir")
+		return fmt.Errorf("Rmdir: %w", err)
 	}
 	err = c.sftpClient.RemoveDirectory(root)
 	f.putSftpConnection(&c, err)
@@ -1018,11 +1018,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 	err := f.mkParentDir(ctx, remote)
 	if err != nil {
-		return nil, errors.Wrap(err, "Move mkParentDir failed")
+		return nil, fmt.Errorf("Move mkParentDir failed: %w", err)
 	}
 	c, err := f.getSftpConnection(ctx)
 	if err != nil {
-		return nil, errors.Wrap(err, "Move")
+		return nil, fmt.Errorf("Move: %w", err)
 	}
 	err = c.sftpClient.Rename(
 		srcObj.path(),
@@ -1030,11 +1030,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	)
 	f.putSftpConnection(&c, err)
 	if err != nil {
-		return nil, errors.Wrap(err, "Move Rename failed")
+		return nil, fmt.Errorf("Move Rename failed: %w", err)
 	}
 	dstObj, err := f.NewObject(ctx, remote)
 	if err != nil {
-		return nil, errors.Wrap(err, "Move NewObject failed")
+		return nil, fmt.Errorf("Move NewObject failed: %w", err)
 	}
 	return dstObj, nil
 }
@@ -1059,7 +1059,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	// Check if destination exists
 	ok, err := f.dirExists(ctx, dstPath)
 	if err != nil {
-		return errors.Wrap(err, "DirMove dirExists dst failed")
+		return fmt.Errorf("DirMove dirExists dst failed: %w", err)
 	}
 	if ok {
 		return fs.ErrorDirExists
@@ -1068,13 +1068,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	// Make sure the parent directory exists
 	err = f.mkdir(ctx, path.Dir(dstPath))
 	if err != nil {
-		return errors.Wrap(err, "DirMove mkParentDir dst failed")
+		return fmt.Errorf("DirMove mkParentDir dst failed: %w", err)
 	}

 	// Do the move
 	c, err := f.getSftpConnection(ctx)
 	if err != nil {
-		return errors.Wrap(err, "DirMove")
+		return fmt.Errorf("DirMove: %w", err)
 	}
 	err = c.sftpClient.Rename(
 		srcPath,
@@ -1082,7 +1082,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	)
 	f.putSftpConnection(&c, err)
 	if err != nil {
-		return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
+		return fmt.Errorf("DirMove Rename(%q,%q) failed: %w", srcPath, dstPath, err)
 	}
 	return nil
 }
@@ -1094,13 +1094,13 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {

 	c, err := f.getSftpConnection(ctx)
 	if err != nil {
-		return nil, errors.Wrap(err, "run: get SFTP connection")
+		return nil, fmt.Errorf("run: get SFTP connection: %w", err)
 	}
 	defer f.putSftpConnection(&c, err)

 	session, err := c.sshClient.NewSession()
 	if err != nil {
-		return nil, errors.Wrap(err, "run: get SFTP session")
+		return nil, fmt.Errorf("run: get SFTP session: %w", err)
 	}
 	defer func() {
 		_ = session.Close()
@@ -1112,7 +1112,7 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {

 	err = session.Run(cmd)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to run %q: %s", cmd, stderr.Bytes())
+		return nil, fmt.Errorf("failed to run %q: %s: %w", cmd, stderr.Bytes(), err)
 	}

 	return stdout.Bytes(), nil
@@ -1155,8 +1155,8 @@ func (f *Fs) Hashes() hash.Set {
 	}

 	changed := false
-	md5Works := checkHash([]string{"md5sum", "md5 -r"}, "d41d8cd98f00b204e9800998ecf8427e", &f.opt.Md5sumCommand, &changed)
-	sha1Works := checkHash([]string{"sha1sum", "sha1 -r"}, "da39a3ee5e6b4b0d3255bfef95601890afd80709", &f.opt.Sha1sumCommand, &changed)
+	md5Works := checkHash([]string{"md5sum", "md5 -r", "rclone md5sum"}, "d41d8cd98f00b204e9800998ecf8427e", &f.opt.Md5sumCommand, &changed)
+	sha1Works := checkHash([]string{"sha1sum", "sha1 -r", "rclone sha1sum"}, "da39a3ee5e6b4b0d3255bfef95601890afd80709", &f.opt.Sha1sumCommand, &changed)

 	if changed {
 		f.m.Set("md5sum_command", f.opt.Md5sumCommand)
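checkHash above finds a working remote checksum command by hashing empty input and comparing against the known digest of the empty string; this change adds "rclone md5sum"/"rclone sha1sum" as extra candidates for remotes where rclone itself is installed. A rough local analogue of the probing idea (running candidates through "sh -c" with empty stdin is an assumption for illustration, not how rclone talks to the remote):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// emptyMD5 is the well-known MD5 of zero bytes.
const emptyMD5 = "d41d8cd98f00b204e9800998ecf8427e"

// firstWorkingHashCommand tries each candidate on empty input and returns
// the first one whose output starts with the expected digest.
func firstWorkingHashCommand(candidates []string, want string) (string, bool) {
	for _, candidate := range candidates {
		out, err := exec.Command("sh", "-c", candidate+" </dev/null").Output()
		if err == nil && strings.HasPrefix(strings.TrimSpace(string(out)), want) {
			return candidate, true
		}
	}
	return "", false
}

func main() {
	cmd, ok := firstWorkingHashCommand([]string{"md5sum", "md5 -r", "rclone md5sum"}, emptyMD5)
	fmt.Println(cmd, ok)
}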
@@ -1186,7 +1186,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
|||||||
}
|
}
|
||||||
stdout, err := f.run(ctx, "df -k "+escapedPath)
|
stdout, err := f.run(ctx, "df -k "+escapedPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "your remote may not support About")
|
return nil, fmt.Errorf("your remote may not support About: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
usageTotal, usageUsed, usageAvail := parseUsage(stdout)
|
usageTotal, usageUsed, usageAvail := parseUsage(stdout)
|
||||||
@@ -1257,12 +1257,12 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
|
|||||||
|
|
||||||
c, err := o.fs.getSftpConnection(ctx)
|
c, err := o.fs.getSftpConnection(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", errors.Wrap(err, "Hash get SFTP connection")
|
return "", fmt.Errorf("Hash get SFTP connection: %w", err)
|
||||||
}
|
}
|
||||||
session, err := c.sshClient.NewSession()
|
session, err := c.sshClient.NewSession()
|
||||||
o.fs.putSftpConnection(&c, err)
|
o.fs.putSftpConnection(&c, err)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", errors.Wrap(err, "Hash put SFTP connection")
|
return "", fmt.Errorf("Hash put SFTP connection: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var stdout, stderr bytes.Buffer
|
var stdout, stderr bytes.Buffer
|
||||||
@@ -1366,7 +1366,7 @@ func (o *Object) setMetadata(info os.FileInfo) {
|
|||||||
func (f *Fs) stat(ctx context.Context, remote string) (info os.FileInfo, err error) {
|
func (f *Fs) stat(ctx context.Context, remote string) (info os.FileInfo, err error) {
|
||||||
c, err := f.getSftpConnection(ctx)
|
c, err := f.getSftpConnection(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "stat")
|
return nil, fmt.Errorf("stat: %w", err)
|
||||||
}
|
}
|
||||||
absPath := path.Join(f.absRoot, remote)
|
absPath := path.Join(f.absRoot, remote)
|
||||||
info, err = c.sftpClient.Stat(absPath)
|
info, err = c.sftpClient.Stat(absPath)
|
||||||
@@ -1381,7 +1381,7 @@ func (o *Object) stat(ctx context.Context) error {
|
|||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
return fs.ErrorObjectNotFound
|
return fs.ErrorObjectNotFound
|
||||||
}
|
}
|
||||||
return errors.Wrap(err, "stat failed")
|
return fmt.Errorf("stat failed: %w", err)
|
||||||
}
|
}
|
||||||
if info.IsDir() {
|
if info.IsDir() {
|
||||||
return fs.ErrorIsDir
|
return fs.ErrorIsDir
|
||||||
@@ -1399,16 +1399,16 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
|||||||
}
|
}
|
||||||
c, err := o.fs.getSftpConnection(ctx)
|
c, err := o.fs.getSftpConnection(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "SetModTime")
|
return fmt.Errorf("SetModTime: %w", err)
|
||||||
}
|
}
|
||||||
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
|
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
|
||||||
o.fs.putSftpConnection(&c, err)
|
o.fs.putSftpConnection(&c, err)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "SetModTime failed")
|
return fmt.Errorf("SetModTime failed: %w", err)
|
||||||
}
|
}
|
||||||
err = o.stat(ctx)
|
err = o.stat(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "SetModTime stat failed")
|
return fmt.Errorf("SetModTime stat failed: %w", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -1487,17 +1487,17 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
}
|
}
|
||||||
c, err := o.fs.getSftpConnection(ctx)
|
c, err := o.fs.getSftpConnection(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "Open")
|
return nil, fmt.Errorf("Open: %w", err)
|
||||||
}
|
}
|
||||||
sftpFile, err := c.sftpClient.Open(o.path())
|
sftpFile, err := c.sftpClient.Open(o.path())
|
||||||
o.fs.putSftpConnection(&c, err)
|
o.fs.putSftpConnection(&c, err)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "Open failed")
|
return nil, fmt.Errorf("Open failed: %w", err)
|
||||||
}
|
}
|
||||||
if offset > 0 {
|
if offset > 0 {
|
||||||
off, err := sftpFile.Seek(offset, io.SeekStart)
|
off, err := sftpFile.Seek(offset, io.SeekStart)
|
||||||
 if err != nil || off != offset {
-return nil, errors.Wrap(err, "Open Seek failed")
+return nil, fmt.Errorf("Open Seek failed: %w", err)
 }
 }
 in = readers.NewLimitedReadCloser(o.fs.newObjectReader(sftpFile), limit)
@@ -1526,12 +1526,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 o.sha1sum = nil
 c, err := o.fs.getSftpConnection(ctx)
 if err != nil {
-return errors.Wrap(err, "Update")
+return fmt.Errorf("Update: %w", err)
 }
 file, err := c.sftpClient.OpenFile(o.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
 o.fs.putSftpConnection(&c, err)
 if err != nil {
-return errors.Wrap(err, "Update Create failed")
+return fmt.Errorf("Update Create failed: %w", err)
 }
 // remove the file if upload failed
 remove := func() {
@@ -1551,18 +1551,18 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 _, err = file.ReadFrom(&sizeReader{Reader: in, size: src.Size()})
 if err != nil {
 remove()
-return errors.Wrap(err, "Update ReadFrom failed")
+return fmt.Errorf("Update ReadFrom failed: %w", err)
 }
 err = file.Close()
 if err != nil {
 remove()
-return errors.Wrap(err, "Update Close failed")
+return fmt.Errorf("Update Close failed: %w", err)
 }

 // Set the mod time - this stats the object if o.fs.opt.SetModTime == true
 err = o.SetModTime(ctx, src.ModTime(ctx))
 if err != nil {
-return errors.Wrap(err, "Update SetModTime failed")
+return fmt.Errorf("Update SetModTime failed: %w", err)
 }

 // Stat the file after the upload to read its stats back if o.fs.opt.SetModTime == false
@@ -1576,7 +1576,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 o.size = src.Size()
 o.mode = os.FileMode(0666) // regular file
 } else if err != nil {
-return errors.Wrap(err, "Update stat failed")
+return fmt.Errorf("Update stat failed: %w", err)
 }
 }

@@ -1587,7 +1587,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 func (o *Object) Remove(ctx context.Context) error {
 c, err := o.fs.getSftpConnection(ctx)
 if err != nil {
-return errors.Wrap(err, "Remove")
+return fmt.Errorf("Remove: %w", err)
 }
 err = c.sftpClient.Remove(o.path())
 o.fs.putSftpConnection(&c, err)
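Note: the hunks above, and most of those that follow, apply one mechanical rewrite: `errors.Wrap(err, "msg")` from `github.com/pkg/errors` becomes the standard library's `fmt.Errorf("msg: %w", err)`. A minimal sketch of the equivalence; the `openFile` helper and its `errNotFound` sentinel are invented for illustration and are not part of rclone:

```go
package main

import (
	"errors"
	"fmt"
)

// errNotFound is a hypothetical sentinel, standing in for errors such as
// fs.ErrorObjectNotFound in the real code.
var errNotFound = errors.New("not found")

// openFile is an invented helper that always fails, so there is
// something to wrap.
func openFile(name string) error {
	return fmt.Errorf("open %s: %w", name, errNotFound)
}

func main() {
	err := openFile("a.txt")
	// %w keeps the wrapped error reachable, so errors.Is still matches
	// the sentinel through the chain - the property the old
	// errors.Wrap/errors.Cause pair provided.
	fmt.Println(errors.Is(err, errNotFound)) // true
	fmt.Println(err)                         // open a.txt: not found
}
```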
@@ -2,10 +2,9 @@
 package api

 import (
+"errors"
 "fmt"
 "time"
-
-"github.com/pkg/errors"
 )

 // ListRequestSelect should be used in $select for Items/Children
@@ -122,7 +121,7 @@ type UploadFinishResponse struct {
 // ID returns the ID of the first response if available
 func (finish *UploadFinishResponse) ID() (string, error) {
 if finish.Error {
-return "", errors.Errorf("upload failed: %s (%d)", finish.ErrorMessage, finish.ErrorCode)
+return "", fmt.Errorf("upload failed: %s (%d)", finish.ErrorMessage, finish.ErrorCode)
 }
 if len(finish.Value) == 0 {
 return "", errors.New("upload failed: no results returned")
@@ -74,6 +74,7 @@ Which is control chars + [' ', '*', '.', '/', ':', '<', '>', '?', '|']
 import (
 "context"
 "encoding/json"
+"errors"
 "fmt"
 "io"
 "io/ioutil"
@@ -83,7 +84,6 @@ import (
 "strings"
 "time"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/backend/sharefile/api"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/config"
@@ -144,7 +144,7 @@ func init() {
 subdomain := auth.Form.Get("subdomain")
 apicp := auth.Form.Get("apicp")
 if subdomain == "" || apicp == "" {
-return errors.Errorf("subdomain or apicp not found in response: %+v", auth.Form)
+return fmt.Errorf("subdomain or apicp not found in response: %+v", auth.Form)
 }
 endpoint := "https://" + subdomain + "." + apicp
 m.Set("endpoint", endpoint)
@@ -334,7 +334,7 @@ func (f *Fs) readMetaDataForIDPath(ctx context.Context, id, path string, directo
 }
 return nil, fs.ErrorDirNotFound
 }
-return nil, errors.Wrap(err, "couldn't find item")
+return nil, fmt.Errorf("couldn't find item: %w", err)
 }
 if directoriesOnly && item.Type != api.ItemTypeFolder {
 return nil, fs.ErrorIsFile
@@ -386,10 +386,10 @@ func errorHandler(resp *http.Response) error {

 func checkUploadChunkSize(cs fs.SizeSuffix) error {
 if cs < minChunkSize {
-return errors.Errorf("ChunkSize: %s is less than %s", cs, minChunkSize)
+return fmt.Errorf("ChunkSize: %s is less than %s", cs, minChunkSize)
 }
 if cs > maxChunkSize {
-return errors.Errorf("ChunkSize: %s is greater than %s", cs, maxChunkSize)
+return fmt.Errorf("ChunkSize: %s is greater than %s", cs, maxChunkSize)
 }
 return nil
 }
@@ -444,7 +444,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 var ts *oauthutil.TokenSource
 client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
 if err != nil {
-return nil, errors.Wrap(err, "failed to configure sharefile")
+return nil, fmt.Errorf("failed to configure sharefile: %w", err)
 }

 ci := fs.GetConfig(ctx)
@@ -477,23 +477,23 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 const serverTimezone = "America/New_York"
 timezone, err := tzdata.Open(serverTimezone)
 if err != nil {
-return nil, errors.Wrap(err, "failed to open timezone db")
+return nil, fmt.Errorf("failed to open timezone db: %w", err)
 }
 tzdata, err := ioutil.ReadAll(timezone)
 if err != nil {
-return nil, errors.Wrap(err, "failed to read timezone")
+return nil, fmt.Errorf("failed to read timezone: %w", err)
 }
 _ = timezone.Close()
 f.location, err = time.LoadLocationFromTZData(serverTimezone, tzdata)
 if err != nil {
-return nil, errors.Wrap(err, "failed to load location from timezone")
+return nil, fmt.Errorf("failed to load location from timezone: %w", err)
 }

 // Find ID of user's root folder
 if opt.RootFolderID == "" {
 item, err := f.readMetaDataForID(ctx, opt.RootFolderID, true, false)
 if err != nil {
-return nil, errors.Wrap(err, "couldn't find root ID")
+return nil, fmt.Errorf("couldn't find root ID: %w", err)
 }
 f.rootID = item.ID
 } else {
@@ -639,7 +639,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return "", errors.Wrap(err, "CreateDir")
+return "", fmt.Errorf("CreateDir: %w", err)
 }
 return info.ID, nil
 }
@@ -671,7 +671,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return found, errors.Wrap(err, "couldn't list files")
+return found, fmt.Errorf("couldn't list files: %w", err)
 }
 for i := range result.Value {
 item := &result.Value[i]
@@ -825,7 +825,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 return true
 })
 if err != nil {
-return errors.Wrap(err, "purgeCheck")
+return fmt.Errorf("purgeCheck: %w", err)
 }
 if found {
 return fs.ErrorDirectoryNotEmpty
@@ -900,7 +900,7 @@ func (f *Fs) updateItem(ctx context.Context, id, leaf, directoryID string, modTi
 // Parse it back into a time
 newModTime, err := time.Parse(time.RFC3339Nano, isoTime)
 if err != nil {
-return nil, errors.Wrap(err, "updateItem: time parse")
+return nil, fmt.Errorf("updateItem: time parse: %w", err)
 }
 modTime = &newModTime
 }
@@ -934,7 +934,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
 // To demonstrate bug
 // item, err = f.updateItem(ctx, id, newLeaf, newDirectoryID, nil)
 // if err != nil {
-// return nil, errors.Wrap(err, "Move rename leaf")
+// return nil, fmt.Errorf("Move rename leaf: %w", err)
 // }
 // return item, nil
 doRenameLeaf := oldLeaf != newLeaf
@@ -947,7 +947,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
 tmpLeaf := newLeaf + "." + random.String(8)
 item, err = f.updateItem(ctx, id, tmpLeaf, "", nil)
 if err != nil {
-return nil, errors.Wrap(err, "Move rename leaf")
+return nil, fmt.Errorf("Move rename leaf: %w", err)
 }
 }

@@ -956,7 +956,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
 if doMove {
 item, err = f.updateItem(ctx, id, "", newDirectoryID, nil)
 if err != nil {
-return nil, errors.Wrap(err, "Move directory")
+return nil, fmt.Errorf("Move directory: %w", err)
 }
 }

@@ -964,7 +964,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
 if doRenameLeaf {
 item, err = f.updateItem(ctx, id, newLeaf, "", nil)
 if err != nil {
-return nil, errors.Wrap(err, "Move rename leaf")
+return nil, fmt.Errorf("Move rename leaf: %w", err)
 }
 }

@@ -1079,7 +1079,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj

 sameName := strings.ToLower(srcLeaf) == strings.ToLower(dstLeaf)
 if sameName && srcParentID == dstParentID {
-return nil, errors.Errorf("copy: can't copy to a file in the same directory whose name only differs in case: %q vs %q", srcLeaf, dstLeaf)
+return nil, fmt.Errorf("copy: can't copy to a file in the same directory whose name only differs in case: %q vs %q", srcLeaf, dstLeaf)
 }

 // Discover whether we can just copy directly or not
@@ -1095,7 +1095,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
 if err == fs.ErrorObjectNotFound || err == fs.ErrorDirNotFound {
 directCopy = true
 } else if err != nil {
-return nil, errors.Wrap(err, "copy: failed to examine destination dir")
+return nil, fmt.Errorf("copy: failed to examine destination dir: %w", err)
 } else {
 // otherwise need to copy via a temporary directory
 }
@@ -1109,17 +1109,17 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
 tmpDir := "rclone-temp-dir-" + random.String(16)
 err = f.Mkdir(ctx, tmpDir)
 if err != nil {
-return nil, errors.Wrap(err, "copy: failed to make temp dir")
+return nil, fmt.Errorf("copy: failed to make temp dir: %w", err)
 }
 defer func() {
 rmdirErr := f.Rmdir(ctx, tmpDir)
 if rmdirErr != nil && err == nil {
-err = errors.Wrap(rmdirErr, "copy: failed to remove temp dir")
+err = fmt.Errorf("copy: failed to remove temp dir: %w", rmdirErr)
 }
 }()
 tmpDirID, err := f.dirCache.FindDir(ctx, tmpDir, false)
 if err != nil {
-return nil, errors.Wrap(err, "copy: failed to find temp dir")
+return nil, fmt.Errorf("copy: failed to find temp dir: %w", err)
 }
 copyTargetDirID = tmpDirID
 }
@@ -1221,7 +1221,7 @@ func (o *Object) Size() int64 {
 // setMetaData sets the metadata from info
 func (o *Object) setMetaData(info *api.Item) (err error) {
 if info.Type != api.ItemTypeFile {
-return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
+return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile)
 }
 o.hasMetaData = true
 o.size = info.Size
@@ -1302,7 +1302,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "open: fetch download specification")
+return nil, fmt.Errorf("open: fetch download specification: %w", err)
 }

 fs.FixRangeOption(options, o.size)
@@ -1317,7 +1317,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "open")
+return nil, fmt.Errorf("open: %w", err)
 }
 return resp.Body, err
 }
@@ -1373,7 +1373,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return errors.Wrap(err, "upload get specification")
+return fmt.Errorf("upload get specification: %w", err)
 }

 // If file is large then upload in parts
@@ -1398,7 +1398,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return errors.Wrap(err, "upload file")
+return fmt.Errorf("upload file: %w", err)
 }
 return o.checkUploadResponse(ctx, &finish)
 }
@@ -1434,7 +1434,7 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return errors.Wrap(err, "remove")
+return fmt.Errorf("remove: %w", err)
 }
 return nil
 }
@@ -1443,7 +1443,7 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
 func (o *Object) Remove(ctx context.Context) error {
 err := o.readMetaData(ctx)
 if err != nil {
-return errors.Wrap(err, "Remove: Failed to read metadata")
+return fmt.Errorf("Remove: Failed to read metadata: %w", err)
 }
 return o.fs.remove(ctx, o.id)
 }
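The `setMetaData` hunk above converts `errors.Wrapf(fs.ErrorNotAFile, ...)`, where the wrapped value is a sentinel rather than a returned `err`; the sentinel moves to the end of the format string so it is the `%w` operand. A sketch of that shape, using a stand-in sentinel rather than the real `fs.ErrorNotAFile`:

```go
package main

import (
	"errors"
	"fmt"
)

// errNotAFile stands in for fs.ErrorNotAFile.
var errNotAFile = errors.New("object is not a file")

func setMetaData(remote, kind string) error {
	if kind != "file" {
		// Context first, sentinel last as the %w operand, so callers
		// can still detect it with errors.Is.
		return fmt.Errorf("%q is %q: %w", remote, kind, errNotAFile)
	}
	return nil
}

func main() {
	err := setMetaData("dir/sub", "folder")
	fmt.Println(errors.Is(err, errNotAFile)) // true
}
```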
@@ -15,7 +15,6 @@ import (
 "strings"
 "sync"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/backend/sharefile/api"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/accounting"
@@ -55,7 +54,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 case "threaded":
 streamed = false
 default:
-return nil, errors.Errorf("can't use method %q with newLargeUpload", info.Method)
+return nil, fmt.Errorf("can't use method %q with newLargeUpload", info.Method)
 }

 threads := f.ci.Transfers
@@ -87,7 +86,7 @@ func (up *largeUpload) parseUploadFinishResponse(respBody []byte) (err error) {
 err = json.Unmarshal(respBody, &finish)
 if err != nil {
 // Sometimes the unmarshal fails in which case return the body
-return errors.Errorf("upload: bad response: %q", bytes.TrimSpace(respBody))
+return fmt.Errorf("upload: bad response: %q", bytes.TrimSpace(respBody))
 }
 return up.o.checkUploadResponse(up.ctx, &finish)
 }
@@ -240,7 +239,7 @@ outer:

 // check size read is correct
 if eof && err == nil && up.size >= 0 && up.size != offset {
-err = errors.Errorf("upload: short read: read %d bytes expected %d", up.size, offset)
+err = fmt.Errorf("upload: short read: read %d bytes expected %d", up.size, offset)
 }

 // read any errors
@@ -3,6 +3,7 @@ package sia
 import (
 "context"
 "encoding/json"
+"errors"
 "fmt"
 "io"
 "net/http"
@@ -11,18 +12,16 @@ import (
 "strings"
 "time"

-"github.com/rclone/rclone/fs/config"
-"github.com/rclone/rclone/lib/encoder"
-
-"github.com/pkg/errors"
 "github.com/rclone/rclone/backend/sia/api"
 "github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/config"
 "github.com/rclone/rclone/fs/config/configmap"
 "github.com/rclone/rclone/fs/config/configstruct"
 "github.com/rclone/rclone/fs/config/obscure"
 "github.com/rclone/rclone/fs/fserrors"
 "github.com/rclone/rclone/fs/fshttp"
 "github.com/rclone/rclone/fs/hash"
+"github.com/rclone/rclone/lib/encoder"
 "github.com/rclone/rclone/lib/pacer"
 "github.com/rclone/rclone/lib/rest"
 )
@@ -460,7 +459,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 if opt.APIPassword != "" {
 opt.APIPassword, err = obscure.Reveal(opt.APIPassword)
 if err != nil {
-return nil, errors.Wrap(err, "couldn't decrypt API password")
+return nil, fmt.Errorf("couldn't decrypt API password: %w", err)
 }
 f.srv.SetUserPass("", opt.APIPassword)
 }
@@ -474,7 +473,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 }
 _, err := f.NewObject(ctx, remote)
 if err != nil {
-if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
+if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorNotAFile) {
 // File doesn't exist so return old f
 f.root = root
 return f, nil
@@ -493,7 +492,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 func errorHandler(resp *http.Response) error {
 body, err := rest.ReadBody(resp)
 if err != nil {
-return errors.Wrap(err, "error when trying to read error body")
+return fmt.Errorf("error when trying to read error body: %w", err)
 }
 // Decode error response
 errResponse := new(api.Error)
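The sia hunks also swap `errors.Cause(err) == sentinel` for `errors.Is(err, sentinel)`. `errors.Cause` only unwraps errors created by pkg/errors, while `errors.Is` walks any chain built with `%w`. A sketch of the difference; the sentinel and the wrap depth are invented for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

// errObjectNotFound stands in for fs.ErrorObjectNotFound.
var errObjectNotFound = errors.New("object not found")

func main() {
	// Two layers of %w wrapping, roughly what NewFs might see after a
	// failed NewObject call deeper in the stack.
	err := fmt.Errorf("NewFs: %w", fmt.Errorf("newObject: %w", errObjectNotFound))

	// A direct comparison fails on a wrapped error...
	fmt.Println(err == errObjectNotFound) // false
	// ...but errors.Is unwraps until it finds a match.
	fmt.Println(errors.Is(err, errObjectNotFound)) // true
}
```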
@@ -14,6 +14,7 @@ To work around this we use the remote "TestSugarSync:Test" to test with.

 import (
 "context"
+"errors"
 "fmt"
 "io"
 "net/http"
@@ -25,7 +26,6 @@ import (
 "sync"
 "time"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/backend/sugarsync/api"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/config"
@@ -79,7 +79,7 @@ func init() {
 opt := new(Options)
 err := configstruct.Set(m, opt)
 if err != nil {
-return nil, errors.Wrap(err, "failed to read options")
+return nil, fmt.Errorf("failed to read options: %w", err)
 }

 switch config.State {
@@ -124,7 +124,7 @@ func init() {
 // return shouldRetry(ctx, resp, err)
 //})
 if err != nil {
-return nil, errors.Wrap(err, "failed to get token")
+return nil, fmt.Errorf("failed to get token: %w", err)
 }
 opt.RefreshToken = resp.Header.Get("Location")
 m.Set("refresh_token", opt.RefreshToken)
@@ -309,7 +309,7 @@ func (f *Fs) readMetaDataForID(ctx context.Context, ID string) (info *api.File,
 if resp != nil && resp.StatusCode == http.StatusNotFound {
 return nil, fs.ErrorObjectNotFound
 }
-return nil, errors.Wrap(err, "failed to get authorization")
+return nil, fmt.Errorf("failed to get authorization: %w", err)
 }
 return info, nil
 }
@@ -343,7 +343,7 @@ func (f *Fs) getAuthToken(ctx context.Context) error {
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return errors.Wrap(err, "failed to get authorization")
+return fmt.Errorf("failed to get authorization: %w", err)
 }
 f.opt.Authorization = resp.Header.Get("Location")
 f.authExpiry = authResponse.Expiration
@@ -391,7 +391,7 @@ func (f *Fs) getUser(ctx context.Context) (user *api.User, err error) {
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "failed to get user")
+return nil, fmt.Errorf("failed to get user: %w", err)
 }
 return user, nil
 }
@@ -445,7 +445,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 if strings.HasSuffix(f.opt.RootID, "/contents") {
 f.opt.RootID = f.opt.RootID[:len(f.opt.RootID)-9]
 } else {
-return nil, errors.Errorf("unexpected rootID %q", f.opt.RootID)
+return nil, fmt.Errorf("unexpected rootID %q", f.opt.RootID)
 }
 // Cache the results
 f.m.Set("root_id", f.opt.RootID)
@@ -497,13 +497,13 @@ var findError = regexp.MustCompile(`<h3>(.*?)</h3>`)
 func errorHandler(resp *http.Response) (err error) {
 body, err := rest.ReadBody(resp)
 if err != nil {
-return errors.Wrap(err, "error reading error out of body")
+return fmt.Errorf("error reading error out of body: %w", err)
 }
 match := findError.FindSubmatch(body)
 if match == nil || len(match) < 2 || len(match[1]) == 0 {
-return errors.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
+return fmt.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
 }
-return errors.Errorf("HTTP error %v (%v): %s", resp.StatusCode, resp.Status, match[1])
+return fmt.Errorf("HTTP error %v (%v): %s", resp.StatusCode, resp.Status, match[1])
 }

 // rootSlash returns root with a slash on if it is empty, otherwise empty string
@@ -596,7 +596,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 return "", err
 }
 if !found {
-return "", errors.Errorf("couldn't find ID for newly created directory %q", leaf)
+return "", fmt.Errorf("couldn't find ID for newly created directory %q", leaf)
 }

 }
@@ -636,7 +636,7 @@ OUTER:
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return found, errors.Wrap(err, "couldn't list files")
+return found, fmt.Errorf("couldn't list files: %w", err)
 }
 if fileFn != nil {
 for i := range result.Files {
@@ -873,7 +873,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 srcPath := srcObj.fs.rootSlash() + srcObj.remote
 dstPath := f.rootSlash() + remote
 if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
-return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
+return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
 }

 // Create temporary object
@@ -1247,7 +1247,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 if o.id == "" {
 o.id, err = o.fs.createFile(ctx, directoryID, leaf, fs.MimeType(ctx, src))
 if err != nil {
-return errors.Wrap(err, "failed to create file")
+return fmt.Errorf("failed to create file: %w", err)
 }
 if o.id == "" {
 return errors.New("failed to create file: no ID")
@@ -1280,7 +1280,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 return shouldRetry(ctx, resp, err)
 })
 if err != nil {
-return errors.Wrap(err, "failed to upload file")
+return fmt.Errorf("failed to upload file: %w", err)
 }

 o.hasMetaData = false
@@ -5,6 +5,7 @@ import (
 "bufio"
 "bytes"
 "context"
+"errors"
 "fmt"
 "io"
 "net/url"
@@ -15,7 +16,6 @@ import (

 "github.com/google/uuid"
 "github.com/ncw/swift/v2"
-"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/config"
 "github.com/rclone/rclone/fs/config/configmap"
@@ -381,7 +381,7 @@ func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Con
 if opt.EnvAuth {
 err := c.ApplyEnvironment()
 if err != nil {
-return nil, errors.Wrap(err, "failed to read environment variables")
+return nil, fmt.Errorf("failed to read environment variables: %w", err)
 }
 }
 StorageUrl, AuthToken := c.StorageUrl, c.AuthToken // nolint
@@ -423,7 +423,7 @@ func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Con
 func checkUploadChunkSize(cs fs.SizeSuffix) error {
 const minChunkSize = fs.SizeSuffixBase
 if cs < minChunkSize {
-return errors.Errorf("%s is less than %s", cs, minChunkSize)
+return fmt.Errorf("%s is less than %s", cs, minChunkSize)
 }
 return nil
 }
@@ -499,7 +499,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 }
 err = checkUploadChunkSize(opt.ChunkSize)
 if err != nil {
-return nil, errors.Wrap(err, "swift: chunk size")
+return nil, fmt.Errorf("swift: chunk size: %w", err)
 }

 c, err := swiftConnection(ctx, opt, name)
@@ -670,7 +670,7 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "container listing failed")
+return nil, fmt.Errorf("container listing failed: %w", err)
 }
 for _, container := range containers {
 f.cache.MarkOK(container.Name)
@@ -762,7 +762,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 return shouldRetry(ctx, err)
 })
 if err != nil {
-return nil, errors.Wrap(err, "container listing failed")
+return nil, fmt.Errorf("container listing failed: %w", err)
 }
 var total, objects int64
 for _, c := range containers {
@@ -6,13 +6,13 @@ package tardigrade

 import (
 "context"
+"errors"
 "fmt"
 "io"
 "path"
 "strings"
 "time"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/config"
 "github.com/rclone/rclone/fs/config/configmap"
@@ -64,12 +64,12 @@ func init() {

 access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase)
 if err != nil {
-return nil, errors.Wrap(err, "couldn't create access grant")
+return nil, fmt.Errorf("couldn't create access grant: %w", err)
 }

 serializedAccess, err := access.Serialize()
 if err != nil {
-return nil, errors.Wrap(err, "couldn't serialize access grant")
+return nil, fmt.Errorf("couldn't serialize access grant: %w", err)
 }
 m.Set("satellite_address", satellite)
 m.Set("access_grant", serializedAccess)
@@ -78,7 +78,7 @@ func init() {
 config.FileDeleteKey(name, "api_key")
 config.FileDeleteKey(name, "passphrase")
 } else {
-return nil, errors.Errorf("invalid provider type: %s", provider)
+return nil, fmt.Errorf("invalid provider type: %s", provider)
 }
 return nil, nil
 },
@@ -188,24 +188,24 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
 if f.opts.Access != "" {
 access, err = uplink.ParseAccess(f.opts.Access)
 if err != nil {
-return nil, errors.Wrap(err, "tardigrade: access")
+return nil, fmt.Errorf("tardigrade: access: %w", err)
 }
 }

 if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" {
 access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase)
 if err != nil {
-return nil, errors.Wrap(err, "tardigrade: access")
+return nil, fmt.Errorf("tardigrade: access: %w", err)
 }

 serializedAccess, err := access.Serialize()
 if err != nil {
-return nil, errors.Wrap(err, "tardigrade: access")
+return nil, fmt.Errorf("tardigrade: access: %w", err)
 }

 err = config.SetValueAndSave(f.name, "access_grant", serializedAccess)
 if err != nil {
-return nil, errors.Wrap(err, "tardigrade: access")
+return nil, fmt.Errorf("tardigrade: access: %w", err)
 }
 }

@@ -237,7 +237,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
 if bucketName != "" && bucketPath != "" {
 _, err = project.StatBucket(ctx, bucketName)
 if err != nil {
-return f, errors.Wrap(err, "tardigrade: bucket")
+return f, fmt.Errorf("tardigrade: bucket: %w", err)
 }

 object, err := project.StatObject(ctx, bucketName, bucketPath)
@@ -274,7 +274,7 @@ func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {

 project, err = cfg.OpenProject(ctx, f.access)
 if err != nil {
-return nil, errors.Wrap(err, "tardigrade: project")
+return nil, fmt.Errorf("tardigrade: project: %w", err)
 }

 return
@@ -5,11 +5,11 @@ package tardigrade

 import (
 "context"
+"errors"
 "io"
 "path"
 "time"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/hash"
 "github.com/rclone/rclone/lib/bucket"
@@ -2,11 +2,11 @@ package union

 import (
 "context"
+"fmt"
 "io"
 "sync"
 "time"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/backend/union/upstream"
 "github.com/rclone/rclone/fs"
 )
@@ -82,7 +82,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 multithread(len(entries), func(i int) {
 if o, ok := entries[i].(*upstream.Object); ok {
 err := o.Update(ctx, readers[i], src, options...)
-errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
+if err != nil {
+errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
+}
 } else {
 errs[i] = fs.ErrorNotAFile
 }
@@ -101,7 +103,9 @@ func (o *Object) Remove(ctx context.Context) error {
 multithread(len(entries), func(i int) {
 if o, ok := entries[i].(*upstream.Object); ok {
 err := o.Remove(ctx)
-errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
+if err != nil {
+errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
+}
 } else {
 errs[i] = fs.ErrorNotAFile
 }
@@ -120,7 +124,9 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
 multithread(len(entries), func(i int) {
 if o, ok := entries[i].(*upstream.Object); ok {
 err := o.SetModTime(ctx, t)
-errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
+if err != nil {
+errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
+}
 } else {
 errs[i] = fs.ErrorNotAFile
 }
@@ -39,7 +39,7 @@ func (p *EpLus) lus(upstreams []*upstream.Fs) (*upstream.Fs, error) {
 }

 func (p *EpLus) lusEntries(entries []upstream.Entry) (upstream.Entry, error) {
-var minUsedSpace int64
+var minUsedSpace int64 = math.MaxInt64
 var lusEntry upstream.Entry
 for _, e := range entries {
 space, err := e.UpstreamFs().GetFreeSpace()
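The `lusEntries` hunk above is a behavioural fix rather than part of the error migration: a plain `var minUsedSpace int64` starts at zero, so, judging from the visible context, a search for the smallest non-negative usage value could never replace it; seeding with `math.MaxInt64` lets the first real candidate win. A reduced sketch of the same minimum-search idiom, with invented data:

```go
package main

import (
	"fmt"
	"math"
)

// leastUsed returns the index of the smallest value, or -1 if empty.
func leastUsed(used []int64) int {
	var minUsed int64 = math.MaxInt64 // start above any real value
	idx := -1
	for i, u := range used {
		if u < minUsed { // with minUsed == 0 this would never fire
			minUsed = u
			idx = i
		}
	}
	return idx
}

func main() {
	fmt.Println(leastUsed([]int64{70, 30, 50})) // 1
}
```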
@@ -2,12 +2,12 @@ package policy

 import (
 "context"
+"fmt"
 "math/rand"
 "path"
 "strings"
 "time"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/backend/union/upstream"
 "github.com/rclone/rclone/fs"
 )
@@ -44,7 +44,7 @@ func registerPolicy(name string, p Policy) {
 func Get(name string) (Policy, error) {
 p, ok := policies[strings.ToLower(name)]
 if !ok {
-return nil, errors.Errorf("didn't find policy called %q", name)
+return nil, fmt.Errorf("didn't find policy called %q", name)
 }
 return p, nil
 }
@@ -3,6 +3,7 @@ package union
 import (
 "bufio"
 "context"
+"errors"
 "fmt"
 "io"
 "path"
@@ -11,7 +12,6 @@ import (
 "sync"
 "time"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/backend/union/policy"
 "github.com/rclone/rclone/backend/union/upstream"
 "github.com/rclone/rclone/fs"
@@ -99,7 +99,7 @@ func (f *Fs) wrapEntries(entries ...upstream.Entry) (entry, error) {
 cd: entries,
 }, nil
 default:
-return nil, errors.Errorf("unknown object type %T", e)
+return nil, fmt.Errorf("unknown object type %T", e)
 }
 }

@@ -132,7 +132,9 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 errs := Errors(make([]error, len(upstreams)))
 multithread(len(upstreams), func(i int) {
 err := upstreams[i].Rmdir(ctx, dir)
-errs[i] = errors.Wrap(err, upstreams[i].Name())
+if err != nil {
+errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
+}
 })
 return errs.Err()
 }
@@ -162,7 +164,9 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 errs := Errors(make([]error, len(upstreams)))
 multithread(len(upstreams), func(i int) {
 err := upstreams[i].Mkdir(ctx, dir)
-errs[i] = errors.Wrap(err, upstreams[i].Name())
+if err != nil {
+errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
+}
 })
 return errs.Err()
 }
@@ -186,10 +190,12 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 errs := Errors(make([]error, len(upstreams)))
 multithread(len(upstreams), func(i int) {
 err := upstreams[i].Features().Purge(ctx, dir)
-if errors.Cause(err) == fs.ErrorDirNotFound {
+if errors.Is(err, fs.ErrorDirNotFound) {
 err = nil
 }
-errs[i] = errors.Wrap(err, upstreams[i].Name())
+if err != nil {
+errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
+}
 })
 return errs.Err()
 }
@@ -264,7 +270,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 su := entries[i].UpstreamFs()
 o, ok := entries[i].(*upstream.Object)
 if !ok {
-errs[i] = errors.Wrap(fs.ErrorNotAFile, su.Name())
+errs[i] = fmt.Errorf("%s: %w", su.Name(), fs.ErrorNotAFile)
 return
 }
 var du *upstream.Fs
@@ -274,7 +280,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 }
 if du == nil {
-errs[i] = errors.Wrap(fs.ErrorCantMove, su.Name()+":"+remote)
+errs[i] = fmt.Errorf("%s: %s: %w", su.Name(), remote, fs.ErrorCantMove)
 return
 }
 srcObj := o.UnWrap()
@@ -285,8 +291,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 // Do the Move or Copy
 dstObj, err := do(ctx, srcObj, remote)
-if err != nil || dstObj == nil {
-errs[i] = errors.Wrap(err, su.Name())
+if err != nil {
+errs[i] = fmt.Errorf("%s: %w", su.Name(), err)
+return
+}
+if dstObj == nil {
+errs[i] = fmt.Errorf("%s: destination object not found", su.Name())
 return
 }
 objs[i] = du.WrapObject(dstObj)
@@ -294,7 +304,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 if duFeatures.Move == nil {
 err = srcObj.Remove(ctx)
 if err != nil {
-errs[i] = errors.Wrap(err, su.Name())
+errs[i] = fmt.Errorf("%s: %w", su.Name(), err)
 return
 }
 }
@@ -345,18 +355,20 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 }
 }
 if du == nil {
-errs[i] = errors.Wrap(fs.ErrorCantDirMove, su.Name()+":"+su.Root())
+errs[i] = fmt.Errorf("%s: %s: %w", su.Name(), su.Root(), fs.ErrorCantDirMove)
 return
 }
 err := du.Features().DirMove(ctx, su.Fs, srcRemote, dstRemote)
-errs[i] = errors.Wrap(err, du.Name()+":"+du.Root())
+if err != nil {
+errs[i] = fmt.Errorf("%s: %w", du.Name()+":"+du.Root(), err)
+}
 })
 errs = errs.FilterNil()
 if len(errs) == 0 {
 return nil
 }
 for _, e := range errs {
-if errors.Cause(e) != fs.ErrorDirExists {
+if !errors.Is(e, fs.ErrorDirExists) {
 return errs
 }
 }
@@ -477,7 +489,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
 o, err = u.Put(ctx, readers[i], src, options...)
 }
 if err != nil {
-errs[i] = errors.Wrap(err, u.Name())
+errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
 return
 }
 objs[i] = u.WrapObject(o)
@@ -537,7 +549,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 }
 for _, u := range f.upstreams {
 usg, err := u.About(ctx)
-if errors.Cause(err) == fs.ErrorDirNotFound {
+if errors.Is(err, fs.ErrorDirNotFound) {
 continue
 }
 if err != nil {
@@ -593,7 +605,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 u := f.upstreams[i]
 entries, err := u.List(ctx, dir)
 if err != nil {
-errs[i] = errors.Wrap(err, u.Name())
+errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
 return
 }
 uEntries := make([]upstream.Entry, len(entries))
@@ -604,7 +616,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 })
 if len(errs) == len(errs.FilterNil()) {
 errs = errs.Map(func(e error) error {
-if errors.Cause(e) == fs.ErrorDirNotFound {
+if errors.Is(e, fs.ErrorDirNotFound) {
 return nil
 }
 return e
@@ -657,13 +669,13 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 err = walk.ListR(ctx, u, dir, true, -1, walk.ListAll, callback)
 }
 if err != nil {
-errs[i] = errors.Wrap(err, u.Name())
+errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
 return
 }
 })
 if len(errs) == len(errs.FilterNil()) {
 errs = errs.Map(func(e error) error {
-if errors.Cause(e) == fs.ErrorDirNotFound {
+if errors.Is(e, fs.ErrorDirNotFound) {
 return nil
 }
 return e
@@ -688,7 +700,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 u := f.upstreams[i]
 o, err := u.NewObject(ctx, remote)
 if err != nil && err != fs.ErrorObjectNotFound {
-errs[i] = errors.Wrap(err, u.Name())
+errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
 return
 }
 objs[i] = u.WrapObject(o)
@@ -777,7 +789,9 @@ func (f *Fs) Shutdown(ctx context.Context) error {
 u := f.upstreams[i]
 if do := u.Features().Shutdown; do != nil {
 err := do(ctx)
-errs[i] = errors.Wrap(err, u.Name())
+if err != nil {
+errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
+}
 }
 })
 return errs.Err()
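The union hunks cannot do a purely mechanical swap: `errors.Wrap(err, name)` returns nil when `err` is nil, whereas `fmt.Errorf("...: %w", nil)` always returns a non-nil error, so every call site gains an explicit `if err != nil` guard before filling `errs[i]`. A sketch of the difference; both helpers are invented for illustration:

```go
package main

import "fmt"

// wrapUnconditionally mirrors the broken mechanical translation:
// fmt.Errorf returns a non-nil error even when err == nil.
func wrapUnconditionally(name string, err error) error {
	return fmt.Errorf("%s: %w", name, err)
}

// wrapGuarded mirrors the pattern the hunks actually introduce,
// preserving the "no error" result the old errors.Wrap gave.
func wrapGuarded(name string, err error) error {
	if err == nil {
		return nil
	}
	return fmt.Errorf("%s: %w", name, err)
}

func main() {
	fmt.Println(wrapUnconditionally("remote", nil) == nil) // false - a phantom failure
	fmt.Println(wrapGuarded("remote", nil) == nil)         // true
}
```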
@@ -42,6 +42,7 @@ func TestStandard(t *testing.T) {
 },
 UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
 UnimplementableObjectMethods: []string{"MimeType"},
+QuickTestOK: true,
 })
 }

@@ -64,6 +65,7 @@ func TestRO(t *testing.T) {
 },
 UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
 UnimplementableObjectMethods: []string{"MimeType"},
+QuickTestOK: true,
 })
 }

@@ -86,6 +88,7 @@ func TestNC(t *testing.T) {
 },
 UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
 UnimplementableObjectMethods: []string{"MimeType"},
+QuickTestOK: true,
 })
 }

@@ -108,6 +111,7 @@ func TestPolicy1(t *testing.T) {
 },
 UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
 UnimplementableObjectMethods: []string{"MimeType"},
+QuickTestOK: true,
 })
 }

@@ -130,6 +134,7 @@ func TestPolicy2(t *testing.T) {
 },
 UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
 UnimplementableObjectMethods: []string{"MimeType"},
+QuickTestOK: true,
 })
 }

@@ -152,5 +157,6 @@ func TestPolicy3(t *testing.T) {
 },
 UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
 UnimplementableObjectMethods: []string{"MimeType"},
+QuickTestOK: true,
 })
 }
@@ -2,6 +2,8 @@ package upstream

 import (
 "context"
+"errors"
+"fmt"
 "io"
 "math"
 "path"
@@ -11,7 +13,6 @@ import (
 "sync/atomic"
 "time"

-"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/cache"
 "github.com/rclone/rclone/fs/fspath"
@@ -133,7 +134,7 @@ func (f *Fs) WrapEntry(e fs.DirEntry) (Entry, error) {
 case fs.Directory:
 return f.WrapDirectory(e.(fs.Directory)), nil
 default:
-return nil, errors.Errorf("unknown object type %T", e)
+return nil, fmt.Errorf("unknown object type %T", e)
 }
 }

@@ -335,7 +336,7 @@ func (f *Fs) updateUsageCore(lock bool) error {
 usage, err := f.RootFs.Features().About(ctx)
 if err != nil {
 f.cacheUpdate = false
-if errors.Cause(err) == fs.ErrorDirNotFound {
+if errors.Is(err, fs.ErrorDirNotFound) {
 err = nil
 }
 return err
@@ -3,6 +3,7 @@ package uptobox
 import (
     "context"
     "encoding/json"
+    "errors"
     "fmt"
     "io"
     "io/ioutil"
@@ -14,7 +15,6 @@ import (
     "strings"
     "time"

-    "github.com/pkg/errors"
     "github.com/rclone/rclone/backend/uptobox/api"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/config"
@@ -408,7 +408,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, filename
         return shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return nil, errors.Wrap(err, "couldn't upload file")
+        return nil, fmt.Errorf("couldn't upload file: %w", err)
     }
     return &ul, nil
 }
@@ -438,10 +438,10 @@ func (f *Fs) move(ctx context.Context, dstPath string, fileID string) (err error
         return shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return errors.Wrap(err, "couldn't move file")
+        return fmt.Errorf("couldn't move file: %w", err)
     }
     if info.StatusCode != 0 {
-        return errors.Errorf("move: api error: %d - %s", info.StatusCode, info.Message)
+        return fmt.Errorf("move: api error: %d - %s", info.StatusCode, info.Message)
     }
     return err
 }
@@ -460,10 +460,10 @@ func (f *Fs) updateFileInformation(ctx context.Context, update *api.UpdateFileIn
         return shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return errors.Wrap(err, "couldn't update file info")
+        return fmt.Errorf("couldn't update file info: %w", err)
     }
     if info.StatusCode != 0 {
-        return errors.Errorf("updateFileInfo: api error: %d - %s", info.StatusCode, info.Message)
+        return fmt.Errorf("updateFileInfo: api error: %d - %s", info.StatusCode, info.Message)
     }
     return err
 }
@@ -493,7 +493,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
         return nil, err
     }
     if info.StatusCode != 0 {
-        return nil, errors.Errorf("putUnchecked: api error: %d - %s", info.StatusCode, info.Message)
+        return nil, fmt.Errorf("putUnchecked: api error: %d - %s", info.StatusCode, info.Message)
     }
     // we need to have a safe name for the upload to work
     tmpName := "rcloneTemp" + random.String(8)
@@ -681,7 +681,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
     if needMove {
         err := f.mkDirs(ctx, strings.Trim(dstBase, "/"))
         if err != nil {
-            return nil, errors.Wrap(err, "move: failed to make destination dirs")
+            return nil, fmt.Errorf("move: failed to make destination dirs: %w", err)
         }

         err = f.move(ctx, dstBase, srcObj.code)
@@ -694,7 +694,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
     if needRename {
         err := f.updateFileInformation(ctx, &api.UpdateFileInformation{Token: f.opt.AccessToken, FileCode: srcObj.code, NewName: f.opt.Enc.FromStandardName(dstLeaf)})
         if err != nil {
-            return nil, errors.Wrap(err, "move: failed final rename")
+            return nil, fmt.Errorf("move: failed final rename: %w", err)
         }
     }

@@ -751,7 +751,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
     srcPath := srcFs.dirPath(srcRemote)
     srcInfo, err := f.readMetaDataForPath(ctx, srcPath, &api.MetadataRequestOptions{Limit: 1})
     if err != nil {
-        return errors.Wrap(err, "dirmove: source not found")
+        return fmt.Errorf("dirmove: source not found: %w", err)
     }
     // check if the destination allready exists
     dstPath := f.dirPath(dstRemote)
@@ -764,13 +764,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
     dstBase, dstName := f.splitPathFull(dstRemote)
     err = f.mkDirs(ctx, strings.Trim(dstBase, "/"))
     if err != nil {
-        return errors.Wrap(err, "dirmove: failed to create dirs")
+        return fmt.Errorf("dirmove: failed to create dirs: %w", err)
     }

     // find the destination parent dir
     dstInfo, err = f.readMetaDataForPath(ctx, dstBase, &api.MetadataRequestOptions{Limit: 1})
     if err != nil {
-        return errors.Wrap(err, "dirmove: failed to read destination")
+        return fmt.Errorf("dirmove: failed to read destination: %w", err)
     }
     srcBase, srcName := srcFs.splitPathFull(srcRemote)

@@ -784,7 +784,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
         tmpName := "rcloneTemp" + random.String(8)
         err = f.renameDir(ctx, srcInfo.Data.CurrentFolder.FolderID, tmpName)
         if err != nil {
-            return errors.Wrap(err, "dirmove: failed initial rename")
+            return fmt.Errorf("dirmove: failed initial rename: %w", err)
         }
     }

@@ -807,7 +807,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
         return shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return errors.Wrap(err, "dirmove: failed to move")
+        return fmt.Errorf("dirmove: failed to move: %w", err)
     }
     if apiErr.StatusCode != 0 {
         return apiErr
@@ -818,7 +818,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
     if needRename {
         err = f.renameDir(ctx, srcInfo.Data.CurrentFolder.FolderID, dstName)
         if err != nil {
-            return errors.Wrap(err, "dirmove: failed final rename")
+            return fmt.Errorf("dirmove: failed final rename: %w", err)
         }
     }
     return nil
@@ -848,10 +848,10 @@ func (f *Fs) copy(ctx context.Context, dstPath string, fileID string) (err error
         return shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return errors.Wrap(err, "couldn't copy file")
+        return fmt.Errorf("couldn't copy file: %w", err)
     }
     if info.StatusCode != 0 {
-        return errors.Errorf("copy: api error: %d - %s", info.StatusCode, info.Message)
+        return fmt.Errorf("copy: api error: %d - %s", info.StatusCode, info.Message)
     }
     return err
 }
@@ -871,7 +871,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

     err := f.mkDirs(ctx, path.Join(f.root, dstBase))
     if err != nil {
-        return nil, errors.Wrap(err, "copy: failed to make destination dirs")
+        return nil, fmt.Errorf("copy: failed to make destination dirs: %w", err)
     }

     err = f.copy(ctx, f.dirPath(dstBase), srcObj.code)
@@ -881,13 +881,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

     newObj, err := f.NewObject(ctx, path.Join(dstBase, srcLeaf))
     if err != nil {
-        return nil, errors.Wrap(err, "copy: couldn't find copied object")
+        return nil, fmt.Errorf("copy: couldn't find copied object: %w", err)
     }

     if needRename {
         err := f.updateFileInformation(ctx, &api.UpdateFileInformation{Token: f.opt.AccessToken, FileCode: newObj.(*Object).code, NewName: f.opt.Enc.FromStandardName(dstLeaf)})
         if err != nil {
-            return nil, errors.Wrap(err, "copy: failed final rename")
+            return nil, fmt.Errorf("copy: failed final rename: %w", err)
         }
         newObj.(*Object).remote = remote
     }
@@ -970,7 +970,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
         return shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return nil, errors.Wrap(err, "open: failed to get download link")
+        return nil, fmt.Errorf("open: failed to get download link: %w", err)
     }

     fs.FixRangeOption(options, o.size)
@@ -1010,7 +1010,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
     // delete duplicate object after successful upload
     err = o.Remove(ctx)
     if err != nil {
-        return errors.Wrap(err, "failed to remove old version")
+        return fmt.Errorf("failed to remove old version: %w", err)
     }

     // Replace guts of old object with new one
@@ -1038,7 +1038,7 @@ func (o *Object) Remove(ctx context.Context) error {
         return err
     }
     if info.StatusCode != 0 {
-        return errors.Errorf("remove: api error: %d - %s", info.StatusCode, info.Message)
+        return fmt.Errorf("remove: api error: %d - %s", info.StatusCode, info.Message)
     }
     return nil
 }
@@ -13,7 +13,6 @@ import (
     "strings"
     "time"

-    "github.com/pkg/errors"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/fshttp"
     "golang.org/x/net/publicsuffix"
@@ -122,12 +121,12 @@ func (ca *CookieAuth) Cookies(ctx context.Context) (*CookieResponse, error) {
 func (ca *CookieAuth) getSPCookie(conf *SharepointSuccessResponse) (*CookieResponse, error) {
     spRoot, err := url.Parse(ca.endpoint)
     if err != nil {
-        return nil, errors.Wrap(err, "Error while constructing endpoint URL")
+        return nil, fmt.Errorf("error while constructing endpoint URL: %w", err)
     }

     u, err := url.Parse(spRoot.Scheme + "://" + spRoot.Host + "/_forms/default.aspx?wa=wsignin1.0")
     if err != nil {
-        return nil, errors.Wrap(err, "Error while constructing login URL")
+        return nil, fmt.Errorf("error while constructing login URL: %w", err)
     }

     // To authenticate with davfs or anything else we need two cookies (rtFa and FedAuth)
@@ -143,7 +142,7 @@ func (ca *CookieAuth) getSPCookie(conf *SharepointSuccessRespo

     // Send the previously acquired Token as a Post parameter
     if _, err = client.Post(u.String(), "text/xml", strings.NewReader(conf.Body.Token)); err != nil {
-        return nil, errors.Wrap(err, "Error while grabbing cookies from endpoint: %v")
+        return nil, fmt.Errorf("error while grabbing cookies from endpoint: %w", err)
     }

     cookieResponse := CookieResponse{}
@@ -171,7 +170,7 @@ func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SharepointSuccessRe

     buf := &bytes.Buffer{}
     if err := t.Execute(buf, reqData); err != nil {
-        return nil, errors.Wrap(err, "Error while filling auth token template")
+        return nil, fmt.Errorf("error while filling auth token template: %w", err)
     }

     // Create and execute the first request which returns an auth token for the sharepoint service
@@ -184,7 +183,7 @@ func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SharepointSuccessRe
     client := fshttp.NewClient(ctx)
     resp, err := client.Do(req)
     if err != nil {
-        return nil, errors.Wrap(err, "Error while logging in to endpoint")
+        return nil, fmt.Errorf("error while logging in to endpoint: %w", err)
     }
     defer fs.CheckClose(resp.Body, &err)

@@ -209,7 +208,7 @@ func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SharepointSuccessRe
     }

     if err != nil {
-        return nil, errors.Wrap(err, "Error while reading endpoint response")
+        return nil, fmt.Errorf("error while reading endpoint response: %w", err)
     }
     return
 }
@@ -12,6 +12,7 @@ import (
     "context"
     "crypto/tls"
     "encoding/xml"
+    "errors"
    "fmt"
     "io"
     "net/http"
@@ -23,7 +24,6 @@ import (
     "sync"
     "time"

-    "github.com/pkg/errors"
     "github.com/rclone/rclone/backend/webdav/api"
     "github.com/rclone/rclone/backend/webdav/odrvcookie"
     "github.com/rclone/rclone/fs"
@@ -118,7 +118,7 @@ Use this to set additional HTTP headers for all transactions
 The input format is comma separated list of key,value pairs. Standard
 [CSV encoding](https://godoc.org/encoding/csv) may be used.

-For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
+For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.

 You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
 `,
@@ -303,7 +303,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
         }
     }
     if err != nil {
-        return nil, errors.Wrap(err, "read metadata failed")
+        return nil, fmt.Errorf("read metadata failed: %w", err)
     }
     if len(result.Responses) < 1 {
         return nil, fs.ErrorObjectNotFound
@@ -322,7 +322,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
 func errorHandler(resp *http.Response) error {
     body, err := rest.ReadBody(resp)
     if err != nil {
-        return errors.Wrap(err, "error when trying to read error from body")
+        return fmt.Errorf("error when trying to read error from body: %w", err)
     }
     // Decode error response
     errResponse := new(api.Error)
@@ -387,7 +387,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
     var err error
     opt.Pass, err = obscure.Reveal(opt.Pass)
     if err != nil {
-        return nil, errors.Wrap(err, "couldn't decrypt password")
+        return nil, fmt.Errorf("couldn't decrypt password: %w", err)
     }
     }
     if opt.Vendor == "" {
@@ -465,7 +465,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
     }
     _, err := f.NewObject(ctx, remote)
     if err != nil {
-        if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorIsDir {
+        if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorIsDir) {
             // File doesn't exist so return old f
             f.root = root
             return f, nil
@@ -503,7 +503,7 @@ func (f *Fs) fetchBearerToken(cmd string) (string, error) {
     if stderrString == "" {
         stderrString = stdoutString
     }
-    return "", errors.Wrapf(err, "failed to get bearer token using %q: %s", f.opt.BearerTokenCommand, stderrString)
+    return "", fmt.Errorf("failed to get bearer token using %q: %s: %w", f.opt.BearerTokenCommand, stderrString, err)
     }
     return stdoutString, nil
 }
@@ -673,12 +673,12 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
             return found, fs.ErrorDirNotFound
         }
     }
-    return found, errors.Wrap(err, "couldn't list files")
+    return found, fmt.Errorf("couldn't list files: %w", err)
     }
     //fmt.Printf("result = %#v", &result)
     baseURL, err := rest.URLJoin(f.endpoint, opts.Path)
     if err != nil {
-        return false, errors.Wrap(err, "couldn't join URL")
+        return false, fmt.Errorf("couldn't join URL: %w", err)
     }
     for i := range result.Responses {
         item := &result.Responses[i]
@@ -947,7 +947,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
         return f.shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return errors.Wrap(err, "rmdir failed")
+        return fmt.Errorf("rmdir failed: %w", err)
     }
     // FIXME parse Multistatus response
     return nil
@@ -986,11 +986,11 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
     dstPath := f.filePath(remote)
     err := f.mkParentDir(ctx, dstPath)
     if err != nil {
-        return nil, errors.Wrap(err, "Copy mkParentDir failed")
+        return nil, fmt.Errorf("Copy mkParentDir failed: %w", err)
     }
     destinationURL, err := rest.URLJoin(f.endpoint, dstPath)
     if err != nil {
-        return nil, errors.Wrap(err, "copyOrMove couldn't join URL")
+        return nil, fmt.Errorf("copyOrMove couldn't join URL: %w", err)
     }
     var resp *http.Response
     opts := rest.Opts{
@@ -1010,11 +1010,11 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
         return f.shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return nil, errors.Wrap(err, "Copy call failed")
+        return nil, fmt.Errorf("Copy call failed: %w", err)
     }
     dstObj, err := f.NewObject(ctx, remote)
     if err != nil {
-        return nil, errors.Wrap(err, "Copy NewObject failed")
+        return nil, fmt.Errorf("Copy NewObject failed: %w", err)
     }
     return dstObj, nil
 }
@@ -1077,18 +1077,18 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
         return fs.ErrorDirExists
     }
     if err != fs.ErrorDirNotFound {
-        return errors.Wrap(err, "DirMove dirExists dst failed")
+        return fmt.Errorf("DirMove dirExists dst failed: %w", err)
     }

     // Make sure the parent directory exists
     err = f.mkParentDir(ctx, dstPath)
     if err != nil {
-        return errors.Wrap(err, "DirMove mkParentDir dst failed")
+        return fmt.Errorf("DirMove mkParentDir dst failed: %w", err)
     }

     destinationURL, err := rest.URLJoin(f.endpoint, dstPath)
     if err != nil {
-        return errors.Wrap(err, "DirMove couldn't join URL")
+        return fmt.Errorf("DirMove couldn't join URL: %w", err)
     }

     var resp *http.Response
@@ -1106,7 +1106,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
         return f.shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return errors.Wrap(err, "DirMove MOVE call failed")
+        return fmt.Errorf("DirMove MOVE call failed: %w", err)
     }
     return nil
 }
@@ -1148,7 +1148,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
         return f.shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return nil, errors.Wrap(err, "about call failed")
+        return nil, fmt.Errorf("about call failed: %w", err)
     }
     usage := &fs.Usage{}
     if i, err := strconv.ParseInt(q.Used, 10, 64); err == nil && i >= 0 {
@@ -1289,7 +1289,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
     err = o.fs.mkParentDir(ctx, o.filePath())
     if err != nil {
-        return errors.Wrap(err, "Update mkParentDir failed")
+        return fmt.Errorf("Update mkParentDir failed: %w", err)
     }

     size := src.Size()
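The fetchBearerToken hunk above shows the slightly trickier case: a wrap that carries extra context values as well as the cause. With fmt.Errorf, the context verbs (%q, %s) and the wrapped error (%w) share one format string, with %w conventionally placed last, and the cause remains reachable through errors.Is/As/Unwrap. A small sketch with invented values:

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	cause := os.ErrPermission
	// extra context plus the wrapped cause in a single fmt.Errorf
	err := fmt.Errorf("failed to get bearer token using %q: %s: %w",
		"oidc-token", "permission denied by agent", cause)
	fmt.Println(err)
	fmt.Println(errors.Is(err, os.ErrPermission)) // true
}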
@@ -3,6 +3,7 @@ package yandex
 import (
     "context"
     "encoding/json"
+    "errors"
     "fmt"
     "io"
     "log"
@@ -13,7 +14,6 @@ import (
     "strings"
     "time"

-    "github.com/pkg/errors"
     "github.com/rclone/rclone/backend/yandex/api"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/config"
@@ -249,7 +249,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

     token, err := oauthutil.GetToken(name, m)
     if err != nil {
-        return nil, errors.Wrap(err, "couldn't read OAuth token")
+        return nil, fmt.Errorf("couldn't read OAuth token: %w", err)
     }
     if token.RefreshToken == "" {
         return nil, errors.New("unable to get RefreshToken. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
@@ -258,13 +258,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
         token.TokenType = "OAuth"
         err = oauthutil.PutToken(name, m, token, false)
         if err != nil {
-            return nil, errors.Wrap(err, "couldn't save OAuth token")
+            return nil, fmt.Errorf("couldn't save OAuth token: %w", err)
         }
         log.Printf("Automatically upgraded OAuth config.")
     }
     oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
     if err != nil {
-        return nil, errors.Wrap(err, "failed to configure Yandex")
+        return nil, fmt.Errorf("failed to configure Yandex: %w", err)
     }

     ci := fs.GetConfig(ctx)
@@ -309,7 +309,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.Reso
     case "dir":
         t, err := time.Parse(time.RFC3339Nano, object.Modified)
         if err != nil {
-            return nil, errors.Wrap(err, "error parsing time in directory item")
+            return nil, fmt.Errorf("error parsing time in directory item: %w", err)
         }
         d := fs.NewDir(remote, t).SetSize(object.Size)
         return d, nil
@@ -560,19 +560,19 @@ func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
         var status api.AsyncStatus
         err = json.Unmarshal(body, &status)
         if err != nil {
-            return errors.Wrapf(err, "async status result not JSON: %q", body)
+            return fmt.Errorf("async status result not JSON: %q: %w", body, err)
         }

         switch status.Status {
         case "failure":
-            return errors.Errorf("async operation returned %q", status.Status)
+            return fmt.Errorf("async operation returned %q", status.Status)
         case "success":
             return nil
         }

         time.Sleep(1 * time.Second)
     }
-    return errors.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
+    return fmt.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
 }

 func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err error) {
@@ -607,7 +607,7 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err erro
     var info api.AsyncInfo
     err = json.Unmarshal(body, &info)
     if err != nil {
-        return errors.Wrapf(err, "async info result not JSON: %q", body)
+        return fmt.Errorf("async info result not JSON: %q: %w", body, err)
     }
     return f.waitForJob(ctx, info.HRef)
 }
@@ -623,7 +623,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
     //send request to get list of objects in this directory.
     info, err := f.readMetaDataForPath(ctx, root, &api.ResourceInfoRequestOptions{})
     if err != nil {
-        return errors.Wrap(err, "rmdir failed")
+        return fmt.Errorf("rmdir failed: %w", err)
     }
     if len(info.Embedded.Items) != 0 {
         return fs.ErrorDirectoryNotEmpty
@@ -683,7 +683,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite
     var info api.AsyncInfo
     err = json.Unmarshal(body, &info)
     if err != nil {
-        return errors.Wrapf(err, "async info result not JSON: %q", body)
+        return fmt.Errorf("async info result not JSON: %q: %w", body, err)
     }
     return f.waitForJob(ctx, info.HRef)
 }
@@ -714,7 +714,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
     err = f.copyOrMove(ctx, "copy", srcObj.filePath(), dstPath, false)

     if err != nil {
-        return nil, errors.Wrap(err, "couldn't copy file")
+        return nil, fmt.Errorf("couldn't copy file: %w", err)
     }

     return f.NewObject(ctx, remote)
@@ -744,7 +744,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
     err = f.copyOrMove(ctx, "move", srcObj.filePath(), dstPath, false)

     if err != nil {
-        return nil, errors.Wrap(err, "couldn't move file")
+        return nil, fmt.Errorf("couldn't move file: %w", err)
     }

     return f.NewObject(ctx, remote)
@@ -795,7 +795,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
     err = f.copyOrMove(ctx, "move", srcPath, dstPath, false)

     if err != nil {
-        return errors.Wrap(err, "couldn't move directory")
+        return fmt.Errorf("couldn't move directory: %w", err)
     }
     return nil
 }
@@ -831,9 +831,9 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
     }
     if err != nil {
         if unlink {
-            return "", errors.Wrap(err, "couldn't remove public link")
+            return "", fmt.Errorf("couldn't remove public link: %w", err)
         }
-        return "", errors.Wrap(err, "couldn't create public link")
+        return "", fmt.Errorf("couldn't create public link: %w", err)
     }

     info, err := f.readMetaDataForPath(ctx, f.filePath(remote), &api.ResourceInfoRequestOptions{})
@@ -934,7 +934,7 @@ func (o *Object) setMetaData(info *api.ResourceInfoResponse) (err error) {
     }
     t, err := time.Parse(time.RFC3339Nano, modTimeString)
     if err != nil {
-        return errors.Wrapf(err, "failed to parse modtime from %q", modTimeString)
+        return fmt.Errorf("failed to parse modtime from %q: %w", modTimeString, err)
     }
     o.modTime = t
     return nil
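The waitForJob hunk above keeps the existing polling structure and only swaps the error constructors: fetch the async status, fail on "failure", return on "success", sleep a second between attempts, and give up after the configured timeout. A self-contained sketch of that loop, with a stubbed status source standing in for the HTTP call and all names invented:

package main

import (
	"fmt"
	"time"
)

// pollStatus stands in for the request that decodes api.AsyncStatus.
func pollStatus(attempt int) string {
	if attempt >= 3 {
		return "success"
	}
	return "in-progress"
}

func waitForJob(timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for i := 0; time.Now().Before(deadline); i++ {
		switch status := pollStatus(i); status {
		case "failure":
			return fmt.Errorf("async operation returned %q", status)
		case "success":
			return nil
		}
		time.Sleep(1 * time.Second) // same 1s cadence as the diff
	}
	return fmt.Errorf("async operation didn't complete after %v", timeout)
}

func main() {
	fmt.Println(waitForJob(10 * time.Second)) // <nil> after ~3 polls
}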
@@ -4,6 +4,7 @@ package zoho

 import (
     "context"
+    "errors"
     "fmt"
     "io"
     "io/ioutil"
@@ -14,7 +15,6 @@ import (
     "strings"
     "time"

-    "github.com/pkg/errors"
     "github.com/rclone/rclone/lib/encoder"
     "github.com/rclone/rclone/lib/pacer"
     "github.com/rclone/rclone/lib/random"
@@ -81,7 +81,7 @@ func init() {
     getSrvs := func() (authSrv, apiSrv *rest.Client, err error) {
         oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
         if err != nil {
-            return nil, nil, errors.Wrap(err, "failed to load oAuthClient")
+            return nil, nil, fmt.Errorf("failed to load oAuthClient: %w", err)
         }
         authSrv = rest.NewClient(oAuthClient).SetRoot(accountsURL)
         apiSrv = rest.NewClient(oAuthClient).SetRoot(rootURL)
@@ -100,13 +100,13 @@ func init() {
         // it's own custom type
         token, err := oauthutil.GetToken(name, m)
         if err != nil {
-            return nil, errors.Wrap(err, "failed to read token")
+            return nil, fmt.Errorf("failed to read token: %w", err)
         }
         if token.TokenType != "Zoho-oauthtoken" {
             token.TokenType = "Zoho-oauthtoken"
             err = oauthutil.PutToken(name, m, token, false)
             if err != nil {
-                return nil, errors.Wrap(err, "failed to configure token")
+                return nil, fmt.Errorf("failed to configure token: %w", err)
             }
         }

@@ -478,7 +478,7 @@ OUTER:
         return shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return found, errors.Wrap(err, "couldn't list files")
+        return found, fmt.Errorf("couldn't list files: %w", err)
     }
     if len(result.Items) == 0 {
         break
@@ -670,7 +670,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
     params.Set("override-name-exist", strconv.FormatBool(true))
     formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
     if err != nil {
-        return nil, errors.Wrap(err, "failed to make multipart upload")
+        return nil, fmt.Errorf("failed to make multipart upload: %w", err)
     }

     contentLength := overhead + size
@@ -692,7 +692,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
         return shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return nil, errors.Wrap(err, "upload error")
+        return nil, fmt.Errorf("upload error: %w", err)
     }
     if len(uploadResponse.Uploads) != 1 {
         return nil, errors.New("upload: invalid response")
@@ -774,7 +774,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) (err error) {
         return shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return errors.Wrap(err, "delete object failed")
+        return fmt.Errorf("delete object failed: %w", err)
     }
     return nil
 }
@@ -801,7 +801,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {

     err = f.deleteObject(ctx, rootID)
     if err != nil {
-        return errors.Wrap(err, "rmdir failed")
+        return fmt.Errorf("rmdir failed: %w", err)
     }
     f.dirCache.FlushDir(dir)
     return nil
@@ -844,7 +844,7 @@ func (f *Fs) rename(ctx context.Context, id, name string) (item *api.Item, err e
         return shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return nil, errors.Wrap(err, "rename failed")
+        return nil, fmt.Errorf("rename failed: %w", err)
     }
     return &result.Item, nil
 }
@@ -897,7 +897,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
         return shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return nil, errors.Wrap(err, "couldn't copy file")
+        return nil, fmt.Errorf("couldn't copy file: %w", err)
     }
     // Server acts weird some times make sure we actually got
     // an item
@@ -911,7 +911,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
     // the correct name after copy
     if f.opt.Enc.ToStandardName(result.Items[0].Attributes.Name) != leaf {
         if err = dstObject.rename(ctx, leaf); err != nil {
-            return nil, errors.Wrap(err, "copy: couldn't rename copied file")
+            return nil, fmt.Errorf("copy: couldn't rename copied file: %w", err)
         }
     }
     return dstObject, nil
@@ -942,7 +942,7 @@ func (f *Fs) move(ctx context.Context, srcID, parentID string) (item *api.Item,
         return shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return nil, errors.Wrap(err, "move failed")
+        return nil, fmt.Errorf("move failed: %w", err)
     }
     // Server acts weird some times make sure our array actually contains
     // a file
@@ -992,7 +992,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
     if needRename && needMove {
         tmpLeaf := "rcloneTemp" + random.String(8)
         if err = srcObj.rename(ctx, tmpLeaf); err != nil {
-            return nil, errors.Wrap(err, "move: pre move rename failed")
+            return nil, fmt.Errorf("move: pre move rename failed: %w", err)
         }
     }

@@ -1012,7 +1012,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
     // rename the leaf to its final name
     if needRename {
         if err = dstObject.rename(ctx, dstLeaf); err != nil {
-            return nil, errors.Wrap(err, "move: couldn't rename moved file")
+            return nil, fmt.Errorf("move: couldn't rename moved file: %w", err)
         }
     }
     return dstObject, nil
@@ -1046,7 +1046,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
     // do the move
     _, err = f.move(ctx, srcID, dstDirectoryID)
     if err != nil {
-        return errors.Wrap(err, "couldn't dir move")
+        return fmt.Errorf("couldn't dir move: %w", err)
     }

     // Can't copy and change name in one step so we have to check if we have
@@ -1054,7 +1054,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
     if srcLeaf != dstLeaf {
         _, err = f.rename(ctx, srcID, dstLeaf)
         if err != nil {
-            return errors.Wrap(err, "dirmove: couldn't rename moved dir")
+            return fmt.Errorf("dirmove: couldn't rename moved dir: %w", err)
         }
     }
     srcFs.dirCache.FlushDir(srcRemote)
@@ -1261,7 +1261,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

     // upload was successfull, need to delete old object before rename
     if err = o.Remove(ctx); err != nil {
-        return errors.Wrap(err, "failed to remove old object")
+        return fmt.Errorf("failed to remove old object: %w", err)
     }
     if err = o.setMetaData(info); err != nil {
         return err
@@ -9,7 +9,7 @@ provides:
 maintainer: "Nick Craig-Wood <nick@craig-wood.com>"
 description: |
   Rclone - "rsync for cloud storage"
-  is a command line program to sync files and directories to and
+  is a command-line program to sync files and directories to and
   from most cloud providers. It can also mount, tree, ncdu and lots
   of other useful things.
 vendor: "rclone"
0 bin/not-in-stable.go Executable file → Normal file
@@ -3,10 +3,10 @@ package about
 import (
     "context"
     "encoding/json"
+    "errors"
     "fmt"
     "os"

-    "github.com/pkg/errors"
     "github.com/rclone/rclone/cmd"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/config/flags"
@@ -76,7 +76,7 @@ Applying a ` + "`--full`" + ` flag to the command prints the bytes in full, e.g.
     Trashed: 104857602
     Other:   8849156022

-A ` + "`--json`" + ` flag generates conveniently computer readable output, e.g.
+A ` + "`--json`" + ` flag generates conveniently machine-readable output, e.g.

     {
         "total": 18253611008,
@@ -98,11 +98,11 @@ see complete list in [documentation](https://rclone.org/overview/#optional-featu
     cmd.Run(false, false, command, func() error {
         doAbout := f.Features().About
         if doAbout == nil {
-            return errors.Errorf("%v doesn't support about", f)
+            return fmt.Errorf("%v doesn't support about", f)
         }
         u, err := doAbout(context.Background())
         if err != nil {
-            return errors.Wrap(err, "About call failed")
+            return fmt.Errorf("About call failed: %w", err)
         }
         if u == nil {
             return errors.New("nil usage returned")
@@ -7,6 +7,7 @@ import (
     _ "github.com/rclone/rclone/cmd/about"
     _ "github.com/rclone/rclone/cmd/authorize"
     _ "github.com/rclone/rclone/cmd/backend"
+    _ "github.com/rclone/rclone/cmd/bisync"
     _ "github.com/rclone/rclone/cmd/cachestats"
     _ "github.com/rclone/rclone/cmd/cat"
     _ "github.com/rclone/rclone/cmd/check"
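The new bisync line works like every other entry in this import block: a blank import pulls the command package in purely for its init side effect, which is where the subcommand registers itself with the root command. A toy, self-contained sketch of the idiom (names invented, not rclone's actual registry):

package main

import "fmt"

// registry of subcommand names, filled in by init functions
var commands []string

// register is what each command package's init would call
func register(name string) {
	commands = append(commands, name)
}

// in the real tree each command lives in its own package and is pulled
// in with a blank import such as _ "github.com/rclone/rclone/cmd/bisync";
// this init stands in for that side effect
func init() {
	register("bisync")
}

func main() {
	fmt.Println(commands) // [bisync]
}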
@@ -7,7 +7,6 @@ import (
     "os"
     "sort"

-    "github.com/pkg/errors"
     "github.com/rclone/rclone/cmd"
     "github.com/rclone/rclone/cmd/rc"
     "github.com/rclone/rclone/fs"
@@ -30,9 +29,9 @@ func init() {

 var commandDefinition = &cobra.Command{
     Use: "backend <command> remote:path [opts] <args>",
-    Short: `Run a backend specific command.`,
+    Short: `Run a backend-specific command.`,
     Long: `
-This runs a backend specific command. The commands themselves (except
+This runs a backend-specific command. The commands themselves (except
 for "help" and "features") are defined by the backends and you should
 see the backend docs for definitions.

@@ -88,14 +87,14 @@ Note to run these commands on a running backend then see
         default:
             doCommand := f.Features().Command
             if doCommand == nil {
-                return errors.Errorf("%v: doesn't support backend commands", f)
+                return fmt.Errorf("%v: doesn't support backend commands", f)
             }
             arg := args[2:]
             opt := rc.ParseOptions(options)
             out, err = doCommand(context.Background(), name, arg, opt)
         }
         if err != nil {
-            return errors.Wrapf(err, "command %q failed", name)
+            return fmt.Errorf("command %q failed: %w", name, err)

         }
         // Output the result
@@ -121,7 +120,7 @@ Note to run these commands on a running backend then see
             enc.SetIndent("", "\t")
             err = enc.Encode(out)
             if err != nil {
-                return errors.Wrap(err, "failed to write JSON")
+                return fmt.Errorf("failed to write JSON: %w", err)
             }
         }
         return nil
@@ -135,7 +134,7 @@ func showHelp(fsInfo *fs.RegInfo) error {
     cmds := fsInfo.CommandHelp
     name := fsInfo.Name
     if len(cmds) == 0 {
-        return errors.Errorf("%s backend has no commands", name)
+        return fmt.Errorf("%s backend has no commands", name)
     }
     fmt.Printf("## Backend commands\n\n")
     fmt.Printf(`Here are the commands specific to the %s backend.
Some files were not shown because too many files have changed in this diff