Mirror of https://github.com/rclone/rclone.git (synced 2026-01-22 20:33:17 +00:00)

Compare commits: fix-zero-c ... v1.49-fixe (33 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 17c21aed5a |  |
|  | 75cd733d3e |  |
|  | b5ea6af6e4 |  |
|  | d8729441db |  |
|  | 5ac39c2176 |  |
|  | 8aae04208b |  |
|  | d9bdd0575e |  |
|  | b3cafe8f06 |  |
|  | a7a4666ddd |  |
|  | 54d409a7dd |  |
|  | 9054542be5 |  |
|  | 4a8a8578a5 |  |
|  | 04da57fc68 |  |
|  | 3aeb6a5a4c |  |
|  | c5d2da9a77 |  |
|  | c89261bd99 |  |
|  | 1bdab29eab |  |
|  | f77027e6b7 |  |
|  | f73d0eb920 |  |
|  | f1a9d821e4 |  |
|  | 5fe78936d5 |  |
|  | 4f3eee8d65 |  |
|  | f2c05bc239 |  |
|  | b463032901 |  |
|  | 358decb933 |  |
|  | cefa2df3b2 |  |
|  | 52efb7e6d0 |  |
|  | 01fa6835c7 |  |
|  | 8adf22e294 |  |
|  | 45f7c687e2 |  |
|  | a05dd6fc27 |  |
|  | 642cb03121 |  |
|  | da4dfdc3ec |  |
25 .github/workflows/build.yml (vendored)
@@ -19,12 +19,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.10', 'go1.11', 'go1.12']
+        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.10', 'go1.11']
 
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.13.x'
+            go: '1.12.x'
             modules: 'off'
             gotags: cmount
             build_flags: '-include "^linux/"'
@@ -34,7 +34,7 @@ jobs:
 
           - job_name: mac
             os: macOS-latest
-            go: '1.13.x'
+            go: '1.12.x'
             modules: 'off'
             gotags: ''  # cmount doesn't work on osx travis for some reason
             build_flags: '-include "^darwin/amd64" -cgo'
@@ -44,7 +44,7 @@ jobs:
 
           - job_name: windows_amd64
             os: windows-latest
-            go: '1.13.x'
+            go: '1.12.x'
             modules: 'off'
             gotags: cmount
             build_flags: '-include "^windows/amd64" -cgo'
@@ -54,7 +54,7 @@ jobs:
 
           - job_name: windows_386
             os: windows-latest
-            go: '1.13.x'
+            go: '1.12.x'
             modules: 'off'
             gotags: cmount
             goarch: '386'
@@ -65,7 +65,7 @@ jobs:
 
           - job_name: other_os
             os: ubuntu-latest
-            go: '1.13.x'
+            go: '1.12.x'
             modules: 'off'
             build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
             compile_all: true
@@ -73,7 +73,7 @@ jobs:
 
           - job_name: modules_race
             os: ubuntu-latest
-            go: '1.13.x'
+            go: '1.12.x'
             modules: 'on'
             quicktest: true
             racequicktest: true
@@ -90,19 +90,13 @@ jobs:
             modules: 'off'
             quicktest: true
-
-          - job_name: go1.12
-            os: ubuntu-latest
-            go: '1.12.x'
-            modules: 'off'
-            quicktest: true
 
     name: ${{ matrix.job_name }}
 
     runs-on: ${{ matrix.os }}
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v1
+        uses: actions/checkout@master
         with:
           path: ./src/github.com/${{ github.repository }}
 
@@ -211,7 +205,7 @@ jobs:
     steps:
 
       - name: Checkout
-        uses: actions/checkout@v1
+        uses: actions/checkout@master
         with:
           path: ./src/github.com/${{ github.repository }}
 
@@ -247,4 +241,3 @@ jobs:
         make circleci_upload
       env:
         RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
-      if: github.head_ref == ''
@@ -341,12 +341,6 @@ Getting going
   * Add your remote to the imports in `backend/all/all.go`
   * HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
   * Try to implement as many optional methods as possible as it makes the remote more usable.
-  * Use fs/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
-    * `go install -tags noencode`
-    * `rclone purge -v TestRemote:rclone-info`
-    * `rclone info -vv --write-json remote.json TestRemote:rclone-info`
-    * `go run cmd/info/internal/build_csv/main.go -o remote.csv remote.json`
-    * open `remote.csv` in a spreadsheet and examine
 
 Unit tests
 
@@ -373,54 +367,12 @@ Or if you want to run the integration tests manually:
 
 See the [testing](#testing) section for more information on integration tests.
 
-Add your fs to the docs - you'll need to pick an icon for it from
-[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
-alphabetical order of full name of remote (eg `drive` is ordered as
-`Google Drive`) but with the local file system last.
+Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
 
   * `README.md` - main GitHub page
   * `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
-    * make sure this has the `autogenerated options` comments in (see your reference backend docs)
-    * update them with `make backenddocs` - revert any changes in other backends
   * `docs/content/overview.md` - overview docs
   * `docs/content/docs.md` - list of remotes in config section
   * `docs/content/about.md` - front page of rclone.org
   * `docs/layouts/chrome/navbar.html` - add it to the website navigation
   * `bin/make_manual.py` - add the page to the `docs` constant
 
 Once you've written the docs, run `make serve` and check they look OK
 in the web browser and the links (internal and external) all work.
 
-## Writing a plugin ##
-
-New features (backends, commands) can also be added "out-of-tree", through Go plugins.
-Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
-This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
-
-Usage
-
-- Naming
-  - Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
-  - `KIND` should be one of `backend`, `command` or `bundle`.
-  - Example: A plugin with backend support for PiFS would be called
-    `librcloneplugin_backend_pifs.so`.
-- Loading
-  - Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
-  - Supported on rclone v1.50 or greater.
-  - All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
-  - If this variable doesn't exist, plugin support is disabled.
-  - Plugins must be compiled against the exact version of rclone to work.
-    (The rclone used during building the plugin must be the same as the source of rclone)
-
-Building
-
-To turn your existing additions into a Go plugin, move them to an external repository
-and change the top-level package name to `main`.
-
-Check `rclone --version` and make sure that the plugin's rclone dependency and host Go version match.
-
-Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
-
-[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
-
-[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
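Editor's note: the plugin documentation removed in the hunk above is compact enough to illustrate. Below is a minimal sketch of what such an out-of-tree backend plugin could look like, not rclone's documented example: the backend name `pifs` is hypothetical, and `NewFs` deliberately returns `fs.ErrorNotImplemented` rather than a working backend. Build it as described above with `go build -buildmode=plugin -o librcloneplugin_backend_pifs.so .`.

```go
// Minimal sketch of an out-of-tree backend plugin. The name "pifs"
// is a hypothetical example taken from the removed docs.
package main

import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

// init runs when rclone loads the .so, so registering here makes the
// backend visible exactly as a compiled-in backend would be.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "pifs",
		Description: "PiFS demo backend (plugin sketch)",
		NewFs:       NewFs,
	})
}

// NewFs would construct the backend; implementing fs.Fs is elided in
// this sketch, so it just reports the feature as unimplemented.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, fs.ErrorNotImplemented
}
```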
@@ -12,11 +12,10 @@ RUN ./rclone version
 # Begin final image
 FROM alpine:latest
 
-RUN apk --no-cache add ca-certificates fuse
+RUN apk --no-cache add ca-certificates
 
-COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
+WORKDIR /root/
 
-ENTRYPOINT [ "rclone" ]
+COPY --from=builder /go/src/github.com/rclone/rclone/rclone .
 
-WORKDIR /data
-ENV XDG_CONFIG_HOME=/config
+ENTRYPOINT [ "./rclone" ]
@@ -12,7 +12,6 @@ Current active maintainers of rclone are:
 | Alex Chen        | @Cnly       | onedrive backend             |
 | Sandeep Ummadi   | @sandeepkru | azureblob backend            |
 | Sebastian Bünger | @buengese   | jottacloud & yandex backends |
-| Ivan Andreev     | @ivandeex   | chunker & mailru backends    |
 
 **This is a work in progress Draft**
13966 MANUAL.html (generated): file diff suppressed because one or more lines are too long
13580 MANUAL.txt (generated): file diff suppressed because it is too large
8 Makefile
@@ -46,8 +46,7 @@ endif
 rclone:
 	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
-	mkdir -p `go env GOPATH`/bin/
-	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
-	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
+	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/
 
 test_all:
 	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
@@ -97,11 +96,6 @@ update:
 	GO111MODULE=on go mod tidy
 	GO111MODULE=on go mod vendor
 
-# Tidy the module dependencies
-tidy:
-	GO111MODULE=on go mod tidy
-	GO111MODULE=on go mod vendor
-
 doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
 
 rclone.1: MANUAL.md
@@ -14,7 +14,6 @@
 [CircleCI](https://circleci.com/gh/rclone/rclone/tree/master)
 [Go Report Card](https://goreportcard.com/report/github.com/rclone/rclone)
 [GoDoc](https://godoc.org/github.com/rclone/rclone)
-[Docker Pulls](https://hub.docker.com/r/rclone/rclone)
 
 # Rclone
 
@@ -22,14 +21,13 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 
 ## Storage providers
 
-* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
+* 1Fichier [:page_facing_up:](https://rclone.org/ficher/)
 * Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
 * Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
 * Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
 * Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
 * Box [:page_facing_up:](https://rclone.org/box/)
 * Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
-* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
 * DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
 * Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
 * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
@@ -42,7 +40,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
 * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
 * Koofr [:page_facing_up:](https://rclone.org/koofr/)
-* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
 * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
 * Mega [:page_facing_up:](https://rclone.org/mega/)
 * Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
@@ -77,7 +74,6 @@ Please see [the full list of all storage providers and their features](https://r
 * [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
 * [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
 * Can sync to and from network, e.g. two different cloud accounts
-* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
 * Optional encryption ([Crypt](https://rclone.org/crypt/))
 * Optional cache ([Cache](https://rclone.org/cache/))
 * Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
17 RELEASE.md
@@ -1,21 +1,14 @@
 # Release
 
 This file describes how to make the various kinds of releases
 
-## Extra required software for making a release
+Extra required software for making a release
 
 * [github-release](https://github.com/aktau/github-release) for uploading packages
 * pandoc for making the html and man pages
 
-## Making a release
+Making a release
 
 * git status - make sure everything is checked in
 * Check travis & appveyor builds are green
 * make check
 * make test # see integration test server or run locally
 * make tag
 * edit docs/content/changelog.md
-* make tidy
 * make doc
 * git status - to check for new man pages - git add them
 * git commit -a -v -m "Version v1.XX.0"
@@ -33,8 +26,8 @@ This file describes how to make the various kinds of releases
 * # announce with forum post, twitter post, G+ post
 
 Early in the next release cycle update the vendored dependencies
 
-* Review any pinned packages in go.mod and remove if possible
+* GO111MODULE=on go get -u github.com/spf13/cobra@master
 * make update
 * git status
 * git add new files
@@ -72,7 +65,7 @@ Now
 * git co ${BASE_TAG}-fixes
 * git cherry-pick any fixes
 * Test (see above)
-* make NEXT_VERSION=${NEW_TAG} tag
+* make NEW_TAG=v1.XX.1 tag
 * edit docs/content/changelog.md
 * make TAG=${NEW_TAG} doc
 * git commit -a -v -m "Version ${NEW_TAG}"
@@ -89,7 +82,7 @@ Now
 * make TAG=${NEW_TAG} upload_github
 * NB this overwrites the current beta so we need to do this
 * git co master
-* make VERSION=${NEW_TAG} startdev
+* make LAST_TAG=${NEW_TAG} startdev
 * # cherry pick the changes to the changelog and VERSION
 * git checkout ${BASE_TAG}-fixes VERSION docs/content/changelog.md
 * git commit --amend
@@ -8,7 +8,6 @@ import (
 	_ "github.com/rclone/rclone/backend/b2"
 	_ "github.com/rclone/rclone/backend/box"
 	_ "github.com/rclone/rclone/backend/cache"
-	_ "github.com/rclone/rclone/backend/chunker"
 	_ "github.com/rclone/rclone/backend/crypt"
 	_ "github.com/rclone/rclone/backend/drive"
 	_ "github.com/rclone/rclone/backend/dropbox"
@@ -21,7 +20,6 @@ import (
 	_ "github.com/rclone/rclone/backend/jottacloud"
 	_ "github.com/rclone/rclone/backend/koofr"
 	_ "github.com/rclone/rclone/backend/local"
-	_ "github.com/rclone/rclone/backend/mailru"
 	_ "github.com/rclone/rclone/backend/mega"
 	_ "github.com/rclone/rclone/backend/onedrive"
 	_ "github.com/rclone/rclone/backend/opendrive"
@@ -31,7 +29,6 @@ import (
 	_ "github.com/rclone/rclone/backend/qingstor"
 	_ "github.com/rclone/rclone/backend/s3"
 	_ "github.com/rclone/rclone/backend/sftp"
-	_ "github.com/rclone/rclone/backend/sharefile"
 	_ "github.com/rclone/rclone/backend/swift"
 	_ "github.com/rclone/rclone/backend/union"
 	_ "github.com/rclone/rclone/backend/webdav"
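Editor's note: deleting a blank import is enough to drop a whole backend because each backend package registers itself from `init()`; visibility is purely a side effect of the import. A minimal sketch of that, assuming the registry lookup helper `fs.Find(name string) (*fs.RegInfo, error)` exposed by the `fs` package:

```go
// Sketch: without the blank import, the backend's init() never runs
// and the registry lookup fails.
package main

import (
	"fmt"

	_ "github.com/rclone/rclone/backend/all" // side effect: register every backend

	"github.com/rclone/rclone/fs"
)

func main() {
	info, err := fs.Find("b2") // succeeds only because of the blank import above
	if err != nil {
		fmt.Println("backend not registered:", err)
		return
	}
	fmt.Println("found backend:", info.Name, "-", info.Description)
}
```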
@@ -28,7 +28,6 @@ import (
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
@@ -39,7 +38,6 @@ import (
 )
 
 const (
-	enc             = encodings.AmazonCloudDrive
 	folderKind      = "FOLDER"
 	fileKind        = "FILE"
 	statusAvailable = "AVAILABLE"
@@ -386,7 +384,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
 	var resp *http.Response
 	var subFolder *acd.Folder
 	err = f.pacer.Call(func() (bool, error) {
-		subFolder, resp, err = folder.GetFolder(enc.FromStandardName(leaf))
+		subFolder, resp, err = folder.GetFolder(leaf)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -413,7 +411,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 	var resp *http.Response
 	var info *acd.Folder
 	err = f.pacer.Call(func() (bool, error) {
-		info, resp, err = folder.CreateFolder(enc.FromStandardName(leaf))
+		info, resp, err = folder.CreateFolder(leaf)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -481,7 +479,6 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
 			if !hasValidParent {
 				continue
 			}
-			*node.Name = enc.ToStandardName(*node.Name)
 			// Store the nodes up in case we have to retry the listing
 			out = append(out, node)
 		}
@@ -671,7 +668,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	err = f.pacer.CallNoRetry(func() (bool, error) {
 		start := time.Now()
 		f.tokenRenewer.Start()
-		info, resp, err = folder.Put(in, enc.FromStandardName(leaf))
+		info, resp, err = folder.Put(in, leaf)
 		f.tokenRenewer.Stop()
 		var ok bool
 		ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
@@ -1041,7 +1038,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 	var resp *http.Response
 	var info *acd.File
 	err = o.fs.pacer.Call(func() (bool, error) {
-		info, resp, err = folder.GetFile(enc.FromStandardName(leaf))
+		info, resp, err = folder.GetFile(leaf)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1161,7 +1158,7 @@ func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
 func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		newInfo, resp, err = info.Rename(enc.FromStandardName(newName))
+		newInfo, resp, err = info.Rename(newName)
 		return f.shouldRetry(resp, err)
 	})
 	return newInfo, err
@@ -1357,11 +1354,10 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoin
 			if len(node.Parents) > 0 {
 				if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
 					// and append the drive file name to compute the full file name
-					name := enc.ToStandardName(*node.Name)
 					if len(path) > 0 {
-						path = path + "/" + name
+						path = path + "/" + *node.Name
 					} else {
-						path = name
+						path = *node.Name
 					}
 					// this will now clear the actual file too
 					pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
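Editor's note: the deletions above all strip the same v1.50-era encoder layer, which wraps every provider call: `FromStandardName`/`FromStandardPath` on the way out to the provider, `ToStandardName`/`ToStandardPath` on the way back. A small sketch of that round trip using the `fs/encodings` API visible in the diff (the Amazon Drive encoder and the sample file name are illustrative):

```go
// Sketch of the encoder convention the removed lines follow: encode
// rclone's standard representation outbound, decode inbound.
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/encodings"
)

func main() {
	enc := encodings.AmazonCloudDrive

	// Outbound: rclone-standard name -> provider-safe name.
	wire := enc.FromStandardName("file\twith\tcontrol-chars")

	// Inbound: provider name -> rclone-standard name (round-trips).
	std := enc.ToStandardName(wire)

	fmt.Printf("wire=%q std=%q\n", wire, std)
}
```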
@@ -28,7 +28,6 @@ import (
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
@@ -61,8 +60,6 @@ const (
 	emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
 )
 
-const enc = encodings.AzureBlob
-
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -211,8 +208,7 @@ func parsePath(path string) (root string) {
 // split returns container and containerPath from the rootRelativePath
 // relative to f.root
 func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
-	containerName, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
-	return enc.FromStandardName(containerName), enc.FromStandardPath(containerPath)
+	return bucket.Split(path.Join(f.root, rootRelativePath))
 }
 
 // split returns container and containerPath from the object
@@ -312,9 +308,6 @@ func httpClientFactory(client *http.Client) pipeline.Factory {
 //
 // this code was copied from azblob.NewPipeline
 func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
-	// Don't log stuff to syslog/Windows Event log
-	pipeline.SetForceLogEnabled(false)
-
 	// Closest to API goes first; closest to the wire goes last
 	factories := []pipeline.Factory{
 		azblob.NewTelemetryPolicyFactory(o.Telemetry),
@@ -582,18 +575,18 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
 		}
 		// Advance marker to next
 		marker = response.NextMarker
 
 		for i := range response.Segment.BlobItems {
 			file := &response.Segment.BlobItems[i]
 			// Finish if file name no longer has prefix
 			// if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
 			// 	return nil
 			// }
-			remote := enc.ToStandardPath(file.Name)
-			if !strings.HasPrefix(remote, prefix) {
-				fs.Debugf(f, "Odd name received %q", remote)
+			if !strings.HasPrefix(file.Name, prefix) {
+				fs.Debugf(f, "Odd name received %q", file.Name)
 				continue
 			}
-			remote = remote[len(prefix):]
+			remote := file.Name[len(prefix):]
 			if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
 				continue // skip directory marker
 			}
@@ -609,7 +602,6 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
 		// Send the subdirectories
 		for _, remote := range response.Segment.BlobPrefixes {
 			remote := strings.TrimRight(remote.Name, "/")
-			remote = enc.ToStandardPath(remote)
 			if !strings.HasPrefix(remote, prefix) {
 				fs.Debugf(f, "Odd directory name received %q", remote)
 				continue
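Editor's note: the `list()` hunks above sit inside a marker-driven paging loop ("Advance marker to next"). A stand-alone sketch of that control flow, with the Azure SDK types replaced by a hypothetical `fetchPage` helper so it runs on its own:

```go
// Generic sketch of marker-based paging: fetch a page, process its
// items, then continue from the returned marker until it is empty.
package main

import "fmt"

type page struct {
	items      []string
	nextMarker string // empty means no more pages
}

// fetchPage is a stand-in for ListBlobsHierarchySegment and friends.
func fetchPage(marker string) page {
	if marker == "" {
		return page{items: []string{"a", "b"}, nextMarker: "m1"}
	}
	return page{items: []string{"c"}}
}

func main() {
	for marker := ""; ; {
		p := fetchPage(marker)
		for _, item := range p.items {
			fmt.Println(item)
		}
		// Advance marker to next, exactly as the backend does.
		marker = p.nextMarker
		if marker == "" {
			break
		}
	}
}
```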
@@ -673,7 +665,7 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
 		return entries, nil
 	}
 	err = f.listContainersToFn(func(container *azblob.ContainerItem) error {
-		d := fs.NewDir(enc.ToStandardName(container.Name), container.Properties.LastModified)
+		d := fs.NewDir(container.Name, container.Properties.LastModified)
 		f.cache.MarkOK(container.Name)
 		entries = append(entries, d)
 		return nil
@@ -1123,7 +1115,7 @@ func (o *Object) parseTimeString(timeString string) (err error) {
 		fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
 		return err
 	}
-	o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
+	o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC()
 	return nil
 }
 
@@ -1516,6 +1508,4 @@ var (
 	_ fs.ListRer   = &Fs{}
 	_ fs.Object    = &Object{}
 	_ fs.MimeTyper = &Object{}
-	_ fs.GetTierer = &Object{}
-	_ fs.SetTierer = &Object{}
 )
 
@@ -50,7 +50,7 @@ type Timestamp time.Time
 // MarshalJSON turns a Timestamp into JSON (in UTC)
 func (t *Timestamp) MarshalJSON() (out []byte, err error) {
 	timestamp := (*time.Time)(t).UTC().UnixNano()
-	return []byte(strconv.FormatInt(timestamp/1e6, 10)), nil
+	return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
 }
 
 // UnmarshalJSON turns JSON into a Timestamp
@@ -59,7 +59,7 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
 	if err != nil {
 		return err
 	}
-	*t = Timestamp(time.Unix(timestamp/1e3, (timestamp%1e3)*1e6).UTC())
+	*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
 	return nil
 }
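Editor's note: the `1e3`/`1E3` hunks above are purely cosmetic: both spellings denote the same constant, and the arithmetic converts milliseconds since the Unix epoch to and from a `time.Time`. A standalone check of that round trip (the sample value is arbitrary):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	var ms int64 = 1569000000123 // milliseconds since epoch

	// Split into whole seconds and remaining nanoseconds, exactly as
	// parseTimeString/UnmarshalJSON do above.
	t := time.Unix(ms/1e3, (ms%1e3)*1e6).UTC()

	// And back: UnixNano()/1e6 recovers the original milliseconds,
	// as in MarshalJSON/timeString.
	fmt.Println(t, t.UnixNano()/1e6 == ms) // prints the time and true
}
```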
145 backend/b2/b2.go
@@ -25,7 +25,6 @@ import (
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
@@ -35,8 +34,6 @@ import (
 	"github.com/rclone/rclone/lib/rest"
 )
 
-const enc = encodings.B2
-
 const (
 	defaultEndpoint = "https://api.backblazeb2.com"
 	headerPrefix    = "x-bz-info-" // lower case as that is what the server returns
@@ -276,11 +273,11 @@ func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
 
 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
-func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
 	if resp != nil && resp.StatusCode == 401 {
 		fs.Debugf(f, "Unauthorized: %v", err)
 		// Reauth
-		authErr := f.authorizeAccount(ctx)
+		authErr := f.authorizeAccount()
 		if authErr != nil {
 			err = authErr
 		}
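Editor's note: the hunks here and below only thread (or un-thread) a `context.Context` through an unchanged pattern: a pacer-driven retry loop whose callback reports `(retry, err)` and which reauthorizes on a 401 before retrying. A stand-alone sketch of that pattern, with illustrative names rather than the real `fs/pacer` API:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// callWithRetry mimics pacer.Call + shouldRetry: reauth and retry on
// 401, stop on success or when the attempts are exhausted.
func callWithRetry(maxTries int, call func() (*http.Response, error), reauth func() error) error {
	var err error
	for try := 0; try < maxTries; try++ {
		var resp *http.Response
		resp, err = call()
		if resp != nil && resp.StatusCode == http.StatusUnauthorized {
			if authErr := reauth(); authErr != nil {
				err = authErr
			}
			continue // retry with fresh credentials
		}
		if err == nil {
			return nil
		}
	}
	return err
}

func main() {
	tries := 0
	err := callWithRetry(3,
		func() (*http.Response, error) {
			tries++
			if tries == 1 {
				return &http.Response{StatusCode: 401}, errors.New("unauthorized")
			}
			return &http.Response{StatusCode: 200}, nil
		},
		func() error { return nil }, // pretend reauthorization succeeds
	)
	fmt.Println(tries, err) // 2 <nil>
}
```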
@@ -396,13 +393,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
 	}
 	f.fillBufferTokens()
-	err = f.authorizeAccount(ctx)
+	err = f.authorizeAccount()
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to authorize account")
 	}
 	// If this is a key limited to a single bucket, it must exist already
 	if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
-		allowedBucket := enc.ToStandardName(f.info.Allowed.BucketName)
+		allowedBucket := f.info.Allowed.BucketName
 		if allowedBucket == "" {
 			return nil, errors.New("bucket that application key is restricted to no longer exists")
 		}
@@ -434,7 +431,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 
 // authorizeAccount gets the API endpoint and auth token. Can be used
 // for reauthentication too.
-func (f *Fs) authorizeAccount(ctx context.Context) error {
+func (f *Fs) authorizeAccount() error {
 	f.authMu.Lock()
 	defer f.authMu.Unlock()
 	opts := rest.Opts{
@@ -446,7 +443,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
 		ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
 	}
 	err := f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(ctx, &opts, nil, &f.info)
+		resp, err := f.srv.CallJSON(&opts, nil, &f.info)
 		return f.shouldRetryNoReauth(resp, err)
 	})
 	if err != nil {
@@ -469,10 +466,10 @@ func (f *Fs) hasPermission(permission string) bool {
 // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
 //
 // This should be returned with returnUploadURL when finished
-func (f *Fs) getUploadURL(ctx context.Context, bucket string) (upload *api.GetUploadURLResponse, err error) {
+func (f *Fs) getUploadURL(bucket string) (upload *api.GetUploadURLResponse, err error) {
 	f.uploadMu.Lock()
 	defer f.uploadMu.Unlock()
-	bucketID, err := f.getBucketID(ctx, bucket)
+	bucketID, err := f.getBucketID(bucket)
 	if err != nil {
 		return nil, err
 	}
@@ -492,8 +489,8 @@ func (f *Fs) getUploadURL(ctx context.Context, bucket string) (upload *api.GetUp
 		BucketID: bucketID,
 	}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(ctx, &opts, &request, &upload)
-		return f.shouldRetry(ctx, resp, err)
+		resp, err := f.srv.CallJSON(&opts, &request, &upload)
+		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to get upload URL")
@@ -612,7 +609,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 	if !recurse {
 		delimiter = "/"
 	}
-	bucketID, err := f.getBucketID(ctx, bucket)
+	bucketID, err := f.getBucketID(bucket)
 	if err != nil {
 		return err
 	}
@@ -623,11 +620,11 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 	var request = api.ListFileNamesRequest{
 		BucketID:     bucketID,
 		MaxFileCount: chunkSize,
-		Prefix:       enc.FromStandardPath(directory),
+		Prefix:       directory,
 		Delimiter:    delimiter,
 	}
 	if directory != "" {
-		request.StartFileName = enc.FromStandardPath(directory)
+		request.StartFileName = directory
 	}
 	opts := rest.Opts{
 		Method: "POST",
@@ -639,15 +636,14 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 	for {
 		var response api.ListFileNamesResponse
 		err := f.pacer.Call(func() (bool, error) {
-			resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
-			return f.shouldRetry(ctx, resp, err)
+			resp, err := f.srv.CallJSON(&opts, &request, &response)
+			return f.shouldRetry(resp, err)
 		})
 		if err != nil {
 			return err
 		}
 		for i := range response.Files {
 			file := &response.Files[i]
-			file.Name = enc.ToStandardPath(file.Name)
 			// Finish if file name no longer has prefix
 			if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
 				return nil
@@ -731,7 +727,7 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
 
 // listBuckets returns all the buckets to out
 func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
-	err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
+	err = f.listBucketsToFn(func(bucket *api.Bucket) error {
 		d := fs.NewDir(bucket.Name, time.Time{})
 		entries = append(entries, d)
 		return nil
@@ -824,7 +820,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 type listBucketFn func(*api.Bucket) error
 
 // listBucketsToFn lists the buckets to the function supplied
-func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
+func (f *Fs) listBucketsToFn(fn listBucketFn) error {
 	var account = api.ListBucketsRequest{
 		AccountID: f.info.AccountID,
 		BucketID:  f.info.Allowed.BucketID,
@@ -836,8 +832,8 @@ func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
 		Path:   "/b2_list_buckets",
 	}
 	err := f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
-		return f.shouldRetry(ctx, resp, err)
+		resp, err := f.srv.CallJSON(&opts, &account, &response)
+		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
 		return err
@@ -848,7 +844,6 @@ func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
 	f._bucketType = make(map[string]string, 1)
 	for i := range response.Buckets {
 		bucket := &response.Buckets[i]
-		bucket.Name = enc.ToStandardName(bucket.Name)
 		f.cache.MarkOK(bucket.Name)
 		f._bucketID[bucket.Name] = bucket.ID
 		f._bucketType[bucket.Name] = bucket.Type
@@ -867,14 +862,14 @@ func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
 
 // getbucketType finds the bucketType for the current bucket name
 // can be one of allPublic. allPrivate, or snapshot
-func (f *Fs) getbucketType(ctx context.Context, bucket string) (bucketType string, err error) {
+func (f *Fs) getbucketType(bucket string) (bucketType string, err error) {
 	f.bucketTypeMutex.Lock()
 	bucketType = f._bucketType[bucket]
 	f.bucketTypeMutex.Unlock()
 	if bucketType != "" {
 		return bucketType, nil
 	}
-	err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
+	err = f.listBucketsToFn(func(bucket *api.Bucket) error {
 		// listBucketsToFn reads bucket Types
 		return nil
 	})
@@ -902,14 +897,14 @@ func (f *Fs) clearBucketType(bucket string) {
 }
 
 // getBucketID finds the ID for the current bucket name
-func (f *Fs) getBucketID(ctx context.Context, bucket string) (bucketID string, err error) {
+func (f *Fs) getBucketID(bucket string) (bucketID string, err error) {
 	f.bucketIDMutex.Lock()
 	bucketID = f._bucketID[bucket]
 	f.bucketIDMutex.Unlock()
 	if bucketID != "" {
 		return bucketID, nil
 	}
-	err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
+	err = f.listBucketsToFn(func(bucket *api.Bucket) error {
 		// listBucketsToFn sets IDs
 		return nil
 	})
@@ -970,20 +965,20 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
 	}
 	var request = api.CreateBucketRequest{
 		AccountID: f.info.AccountID,
-		Name:      enc.FromStandardName(bucket),
+		Name:      bucket,
 		Type:      "allPrivate",
 	}
 	var response api.Bucket
 	err := f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
-		return f.shouldRetry(ctx, resp, err)
+		resp, err := f.srv.CallJSON(&opts, &request, &response)
+		return f.shouldRetry(resp, err)
	})
 	if err != nil {
 		if apiErr, ok := err.(*api.Error); ok {
 			if apiErr.Code == "duplicate_bucket_name" {
 				// Check this is our bucket - buckets are globally unique and this
 				// might be someone elses.
-				_, getBucketErr := f.getBucketID(ctx, bucket)
+				_, getBucketErr := f.getBucketID(bucket)
 				if getBucketErr == nil {
 					// found so it is our bucket
 					return nil
@@ -1014,7 +1009,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 		Method: "POST",
 		Path:   "/b2_delete_bucket",
 	}
-	bucketID, err := f.getBucketID(ctx, bucket)
+	bucketID, err := f.getBucketID(bucket)
 	if err != nil {
 		return err
 	}
@@ -1024,8 +1019,8 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	}
 	var response api.Bucket
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
-		return f.shouldRetry(ctx, resp, err)
+		resp, err := f.srv.CallJSON(&opts, &request, &response)
+		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "failed to delete bucket")
@@ -1043,8 +1038,8 @@ func (f *Fs) Precision() time.Duration {
 }
 
 // hide hides a file on the remote
-func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
-	bucketID, err := f.getBucketID(ctx, bucket)
+func (f *Fs) hide(bucket, bucketPath string) error {
+	bucketID, err := f.getBucketID(bucket)
 	if err != nil {
 		return err
 	}
@@ -1054,12 +1049,12 @@ func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
 	}
 	var request = api.HideFileRequest{
 		BucketID: bucketID,
-		Name:     enc.FromStandardPath(bucketPath),
+		Name:     bucketPath,
 	}
 	var response api.File
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
-		return f.shouldRetry(ctx, resp, err)
+		resp, err := f.srv.CallJSON(&opts, &request, &response)
+		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
 		if apiErr, ok := err.(*api.Error); ok {
@@ -1075,19 +1070,19 @@ func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
 }
 
 // deleteByID deletes a file version given Name and ID
-func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
+func (f *Fs) deleteByID(ID, Name string) error {
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/b2_delete_file_version",
 	}
 	var request = api.DeleteFileRequest{
 		ID:   ID,
-		Name: enc.FromStandardPath(Name),
+		Name: Name,
 	}
 	var response api.File
 	err := f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
-		return f.shouldRetry(ctx, resp, err)
+		resp, err := f.srv.CallJSON(&opts, &request, &response)
+		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
 		return errors.Wrapf(err, "failed to delete %q", Name)
@@ -1137,7 +1132,7 @@ func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool)
 					continue
 				}
 				tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
-				err = f.deleteByID(ctx, object.ID, object.Name)
+				err = f.deleteByID(object.ID, object.Name)
 				checkErr(err)
 				tr.Done(err)
 			}
@@ -1210,7 +1205,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	destBucketID, err := f.getBucketID(ctx, dstBucket)
+	destBucketID, err := f.getBucketID(dstBucket)
 	if err != nil {
 		return nil, err
 	}
@@ -1220,14 +1215,14 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 	var request = api.CopyFileRequest{
 		SourceID:          srcObj.id,
-		Name:              enc.FromStandardPath(dstPath),
+		Name:              dstPath,
 		MetadataDirective: "COPY",
 		DestBucketID:      destBucketID,
 	}
 	var response api.FileInfo
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
-		return f.shouldRetry(ctx, resp, err)
+		resp, err := f.srv.CallJSON(&opts, &request, &response)
+		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -1250,7 +1245,7 @@ func (f *Fs) Hashes() hash.Set {
 
 // getDownloadAuthorization returns authorization token for downloading
 // without account.
-func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string) (authorization string, err error) {
+func (f *Fs) getDownloadAuthorization(bucket, remote string) (authorization string, err error) {
 	validDurationInSeconds := time.Duration(f.opt.DownloadAuthorizationDuration).Nanoseconds() / 1e9
 	if validDurationInSeconds <= 0 || validDurationInSeconds > 604800 {
 		return "", errors.New("--b2-download-auth-duration must be between 1 sec and 1 week")
@@ -1258,7 +1253,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
 	if !f.hasPermission("shareFiles") {
 		return "", errors.New("sharing a file link requires the shareFiles permission")
 	}
-	bucketID, err := f.getBucketID(ctx, bucket)
+	bucketID, err := f.getBucketID(bucket)
 	if err != nil {
 		return "", err
 	}
@@ -1268,13 +1263,13 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
 	}
 	var request = api.GetDownloadAuthorizationRequest{
 		BucketID:               bucketID,
-		FileNamePrefix:         enc.FromStandardPath(path.Join(f.root, remote)),
+		FileNamePrefix:         path.Join(f.root, remote),
 		ValidDurationInSeconds: validDurationInSeconds,
 	}
 	var response api.GetDownloadAuthorizationResponse
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
-		return f.shouldRetry(ctx, resp, err)
+		resp, err := f.srv.CallJSON(&opts, &request, &response)
+		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
 		return "", errors.Wrap(err, "failed to get download authorization")
@@ -1306,12 +1301,12 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
 	}
 	absPath := "/" + bucketPath
 	link = RootURL + "/file/" + urlEncode(bucket) + absPath
-	bucketType, err := f.getbucketType(ctx, bucket)
+	bucketType, err := f.getbucketType(bucket)
 	if err != nil {
 		return "", err
 	}
 	if bucketType == "allPrivate" || bucketType == "snapshot" {
-		AuthorizationToken, err := f.getDownloadAuthorization(ctx, bucket, remote)
+		AuthorizationToken, err := f.getDownloadAuthorization(bucket, remote)
 		if err != nil {
 			return "", err
 		}
@@ -1375,12 +1370,6 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
 	if o.sha1 == "" || o.sha1 == "none" {
 		o.sha1 = Info[sha1Key]
 	}
-	// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
-	// Some tools (eg Cyberduck) use this
-	const unverified = "unverified:"
-	if strings.HasPrefix(o.sha1, unverified) {
-		o.sha1 = o.sha1[len(unverified):]
-	}
 	o.size = Size
 	// Use the UploadTimestamp if can't get file info
 	o.modTime = time.Time(UploadTimestamp)
@@ -1464,7 +1453,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 // timeString returns modTime as the number of milliseconds
 // elapsed since January 1, 1970 UTC as a decimal string.
 func timeString(modTime time.Time) string {
-	return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
+	return strconv.FormatInt(modTime.UnixNano()/1E6, 10)
 }
 
 // parseTimeString converts a decimal string number of milliseconds
@@ -1479,7 +1468,7 @@ func (o *Object) parseTimeString(timeString string) (err error) {
 		fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
 		return nil
 	}
-	o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
+	o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC()
 	return nil
 }
 
@@ -1509,15 +1498,15 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	}
 	var request = api.CopyFileRequest{
 		SourceID:          o.id,
-		Name:              enc.FromStandardPath(bucketPath), // copy to same name
+		Name:              bucketPath, // copy to same name
 		MetadataDirective: "REPLACE",
 		ContentType:       info.ContentType,
 		Info:              info.Info,
 	}
 	var response api.FileInfo
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err := o.fs.srv.CallJSON(ctx, &opts, &request, &response)
-		return o.fs.shouldRetry(ctx, resp, err)
+		resp, err := o.fs.srv.CallJSON(&opts, &request, &response)
+		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
 		return err
@@ -1611,12 +1600,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
 	} else {
 		bucket, bucketPath := o.split()
-		opts.Path += "/file/" + urlEncode(enc.FromStandardName(bucket)) + "/" + urlEncode(enc.FromStandardPath(bucketPath))
+		opts.Path += "/file/" + urlEncode(bucket) + "/" + urlEncode(bucketPath)
 	}
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(ctx, &opts)
-		return o.fs.shouldRetry(ctx, resp, err)
+		resp, err = o.fs.srv.Call(&opts)
+		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to open for download")
@@ -1712,7 +1701,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 				o.fs.putUploadBlock(buf)
 				return err
 			}
-			return up.Stream(ctx, buf)
+			return up.Stream(buf)
 		} else if err == io.EOF || err == io.ErrUnexpectedEOF {
 			fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
 			defer o.fs.putUploadBlock(buf)
@@ -1726,7 +1715,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		if err != nil {
 			return err
 		}
-		return up.Upload(ctx)
+		return up.Upload()
 	}
 
 	modTime := src.ModTime(ctx)
@@ -1740,7 +1729,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}
 
 	// Get upload URL
-	upload, err := o.fs.getUploadURL(ctx, bucket)
+	upload, err := o.fs.getUploadURL(bucket)
 	if err != nil {
 		return err
 	}
@@ -1808,7 +1797,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		Body:   in,
 		ExtraHeaders: map[string]string{
 			"Authorization":  upload.AuthorizationToken,
-			"X-Bz-File-Name": urlEncode(enc.FromStandardPath(bucketPath)),
+			"X-Bz-File-Name": urlEncode(bucketPath),
 			"Content-Type":   fs.MimeType(ctx, src),
 			sha1Header:       calculatedSha1,
 			timeHeader:       timeString(modTime),
@@ -1818,8 +1807,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	var response api.FileInfo
 	// Don't retry, return a retry error instead
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-		resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &response)
-		retry, err := o.fs.shouldRetry(ctx, resp, err)
+		resp, err := o.fs.srv.CallJSON(&opts, nil, &response)
+		retry, err := o.fs.shouldRetry(resp, err)
 		// On retryable error clear UploadURL
 		if retry {
 			fs.Debugf(o, "Clearing upload URL because of error: %v", err)
@@ -1840,9 +1829,9 @@ func (o *Object) Remove(ctx context.Context) error {
 		return errNotWithVersions
 	}
 	if o.fs.opt.HardDelete {
-		return o.fs.deleteByID(ctx, o.id, bucketPath)
+		return o.fs.deleteByID(o.id, bucketPath)
 	}
-	return o.fs.hide(ctx, bucket, bucketPath)
+	return o.fs.hide(bucket, bucketPath)
 }
 
 // MimeType of an Object if known, "" otherwise
@@ -105,13 +105,13 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 		Path:   "/b2_start_large_file",
 	}
 	bucket, bucketPath := o.split()
-	bucketID, err := f.getBucketID(ctx, bucket)
+	bucketID, err := f.getBucketID(bucket)
 	if err != nil {
 		return nil, err
 	}
 	var request = api.StartLargeFileRequest{
 		BucketID:    bucketID,
-		Name:        enc.FromStandardPath(bucketPath),
+		Name:        bucketPath,
 		ContentType: fs.MimeType(ctx, src),
 		Info: map[string]string{
 			timeKey: timeString(modTime),
@@ -125,8 +125,8 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 	}
 	var response api.StartLargeFileResponse
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
-		return f.shouldRetry(ctx, resp, err)
+		resp, err := f.srv.CallJSON(&opts, &request, &response)
+		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -150,7 +150,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
 //
 // This should be returned with returnUploadURL when finished
-func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
+func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
 	up.uploadMu.Lock()
 	defer up.uploadMu.Unlock()
 	if len(up.uploads) == 0 {
@@ -162,8 +162,8 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
 			ID: up.id,
 		}
 		err := up.f.pacer.Call(func() (bool, error) {
-			resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
-			return up.f.shouldRetry(ctx, resp, err)
+			resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
+			return up.f.shouldRetry(resp, err)
 		})
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to get upload URL")
@@ -192,12 +192,12 @@ func (up *largeUpload) clearUploadURL() {
 }
 
 // Transfer a chunk
-func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
+func (up *largeUpload) transferChunk(part int64, body []byte) error {
 	err := up.f.pacer.Call(func() (bool, error) {
 		fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
 
 		// Get upload URL
-		upload, err := up.getUploadURL(ctx)
+		upload, err := up.getUploadURL()
 		if err != nil {
 			return false, err
 		}
@@ -241,8 +241,8 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
 
 		var response api.UploadPartResponse
 
-		resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
-		retry, err := up.f.shouldRetry(ctx, resp, err)
+		resp, err := up.f.srv.CallJSON(&opts, nil, &response)
+		retry, err := up.f.shouldRetry(resp, err)
 		if err != nil {
 			fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
 		}
@@ -264,7 +264,7 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
 }
 
 // finish closes off the large upload
-func (up *largeUpload) finish(ctx context.Context) error {
+func (up *largeUpload) finish() error {
 	fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
 	opts := rest.Opts{
 		Method: "POST",
@@ -276,8 +276,8 @@ func (up *largeUpload) finish(ctx context.Context) error {
 	}
 	var response api.FileInfo
 	err := up.f.pacer.Call(func() (bool, error) {
-		resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
-		return up.f.shouldRetry(ctx, resp, err)
+		resp, err := up.f.srv.CallJSON(&opts, &request, &response)
+		return up.f.shouldRetry(resp, err)
 	})
 	if err != nil {
 		return err
@@ -286,7 +286,7 @@ func (up *largeUpload) finish(ctx context.Context) error {
 }
 
 // cancel aborts the large upload
-func (up *largeUpload) cancel(ctx context.Context) error {
+func (up *largeUpload) cancel() error {
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/b2_cancel_large_file",
@@ -296,18 +296,18 @@ func (up *largeUpload) cancel(ctx context.Context) error {
 	}
 	var response api.CancelLargeFileResponse
 	err := up.f.pacer.Call(func() (bool, error) {
-		resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
-		return up.f.shouldRetry(ctx, resp, err)
+		resp, err := up.f.srv.CallJSON(&opts, &request, &response)
+		return up.f.shouldRetry(resp, err)
 	})
 	return err
 }
 
-func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
+func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
 	wg.Add(1)
 	go func(part int64, buf []byte) {
 		defer wg.Done()
 		defer up.f.putUploadBlock(buf)
-		err := up.transferChunk(ctx, part, buf)
+		err := up.transferChunk(part, buf)
 		if err != nil {
 			select {
 			case errs <- err:
@@ -317,7 +317,7 @@ func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGr
 	}(part, buf)
 }
 
-func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, errs chan error) error {
+func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
 	if err == nil {
 		select {
 		case err = <-errs:
@@ -326,19 +326,19 @@ func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, err
 	}
 	if err != nil {
 		fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
-		cancelErr := up.cancel(ctx)
+		cancelErr := up.cancel()
 		if cancelErr != nil {
 			fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
 		}
 		return err
 	}
-	return up.finish(ctx)
+	return up.finish()
 }
 
 // Stream uploads the chunks from the input, starting with a required initial
 // chunk. Assumes the file size is unknown and will upload until the input
 // reaches EOF.
-func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
+func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
 	fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
 	errs := make(chan error, 1)
 	hasMoreParts := true
@@ -346,7 +346,7 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (e
 
 	// Transfer initial chunk
 	up.size = int64(len(initialUploadBlock))
-	up.managedTransferChunk(ctx, &wg, errs, 1, initialUploadBlock)
+	up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)
 
 outer:
 	for part := int64(2); hasMoreParts; part++ {
@@ -388,16 +388,16 @@ outer:
 		}
 
 		// Transfer the chunk
-		up.managedTransferChunk(ctx, &wg, errs, part, buf)
+		up.managedTransferChunk(&wg, errs, part, buf)
 	}
 	wg.Wait()
 	up.sha1s = up.sha1s[:up.parts]
 
-	return up.finishOrCancelOnError(ctx, err, errs)
+	return up.finishOrCancelOnError(err, errs)
 }
 
 // Upload uploads the chunks from the input
-func (up *largeUpload) Upload(ctx context.Context) error {
+func (up *largeUpload) Upload() error {
 	fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
 	remaining := up.size
 	errs := make(chan error, 1)
@@ -428,10 +428,10 @@ outer:
 		}
 
 		// Transfer the chunk
-		up.managedTransferChunk(ctx, &wg, errs, part, buf)
+		up.managedTransferChunk(&wg, errs, part, buf)
 		remaining -= reqSize
 	}
 	wg.Wait()
 
-	return up.finishOrCancelOnError(ctx, err, errs)
+	return up.finishOrCancelOnError(err, errs)
 }
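Editor's note: `managedTransferChunk` and `finishOrCancelOnError` above implement a standard fan-out: one goroutine per chunk, a `sync.WaitGroup` to join them, and a 1-buffered error channel that records the first failure without blocking the workers. A stand-alone sketch of the same shape, with a stand-in `transfer` function instead of the real B2 calls:

```go
package main

import (
	"fmt"
	"sync"
)

func uploadAll(chunks [][]byte, transfer func(part int, body []byte) error) error {
	var wg sync.WaitGroup
	errs := make(chan error, 1) // first error wins, extras are dropped

	for part, buf := range chunks {
		wg.Add(1)
		go func(part int, buf []byte) {
			defer wg.Done()
			if err := transfer(part, buf); err != nil {
				select {
				case errs <- err:
				default: // an error is already recorded
				}
			}
		}(part, buf)
	}
	wg.Wait()

	select {
	case err := <-errs:
		return err // the real code cancels the large upload here
	default:
		return nil // the real code finishes (commits) it here
	}
}

func main() {
	err := uploadAll([][]byte{{1}, {2}, {3}}, func(part int, body []byte) error {
		fmt.Printf("sent part %d (%d bytes)\n", part+1, len(body))
		return nil
	})
	fmt.Println("result:", err)
}
```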
@@ -202,23 +202,3 @@ type CommitUpload struct {
 		ContentModifiedAt Time `json:"content_modified_at"`
 	} `json:"attributes"`
 }
-
-// ConfigJSON defines the shape of a box config.json
-type ConfigJSON struct {
-	BoxAppSettings AppSettings `json:"boxAppSettings"`
-	EnterpriseID   string      `json:"enterpriseID"`
-}
-
-// AppSettings defines the shape of the boxAppSettings within box config.json
-type AppSettings struct {
-	ClientID     string  `json:"clientID"`
-	ClientSecret string  `json:"clientSecret"`
-	AppAuth      AppAuth `json:"appAuth"`
-}
-
-// AppAuth defines the shape of the appAuth within boxAppSettings in config.json
-type AppAuth struct {
-	PublicKeyID string `json:"publicKeyID"`
-	PrivateKey  string `json:"privateKey"`
-	Passphrase  string `json:"passphrase"`
-}
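Editor's note: the structs removed above describe Box's `config.json`. A minimal sketch of reading such a file into them, mirroring what `getBoxConfig` (removed further down) does; the file path is an illustrative placeholder:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
)

type AppAuth struct {
	PublicKeyID string `json:"publicKeyID"`
	PrivateKey  string `json:"privateKey"`
	Passphrase  string `json:"passphrase"`
}

type AppSettings struct {
	ClientID     string  `json:"clientID"`
	ClientSecret string  `json:"clientSecret"`
	AppAuth      AppAuth `json:"appAuth"`
}

type ConfigJSON struct {
	BoxAppSettings AppSettings `json:"boxAppSettings"`
	EnterpriseID   string      `json:"enterpriseID"`
}

func main() {
	data, err := ioutil.ReadFile("config.json") // placeholder path
	if err != nil {
		log.Fatalf("read Box config: %v", err)
	}
	var cfg ConfigJSON
	if err := json.Unmarshal(data, &cfg); err != nil {
		log.Fatalf("parse Box config: %v", err)
	}
	fmt.Println("enterprise:", cfg.EnterpriseID, "client:", cfg.BoxAppSettings.ClientID)
}
```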
@@ -11,12 +11,8 @@ package box
 import (
 	"context"
-	"crypto/rsa"
-	"encoding/json"
-	"encoding/pem"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"log"
 	"net/http"
 	"net/url"
@@ -25,10 +21,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/rclone/rclone/lib/jwtutil"
-
-	"github.com/youmark/pkcs8"
-
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/box/api"
 	"github.com/rclone/rclone/fs"
@@ -36,20 +28,15 @@ import (
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/config/obscure"
-	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/dircache"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/rest"
 	"golang.org/x/oauth2"
-	"golang.org/x/oauth2/jws"
 )
 
-const enc = encodings.Box
-
 const (
 	rcloneClientID              = "d0374ba6pgmaguie02ge15sv1mllndho"
 	rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL"
@@ -62,7 +49,6 @@ const (
 	listChunks          = 1000             // chunk size to read directory listings
 	minUploadCutoff     = 50000000         // upload cutoff can be no lower than this
 	defaultUploadCutoff = 50 * 1024 * 1024
-	tokenURL            = "https://api.box.com/oauth2/token"
 )
 
 // Globals
@@ -87,34 +73,9 @@ func init() {
 		Description: "Box",
 		NewFs:       NewFs,
 		Config: func(name string, m configmap.Mapper) {
-			jsonFile, ok := m.Get("box_config_file")
-			boxSubType, boxSubTypeOk := m.Get("box_sub_type")
-			var err error
-			if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
-				boxConfig, err := getBoxConfig(jsonFile)
-				if err != nil {
-					log.Fatalf("Failed to configure token: %v", err)
-				}
-				privateKey, err := getDecryptedPrivateKey(boxConfig)
-				if err != nil {
-					log.Fatalf("Failed to configure token: %v", err)
-				}
-				claims, err := getClaims(boxConfig, boxSubType)
-				if err != nil {
-					log.Fatalf("Failed to configure token: %v", err)
-				}
-				signingHeaders := getSigningHeaders(boxConfig)
-				queryParams := getQueryParams(boxConfig)
-				client := fshttp.NewClient(fs.Config)
-				err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
-				if err != nil {
-					log.Fatalf("Failed to configure token with jwt authentication: %v", err)
-				}
-			} else {
-				err = oauthutil.Config("box", name, m, oauthConfig)
-				if err != nil {
-					log.Fatalf("Failed to configure token with oauth authentication: %v", err)
-				}
-			}
+			err := oauthutil.Config("box", name, m, oauthConfig)
+			if err != nil {
+				log.Fatalf("Failed to configure token: %v", err)
+			}
 		},
 		Options: []fs.Option{{
@@ -123,19 +84,6 @@ func init() {
 		}, {
 			Name: config.ConfigClientSecret,
 			Help: "Box App Client Secret\nLeave blank normally.",
-		}, {
-			Name: "box_config_file",
-			Help: "Box App config.json location\nLeave blank normally.",
-		}, {
-			Name:    "box_sub_type",
-			Default: "user",
-			Examples: []fs.OptionExample{{
-				Value: "user",
-				Help:  "Rclone should act on behalf of a user",
-			}, {
-				Value: "enterprise",
-				Help:  "Rclone should act on behalf of a service account",
-			}},
 		}, {
 			Name: "upload_cutoff",
 			Help: "Cutoff for switching to multipart upload (>= 50MB).",
@@ -150,74 +98,6 @@ func init() {
 	})
 }
 
-func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
-	file, err := ioutil.ReadFile(configFile)
-	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to read Box config")
-	}
-	err = json.Unmarshal(file, &boxConfig)
-	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to parse Box config")
-	}
-	return boxConfig, nil
-}
-
-func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
-	val, err := jwtutil.RandomHex(20)
-	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to generate random string for jti")
-	}
-
-	claims = &jws.ClaimSet{
|
||||
Iss: boxConfig.BoxAppSettings.ClientID,
|
||||
Sub: boxConfig.EnterpriseID,
|
||||
Aud: tokenURL,
|
||||
Iat: time.Now().Unix(),
|
||||
Exp: time.Now().Add(time.Second * 45).Unix(),
|
||||
PrivateClaims: map[string]interface{}{
|
||||
"box_sub_type": boxSubType,
|
||||
"aud": tokenURL,
|
||||
"jti": val,
|
||||
},
|
||||
}
|
||||
|
||||
return claims, nil
|
||||
}
|
||||
|
||||
func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
|
||||
signingHeaders := &jws.Header{
|
||||
Algorithm: "RS256",
|
||||
Typ: "JWT",
|
||||
KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
|
||||
}
|
||||
|
||||
return signingHeaders
|
||||
}
|
||||
|
||||
func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
|
||||
queryParams := map[string]string{
|
||||
"client_id": boxConfig.BoxAppSettings.ClientID,
|
||||
"client_secret": boxConfig.BoxAppSettings.ClientSecret,
|
||||
}
|
||||
|
||||
return queryParams
|
||||
}
|
||||
|
||||
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
|
||||
|
||||
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
|
||||
if len(rest) > 0 {
|
||||
return nil, errors.Wrap(err, "box: extra data included in private key")
|
||||
}
|
||||
|
||||
rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "box: failed to decrypt private key")
|
||||
}
|
||||
|
||||
return rsaKey.(*rsa.PrivateKey), nil
|
||||
}
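Taken together, the helpers above provide everything golang.org/x/oauth2/jws needs to mint a Box service-account JWT; in the diff, jwtutil.Config performs the actual token exchange. A minimal sketch of how the pieces fit (hypothetical helper, not part of the diff; jws.Encode signs the claim set with RS256 using the decrypted key):

// Hypothetical illustration only: mint a signed Box JWT from a config.json,
// reusing getBoxConfig, getDecryptedPrivateKey, getClaims and
// getSigningHeaders exactly as defined above.
func signBoxJWT(configFile, boxSubType string) (string, error) {
	boxConfig, err := getBoxConfig(configFile)
	if err != nil {
		return "", err
	}
	privateKey, err := getDecryptedPrivateKey(boxConfig)
	if err != nil {
		return "", err
	}
	claims, err := getClaims(boxConfig, boxSubType)
	if err != nil {
		return "", err
	}
	// RS256-sign header + claims into a compact JWT string
	return jws.Encode(getSigningHeaders(boxConfig), claims, privateKey)
}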

// Options defines the configuration for this backend
type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
@@ -301,6 +181,18 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// substitute reserved characters for box
func replaceReservedChars(x string) string {
// Backslash for FULLWIDTH REVERSE SOLIDUS
return strings.Replace(x, "\\", "＼", -1)
}

// restore reserved characters for box
func restoreReservedChars(x string) string {
// FULLWIDTH REVERSE SOLIDUS for Backslash
return strings.Replace(x, "＼", "\\", -1)
}
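Because Box cannot store a literal backslash in item names, the pair of helpers above maps it to the visually similar FULLWIDTH REVERSE SOLIDUS (U+FF3C) on the way up and back again on the way down, so names round-trip unchanged. A quick illustrative snippet (hypothetical, not part of the diff):

// Illustration only: the substitution is lossless for the caller.
name := `dir\file.txt`
stored := replaceReservedChars(name)              // backslash becomes U+FF3C
fmt.Println(restoreReservedChars(stored) == name) // prints true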

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
@@ -312,7 +204,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err
}

found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
found, err := f.listAll(directoryID, false, true, func(item *api.Item) bool {
if item.Name == leaf {
info = item
return true
@@ -460,7 +352,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
pathIDOut = item.ID
return true
@@ -488,13 +380,13 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Parameters: fieldsValue(),
}
mkdir := api.CreateFolder{
Name: enc.FromStandardName(leaf),
Name: replaceReservedChars(leaf),
Parent: api.Parent{
ID: pathID,
},
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -516,7 +408,7 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folders/" + dirID + "/items",
@@ -531,7 +423,7 @@ OUTER:
var result api.FolderItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -554,7 +446,7 @@ OUTER:
if item.ItemStatus != api.ItemStatusActive {
continue
}
item.Name = enc.ToStandardName(item.Name)
item.Name = restoreReservedChars(item.Name)
if fn(item) {
found = true
break OUTER
@@ -587,7 +479,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.Type == api.ItemTypeFolder {
// cache the directory ID for later lookups
@@ -689,14 +581,14 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}

// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
func (f *Fs) deleteObject(id string) error {
opts := rest.Opts{
Method: "DELETE",
Path: "/files/" + id,
NoResponse: true,
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
resp, err := f.srv.Call(&opts)
return shouldRetry(resp, err)
})
}
@@ -727,7 +619,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
opts.Parameters.Set("recursive", strconv.FormatBool(!check))
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -790,8 +682,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Path: "/files/" + srcObj.id + "/copy",
Parameters: fieldsValue(),
}
replacedLeaf := replaceReservedChars(leaf)
copyFile := api.CopyFile{
Name: enc.FromStandardName(leaf),
Name: replacedLeaf,
Parent: api.Parent{
ID: directoryID,
},
@@ -799,7 +692,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var resp *http.Response
var info *api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info)
resp, err = f.srv.CallJSON(&opts, &copyFile, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -822,7 +715,7 @@ func (f *Fs) Purge(ctx context.Context) error {
}

// move a file or folder
func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
// Move the object
opts := rest.Opts{
Method: "PUT",
@@ -830,14 +723,14 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
Parameters: fieldsValue(),
}
move := api.UpdateFileMove{
Name: enc.FromStandardName(leaf),
Name: replaceReservedChars(leaf),
Parent: api.Parent{
ID: directoryID,
},
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
resp, err = f.srv.CallJSON(&opts, &move, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -869,7 +762,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// Do the move
info, err := f.move(ctx, "/files/", srcObj.id, leaf, directoryID)
info, err := f.move("/files/", srcObj.id, leaf, directoryID)
if err != nil {
return nil, err
}
@@ -952,7 +845,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}

// Do the move
_, err = f.move(ctx, "/folders/", srcID, leaf, directoryID)
_, err = f.move("/folders/", srcID, leaf, directoryID)
if err != nil {
return err
}
@@ -994,7 +887,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
var info api.Item
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
resp, err = f.srv.CallJSON(&opts, &shareLink, &info)
return shouldRetry(resp, err)
})
return info.SharedLink.URL, err
@@ -1031,6 +924,11 @@ func (o *Object) Remote() string {
return o.remote
}

// srvPath returns a path for use in server
func (o *Object) srvPath() string {
return replaceReservedChars(o.fs.rootSlash() + o.remote)
}

// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.SHA1 {
@@ -1108,7 +1006,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
}
var info *api.Item
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
return shouldRetry(resp, err)
})
return info, err
@@ -1141,7 +1039,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1153,9 +1051,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
upload := api.UploadFile{
Name: enc.FromStandardName(leaf),
Name: replaceReservedChars(leaf),
ContentModifiedAt: api.Time(modTime),
ContentCreatedAt: api.Time(modTime),
Parent: api.Parent{
@@ -1180,7 +1078,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
opts.Path = "/files/content"
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
resp, err = o.fs.srv.CallJSON(&opts, &upload, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1213,16 +1111,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Upload with simple or multipart
if size <= int64(o.fs.opt.UploadCutoff) {
err = o.upload(ctx, in, leaf, directoryID, modTime)
err = o.upload(in, leaf, directoryID, modTime)
} else {
err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime)
err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
}
return err
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.deleteObject(ctx, o.id)
return o.fs.deleteObject(o.id)
}

// ID returns the ID of the Object if known, or "" if not

@@ -4,7 +4,6 @@ package box

import (
"bytes"
"context"
"crypto/sha1"
"encoding/base64"
"encoding/json"
@@ -23,7 +22,7 @@ import (
)

// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions",
@@ -38,11 +37,11 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
} else {
opts.Path = "/files/upload_sessions"
request.FolderID = directoryID
request.FileName = enc.FromStandardName(leaf)
request.FileName = replaceReservedChars(leaf)
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
return shouldRetry(resp, err)
})
return
@@ -54,7 +53,7 @@ func sha1Digest(digest []byte) string {
}

// uploadPart uploads a part in an upload session
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
chunkSize := int64(len(chunk))
sha1sum := sha1.Sum(chunk)
opts := rest.Opts{
@@ -71,7 +70,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
opts.Body = wrap(bytes.NewReader(chunk))
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
return shouldRetry(resp, err)
})
if err != nil {
@@ -81,7 +80,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
}

// commitUpload finishes an upload session
func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions/" + SessionID + "/commit",
@@ -105,7 +104,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
outer:
for tries = 0; tries < maxTries; tries++ {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
if err != nil {
return shouldRetry(resp, err)
}
@@ -155,7 +154,7 @@ outer:
}

// abortUpload cancels an upload session
func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error) {
func (o *Object) abortUpload(SessionID string) (err error) {
opts := rest.Opts{
Method: "DELETE",
Path: "/files/upload_sessions/" + SessionID,
@@ -164,16 +163,16 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
return err
}

// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
// Create upload session
session, err := o.createUploadSession(ctx, leaf, directoryID, size)
session, err := o.createUploadSession(leaf, directoryID, size)
if err != nil {
return errors.Wrap(err, "multipart upload create session failed")
}
@@ -184,7 +183,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
defer func() {
if err != nil {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.abortUpload(ctx, session.ID)
cancelErr := o.abortUpload(session.ID)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", err)
}
@@ -236,7 +235,7 @@ outer:
defer wg.Done()
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap)
partResponse, err := o.uploadPart(session.ID, position, size, buf, wrap)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part")
select {
@@ -264,7 +263,7 @@ outer:
}

// Finalise the upload session
result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
if err != nil {
return errors.Wrap(err, "multipart upload failed to finalize")
}
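Taken together, createUploadSession, uploadPart, commitUpload and abortUpload implement Box's chunked-upload session protocol: open a session sized for the file, upload SHA1-checked parts, then commit with the whole-file SHA1, aborting the session on failure. A hypothetical sketch of the call order, with the pacer, upload token pool and transfer accounting stripped out (field names such as session.PartSize and partResponse.Part are assumptions based on the API types used above):

// Hypothetical sketch, not part of the diff: the session lifecycle driven
// sequentially; error handling is simplified compared with uploadMultipart.
func uploadSequentially(o *Object, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) error {
	session, err := o.createUploadSession(leaf, directoryID, size)
	if err != nil {
		return err
	}
	noWrap := func(r io.Reader) io.Reader { return r } // no accounting in this sketch
	fileHash := sha1.New()                             // Box checks the whole-file SHA1 on commit
	var parts []api.Part
	for offset := int64(0); offset < size; {
		chunk := make([]byte, session.PartSize) // parts must use the session's part size
		n, err := io.ReadFull(in, chunk)
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			chunk = chunk[:n] // short final part
		} else if err != nil {
			_ = o.abortUpload(session.ID) // best-effort cleanup
			return err
		}
		_, _ = fileHash.Write(chunk)
		partResponse, err := o.uploadPart(session.ID, offset, size, chunk, noWrap)
		if err != nil {
			_ = o.abortUpload(session.ID)
			return err
		}
		parts = append(parts, partResponse.Part)
		offset += int64(len(chunk))
	}
	_, err = o.commitUpload(session.ID, parts, modTime, fileHash.Sum(nil))
	return err
}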
|
||||
|
||||
1
backend/cache/cache_test.go
vendored
1
backend/cache/cache_test.go
vendored
@@ -19,6 +19,5 @@ func TestIntegration(t *testing.T) {
|
||||
NilObject: (*cache.Object)(nil),
|
||||
UnimplementableFsMethods: []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
|
||||
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
|
||||
})
|
||||
}
|
||||
|
||||
2
backend/cache/storage_persistent.go
vendored
2
backend/cache/storage_persistent.go
vendored
@@ -16,7 +16,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
bolt "github.com/etcd-io/bbolt"
|
||||
bolt "github.com/coreos/bbolt"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,691 +0,0 @@
|
||||
package chunker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Command line flags
|
||||
var (
|
||||
UploadKilobytes = flag.Int("upload-kilobytes", 0, "Upload size in Kilobytes, set this to test large uploads")
|
||||
)
|
||||
|
||||
// test that chunking does not break large uploads
|
||||
func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
|
||||
t.Run(fmt.Sprintf("PutLarge%dk", kilobytes), func(t *testing.T) {
|
||||
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
|
||||
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
|
||||
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
|
||||
Size: int64(kilobytes) * int64(fs.KibiByte),
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// test chunk name parser
|
||||
func testChunkNameFormat(t *testing.T, f *Fs) {
|
||||
saveOpt := f.opt
|
||||
defer func() {
|
||||
// restore original settings (f is pointer, f.opt is struct)
|
||||
f.opt = saveOpt
|
||||
_ = f.setChunkNameFormat(f.opt.NameFormat)
|
||||
}()
|
||||
|
||||
assertFormat := func(pattern, wantDataFormat, wantCtrlFormat, wantNameRegexp string) {
|
||||
err := f.setChunkNameFormat(pattern)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, wantDataFormat, f.dataNameFmt)
|
||||
assert.Equal(t, wantCtrlFormat, f.ctrlNameFmt)
|
||||
assert.Equal(t, wantNameRegexp, f.nameRegexp.String())
|
||||
}
|
||||
|
||||
assertFormatValid := func(pattern string) {
|
||||
err := f.setChunkNameFormat(pattern)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
assertFormatInvalid := func(pattern string) {
|
||||
err := f.setChunkNameFormat(pattern)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType, xactID string) {
|
||||
gotChunkName := ""
|
||||
assert.NotPanics(t, func() {
|
||||
gotChunkName = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
|
||||
}, "makeChunkName(%q,%d,%q,%q) must not panic", mainName, chunkNo, ctrlType, xactID)
|
||||
if gotChunkName != "" {
|
||||
assert.Equal(t, wantChunkName, gotChunkName)
|
||||
}
|
||||
}
|
||||
|
||||
assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType, xactID string) {
|
||||
assert.Panics(t, func() {
|
||||
_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
|
||||
}, "makeChunkName(%q,%d,%q,%q) should panic", mainName, chunkNo, ctrlType, xactID)
|
||||
}
|
||||
|
||||
assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType, wantXactID string) {
|
||||
gotMainName, gotChunkNo, gotCtrlType, gotXactID := f.parseChunkName(fileName)
|
||||
assert.Equal(t, wantMainName, gotMainName)
|
||||
assert.Equal(t, wantChunkNo, gotChunkNo)
|
||||
assert.Equal(t, wantCtrlType, gotCtrlType)
|
||||
assert.Equal(t, wantXactID, gotXactID)
|
||||
}
|
||||
|
||||
const newFormatSupported = false // support for patterns not starting with base name (*)
|
||||
|
||||
// valid formats
|
||||
assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
|
||||
assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
|
||||
assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
|
||||
assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
|
||||
assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
|
||||
if newFormatSupported {
|
||||
assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z][a-z0-9]{2,6})),(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
|
||||
}
|
||||
|
||||
// invalid formats
|
||||
assertFormatInvalid(`chunk-#`)
|
||||
assertFormatInvalid(`*-chunk`)
|
||||
assertFormatInvalid(`*-*-chunk-#`)
|
||||
assertFormatInvalid(`*-chunk-#-#`)
|
||||
assertFormatInvalid(`#-chunk-*`)
|
||||
assertFormatInvalid(`*/#`)
|
||||
|
||||
assertFormatValid(`*#`)
|
||||
assertFormatInvalid(`**#`)
|
||||
assertFormatInvalid(`#*`)
|
||||
assertFormatInvalid(``)
|
||||
assertFormatInvalid(`-`)
|
||||
|
||||
// quick tests
|
||||
if newFormatSupported {
|
||||
assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9][0-9a-z]{3,8})\.\.tmp_([0-9]{10,13}))?$`)
|
||||
f.opt.StartFrom = 1
|
||||
|
||||
assertMakeName(`part_fish_1`, "fish", 0, "", "")
|
||||
assertParseName(`part_fish_43`, "fish", 42, "", "")
|
||||
assertMakeName(`part_fish__locks`, "fish", -2, "locks", "")
|
||||
assertParseName(`part_fish__locks`, "fish", -1, "locks", "")
|
||||
assertMakeName(`part_fish__x2y`, "fish", -2, "x2y", "")
|
||||
assertParseName(`part_fish__x2y`, "fish", -1, "x2y", "")
|
||||
assertMakeName(`part_fish_3_0004`, "fish", 2, "", "4")
|
||||
assertParseName(`part_fish_4_0005`, "fish", 3, "", "0005")
|
||||
assertMakeName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -3, "blkinfo", "jj5fvo3wr")
|
||||
assertParseName(`part_fish__blkinfo_zz9fvo3wr`, "fish", -1, "blkinfo", "zz9fvo3wr")
|
||||
|
||||
// old-style temporary suffix (parse only)
|
||||
assertParseName(`part_fish_4..tmp_0000000011`, "fish", 3, "", "000b")
|
||||
assertParseName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -1, "blkinfo", "jj5fvo3wr")
|
||||
}
|
||||
|
||||
// prepare format for long tests
|
||||
assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
|
||||
f.opt.StartFrom = 2
|
||||
|
||||
// valid data chunks
|
||||
assertMakeName(`fish.chunk.003`, "fish", 1, "", "")
|
||||
assertParseName(`fish.chunk.003`, "fish", 1, "", "")
|
||||
assertMakeName(`fish.chunk.021`, "fish", 19, "", "")
|
||||
assertParseName(`fish.chunk.021`, "fish", 19, "", "")
|
||||
|
||||
// valid temporary data chunks
|
||||
assertMakeName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
|
||||
assertParseName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
|
||||
assertMakeName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
|
||||
assertParseName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
|
||||
assertMakeName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
|
||||
assertParseName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
|
||||
assertMakeName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
|
||||
assertParseName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
|
||||
|
||||
// valid temporary data chunks (old temporary suffix, only parse)
|
||||
assertParseName(`fish.chunk.004..tmp_0000000047`, "fish", 2, "", "001b")
|
||||
assertParseName(`fish.chunk.323..tmp_9994567890123`, "fish", 321, "", "3jjfvo3wr")
|
||||
|
||||
// parsing invalid data chunk names
|
||||
assertParseName(`fish.chunk.3`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.001`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.21`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.-21`, "", -1, "", "")
|
||||
|
||||
assertParseName(`fish.chunk.004abcd`, "", -1, "", "") // missing underscore delimiter
|
||||
assertParseName(`fish.chunk.004__1234`, "", -1, "", "") // extra underscore delimiter
|
||||
assertParseName(`fish.chunk.004_123`, "", -1, "", "") // too short temporary suffix
|
||||
assertParseName(`fish.chunk.004_1234567890`, "", -1, "", "") // too long temporary suffix
|
||||
assertParseName(`fish.chunk.004_-1234`, "", -1, "", "") // temporary suffix must be positive
|
||||
assertParseName(`fish.chunk.004_123E`, "", -1, "", "") // uppercase not allowed
|
||||
assertParseName(`fish.chunk.004_12.3`, "", -1, "", "") // punctuation not allowed
|
||||
|
||||
// parsing invalid data chunk names (old temporary suffix)
|
||||
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.323..tmp_12345678901234`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", "")
|
||||
|
||||
// valid control chunks
|
||||
assertMakeName(`fish.chunk._info`, "fish", -1, "info", "")
|
||||
assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", "")
|
||||
assertMakeName(`fish.chunk._blkinfo`, "fish", -3, "blkinfo", "")
|
||||
assertMakeName(`fish.chunk._x2y`, "fish", -4, "x2y", "")
|
||||
|
||||
assertParseName(`fish.chunk._info`, "fish", -1, "info", "")
|
||||
assertParseName(`fish.chunk._locks`, "fish", -1, "locks", "")
|
||||
assertParseName(`fish.chunk._blkinfo`, "fish", -1, "blkinfo", "")
|
||||
assertParseName(`fish.chunk._x2y`, "fish", -1, "x2y", "")
|
||||
|
||||
// valid temporary control chunks
|
||||
assertMakeName(`fish.chunk._info_0001`, "fish", -1, "info", "1")
|
||||
assertMakeName(`fish.chunk._locks_4321`, "fish", -2, "locks", "4321")
|
||||
assertMakeName(`fish.chunk._uploads_abcd`, "fish", -3, "uploads", "abcd")
|
||||
assertMakeName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -4, "blkinfo", "xyzabcdef")
|
||||
assertMakeName(`fish.chunk._x2y_1aaa`, "fish", -5, "x2y", "1aaa")
|
||||
|
||||
assertParseName(`fish.chunk._info_0001`, "fish", -1, "info", "0001")
|
||||
assertParseName(`fish.chunk._locks_4321`, "fish", -1, "locks", "4321")
|
||||
assertParseName(`fish.chunk._uploads_9abc`, "fish", -1, "uploads", "9abc")
|
||||
assertParseName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -1, "blkinfo", "xyzabcdef")
|
||||
assertParseName(`fish.chunk._x2y_1aaa`, "fish", -1, "x2y", "1aaa")
|
||||
|
||||
// valid temporary control chunks (old temporary suffix, parse only)
|
||||
assertParseName(`fish.chunk._info..tmp_0000000047`, "fish", -1, "info", "001b")
|
||||
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", "15wx")
|
||||
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", "0000")
|
||||
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123`, "fish", -1, "blkinfo", "3jjfvo3wr")
|
||||
assertParseName(`fish.chunk._x2y..tmp_0000000000`, "fish", -1, "x2y", "0000")
|
||||
|
||||
// parsing invalid control chunk names
|
||||
assertParseName(`fish.chunk.metadata`, "", -1, "", "") // must be prepended by underscore
|
||||
assertParseName(`fish.chunk.info`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.locks`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.uploads`, "", -1, "", "")
|
||||
|
||||
assertParseName(`fish.chunk._os`, "", -1, "", "") // too short
|
||||
assertParseName(`fish.chunk._metadata`, "", -1, "", "") // too long
|
||||
assertParseName(`fish.chunk._blockinfo`, "", -1, "", "") // way too long
|
||||
assertParseName(`fish.chunk._4me`, "", -1, "", "") // cannot start with digit
|
||||
assertParseName(`fish.chunk._567`, "", -1, "", "") // cannot be all digits
|
||||
assertParseName(`fish.chunk._me_ta`, "", -1, "", "") // punctuation not allowed
|
||||
assertParseName(`fish.chunk._in-fo`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk._.bin`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk._.2xy`, "", -1, "", "")
|
||||
|
||||
// parsing invalid temporary control chunks
|
||||
assertParseName(`fish.chunk._blkinfo1234`, "", -1, "", "") // missing underscore delimiter
|
||||
assertParseName(`fish.chunk._info__1234`, "", -1, "", "") // extra underscore delimiter
|
||||
assertParseName(`fish.chunk._info_123`, "", -1, "", "") // too short temporary suffix
|
||||
assertParseName(`fish.chunk._info_1234567890`, "", -1, "", "") // too long temporary suffix
|
||||
assertParseName(`fish.chunk._info_-1234`, "", -1, "", "") // temporary suffix must be positive
|
||||
assertParseName(`fish.chunk._info_123E`, "", -1, "", "") // uppercase not allowed
|
||||
assertParseName(`fish.chunk._info_12.3`, "", -1, "", "") // punctuation not allowed
|
||||
|
||||
assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", "")
|
||||
|
||||
// short control chunk names: 3 letters ok, 1-2 letters not allowed
|
||||
assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", "")
|
||||
assertParseName(`fish.chunk._int`, "fish", -1, "int", "")
|
||||
|
||||
assertMakeNamePanics("fish", -1, "in", "")
|
||||
assertMakeNamePanics("fish", -1, "up", "4")
|
||||
assertMakeNamePanics("fish", -1, "x", "")
|
||||
assertMakeNamePanics("fish", -1, "c", "1z")
|
||||
|
||||
assertMakeName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0")
|
||||
assertMakeName(`fish.chunk._ext_0026`, "fish", -1, "ext", "26")
|
||||
assertMakeName(`fish.chunk._int_0abc`, "fish", -1, "int", "abc")
|
||||
assertMakeName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
|
||||
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
|
||||
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
|
||||
|
||||
assertParseName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0000")
|
||||
assertParseName(`fish.chunk._ext_0026`, "fish", -1, "ext", "0026")
|
||||
assertParseName(`fish.chunk._int_0abc`, "fish", -1, "int", "0abc")
|
||||
assertParseName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
|
||||
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
|
||||
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
|
||||
|
||||
// base file name can sometimes look like a valid chunk name
|
||||
assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", "")
|
||||
assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", "")
|
||||
assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", "")
|
||||
|
||||
assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", "")
|
||||
assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", "")
|
||||
assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", "")
|
||||
|
||||
// base file name looking like a valid chunk name (old temporary suffix)
|
||||
assertParseName(`fish.chunk.003.chunk.005..tmp_0000000022`, "fish.chunk.003", 3, "", "000m")
|
||||
assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk._info.chunk.005..tmp_0000000023`, "fish.chunk._info", 3, "", "000n")
|
||||
assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
|
||||
|
||||
assertParseName(`fish.chunk.003.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.003", -1, "blkinfo", "3jjfvo3wr")
|
||||
assertParseName(`fish.chunk._info.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._info", -1, "blkinfo", "3jjfvo3wr")
|
||||
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", "")
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000025`, "fish.chunk.004..tmp_0000000021", 3, "", "000p")
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", "")
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.004..tmp_0000000021", -1, "blkinfo", "3jjfvo3wr")
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", "")
|
||||
|
||||
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.004`, "fish.chunk._blkinfo..tmp_9994567890123", 2, "", "")
|
||||
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.005..tmp_0000000026`, "fish.chunk._blkinfo..tmp_9994567890123", 3, "", "000q")
|
||||
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "info", "")
|
||||
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "blkinfo", "3jjfvo3wr")
|
||||
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._Meta`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
|
||||
|
||||
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blkinfo..tmp_1234567890123456789", 2, "", "")
|
||||
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.005..tmp_0000000022`, "fish.chunk._blkinfo..tmp_1234567890123456789", 3, "", "000m")
|
||||
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "info", "")
|
||||
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "blkinfo", "3jjfvo3wr")
|
||||
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
|
||||
|
||||
// attempts to make invalid chunk names
|
||||
assertMakeNamePanics("fish", -1, "", "") // neither data nor control
|
||||
assertMakeNamePanics("fish", 0, "info", "") // both data and control
|
||||
assertMakeNamePanics("fish", -1, "metadata", "") // control type too long
|
||||
assertMakeNamePanics("fish", -1, "blockinfo", "") // control type way too long
|
||||
assertMakeNamePanics("fish", -1, "2xy", "") // first digit not allowed
|
||||
assertMakeNamePanics("fish", -1, "123", "") // all digits not allowed
|
||||
assertMakeNamePanics("fish", -1, "Meta", "") // only lower case letters allowed
|
||||
assertMakeNamePanics("fish", -1, "in-fo", "") // punctuation not allowed
|
||||
assertMakeNamePanics("fish", -1, "_info", "")
|
||||
assertMakeNamePanics("fish", -1, "info_", "")
|
||||
assertMakeNamePanics("fish", -2, ".bind", "")
|
||||
assertMakeNamePanics("fish", -2, "bind.", "")
|
||||
|
||||
assertMakeNamePanics("fish", -1, "", "1") // neither data nor control
|
||||
assertMakeNamePanics("fish", 0, "info", "23") // both data and control
|
||||
assertMakeNamePanics("fish", -1, "metadata", "45") // control type too long
|
||||
assertMakeNamePanics("fish", -1, "blockinfo", "7") // control type way too long
|
||||
assertMakeNamePanics("fish", -1, "2xy", "abc") // first digit not allowed
|
||||
assertMakeNamePanics("fish", -1, "123", "def") // all digits not allowed
|
||||
assertMakeNamePanics("fish", -1, "Meta", "mnk") // only lower case letters allowed
|
||||
assertMakeNamePanics("fish", -1, "in-fo", "xyz") // punctuation not allowed
|
||||
assertMakeNamePanics("fish", -1, "_info", "5678")
|
||||
assertMakeNamePanics("fish", -1, "info_", "999")
|
||||
assertMakeNamePanics("fish", -2, ".bind", "0")
|
||||
assertMakeNamePanics("fish", -2, "bind.", "0")
|
||||
|
||||
assertMakeNamePanics("fish", 0, "", "1234567890") // temporary suffix too long
|
||||
assertMakeNamePanics("fish", 0, "", "123F4") // uppercase not allowed
|
||||
assertMakeNamePanics("fish", 0, "", "123.") // punctuation not allowed
|
||||
assertMakeNamePanics("fish", 0, "", "_123")
|
||||
}
|
||||
|
||||
func testSmallFileInternals(t *testing.T, f *Fs) {
|
||||
const dir = "small"
|
||||
ctx := context.Background()
|
||||
saveOpt := f.opt
|
||||
defer func() {
|
||||
f.opt.FailHard = false
|
||||
_ = operations.Purge(ctx, f.base, dir)
|
||||
f.opt = saveOpt
|
||||
}()
|
||||
f.opt.FailHard = false
|
||||
|
||||
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||
|
||||
checkSmallFileInternals := func(obj fs.Object) {
|
||||
assert.NotNil(t, obj)
|
||||
o, ok := obj.(*Object)
|
||||
assert.True(t, ok)
|
||||
assert.NotNil(t, o)
|
||||
if o == nil {
|
||||
return
|
||||
}
|
||||
switch {
|
||||
case !f.useMeta:
|
||||
// If meta format is "none", non-chunked file (even empty)
|
||||
// internally is a single chunk without meta object.
|
||||
assert.Nil(t, o.main)
|
||||
assert.True(t, o.isComposite()) // sorry, sometimes a name is misleading
|
||||
assert.Equal(t, 1, len(o.chunks))
|
||||
case f.hashAll:
|
||||
// Consistent hashing forces meta object on small files too
|
||||
assert.NotNil(t, o.main)
|
||||
assert.True(t, o.isComposite())
|
||||
assert.Equal(t, 1, len(o.chunks))
|
||||
default:
|
||||
// normally non-chunked file is kept in the Object's main field
|
||||
assert.NotNil(t, o.main)
|
||||
assert.False(t, o.isComposite())
|
||||
assert.Equal(t, 0, len(o.chunks))
|
||||
}
|
||||
}
|
||||
|
||||
checkContents := func(obj fs.Object, contents string) {
|
||||
assert.NotNil(t, obj)
|
||||
assert.Equal(t, int64(len(contents)), obj.Size())
|
||||
|
||||
r, err := obj.Open(ctx)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, r)
|
||||
if r == nil {
|
||||
return
|
||||
}
|
||||
data, err := ioutil.ReadAll(r)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, contents, string(data))
|
||||
_ = r.Close()
|
||||
}
|
||||
|
||||
checkHashsum := func(obj fs.Object) {
|
||||
var ht hash.Type
|
||||
switch {
|
||||
case !f.hashAll:
|
||||
return
|
||||
case f.useMD5:
|
||||
ht = hash.MD5
|
||||
case f.useSHA1:
|
||||
ht = hash.SHA1
|
||||
default:
|
||||
return
|
||||
}
|
||||
// even empty files must have hashsum in consistent mode
|
||||
sum, err := obj.Hash(ctx, ht)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEqual(t, sum, "")
|
||||
}
|
||||
|
||||
checkSmallFile := func(name, contents string) {
|
||||
filename := path.Join(dir, name)
|
||||
item := fstest.Item{Path: filename, ModTime: modTime}
|
||||
_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
|
||||
assert.NotNil(t, put)
|
||||
checkSmallFileInternals(put)
|
||||
checkContents(put, contents)
|
||||
checkHashsum(put)
|
||||
|
||||
// objects returned by Put and NewObject must have similar structure
|
||||
obj, err := f.NewObject(ctx, filename)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, obj)
|
||||
checkSmallFileInternals(obj)
|
||||
checkContents(obj, contents)
|
||||
checkHashsum(obj)
|
||||
|
||||
_ = obj.Remove(ctx)
|
||||
_ = put.Remove(ctx) // for good
|
||||
}
|
||||
|
||||
checkSmallFile("emptyfile", "")
|
||||
checkSmallFile("smallfile", "Ok")
|
||||
}
|
||||
|
||||
func testPreventCorruption(t *testing.T, f *Fs) {
|
||||
if f.opt.ChunkSize > 50 {
|
||||
t.Skip("this test requires small chunks")
|
||||
}
|
||||
const dir = "corrupted"
|
||||
ctx := context.Background()
|
||||
saveOpt := f.opt
|
||||
defer func() {
|
||||
f.opt.FailHard = false
|
||||
_ = operations.Purge(ctx, f.base, dir)
|
||||
f.opt = saveOpt
|
||||
}()
|
||||
f.opt.FailHard = true
|
||||
|
||||
contents := random.String(250)
|
||||
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||
const overlapMessage = "chunk overlap"
|
||||
|
||||
assertOverlapError := func(err error) {
|
||||
assert.Error(t, err)
|
||||
if err != nil {
|
||||
assert.Contains(t, err.Error(), overlapMessage)
|
||||
}
|
||||
}
|
||||
|
||||
newFile := func(name string) fs.Object {
|
||||
item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
|
||||
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||
require.NotNil(t, obj)
|
||||
return obj
|
||||
}
|
||||
billyObj := newFile("billy")
|
||||
|
||||
billyChunkName := func(chunkNo int) string {
|
||||
return f.makeChunkName(billyObj.Remote(), chunkNo, "", "")
|
||||
}
|
||||
|
||||
err := f.Mkdir(ctx, billyChunkName(1))
|
||||
assertOverlapError(err)
|
||||
|
||||
_, err = f.Move(ctx, newFile("silly1"), billyChunkName(2))
|
||||
assert.Error(t, err)
|
||||
assert.True(t, err == fs.ErrorCantMove || (err != nil && strings.Contains(err.Error(), overlapMessage)))
|
||||
|
||||
_, err = f.Copy(ctx, newFile("silly2"), billyChunkName(3))
|
||||
assert.Error(t, err)
|
||||
assert.True(t, err == fs.ErrorCantCopy || (err != nil && strings.Contains(err.Error(), overlapMessage)))
|
||||
|
||||
// accessing chunks in strict mode is prohibited
|
||||
f.opt.FailHard = true
|
||||
billyChunk4Name := billyChunkName(4)
|
||||
billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
|
||||
assertOverlapError(err)
|
||||
|
||||
f.opt.FailHard = false
|
||||
billyChunk4, err = f.NewObject(ctx, billyChunk4Name)
|
||||
assert.NoError(t, err)
|
||||
require.NotNil(t, billyChunk4)
|
||||
|
||||
f.opt.FailHard = true
|
||||
_, err = f.Put(ctx, bytes.NewBufferString(contents), billyChunk4)
|
||||
assertOverlapError(err)
|
||||
|
||||
// you can freely read chunks (if you have an object)
|
||||
r, err := billyChunk4.Open(ctx)
|
||||
assert.NoError(t, err)
|
||||
var chunkContents []byte
|
||||
assert.NotPanics(t, func() {
|
||||
chunkContents, err = ioutil.ReadAll(r)
|
||||
_ = r.Close()
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
assert.NotEqual(t, contents, string(chunkContents))
|
||||
|
||||
// but you can't change them
|
||||
err = billyChunk4.Update(ctx, bytes.NewBufferString(contents), newFile("silly3"))
|
||||
assertOverlapError(err)
|
||||
|
||||
// Remove isn't special, you can't corrupt files even if you have an object
|
||||
err = billyChunk4.Remove(ctx)
|
||||
assertOverlapError(err)
|
||||
|
||||
// recreate billy in case it was anyhow corrupted
|
||||
willyObj := newFile("willy")
|
||||
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", "")
|
||||
f.opt.FailHard = false
|
||||
willyChunk, err := f.NewObject(ctx, willyChunkName)
|
||||
f.opt.FailHard = true
|
||||
assert.NoError(t, err)
|
||||
require.NotNil(t, willyChunk)
|
||||
|
||||
_, err = operations.Copy(ctx, f, willyChunk, willyChunkName, newFile("silly4"))
|
||||
assertOverlapError(err)
|
||||
|
||||
// operations.Move will return error when chunker's Move refused
|
||||
// to corrupt target file, but reverts to copy/delete method
|
||||
// still trying to delete target chunk. Chunker must come to rescue.
|
||||
_, err = operations.Move(ctx, f, willyChunk, willyChunkName, newFile("silly5"))
|
||||
assertOverlapError(err)
|
||||
r, err = willyChunk.Open(ctx)
|
||||
assert.NoError(t, err)
|
||||
assert.NotPanics(t, func() {
|
||||
_, err = ioutil.ReadAll(r)
|
||||
_ = r.Close()
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func testChunkNumberOverflow(t *testing.T, f *Fs) {
|
||||
if f.opt.ChunkSize > 50 {
|
||||
t.Skip("this test requires small chunks")
|
||||
}
|
||||
const dir = "wreaked"
|
||||
const wreakNumber = 10200300
|
||||
ctx := context.Background()
|
||||
saveOpt := f.opt
|
||||
defer func() {
|
||||
f.opt.FailHard = false
|
||||
_ = operations.Purge(ctx, f.base, dir)
|
||||
f.opt = saveOpt
|
||||
}()
|
||||
|
||||
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||
contents := random.String(100)
|
||||
|
||||
newFile := func(f fs.Fs, name string) (fs.Object, string) {
|
||||
filename := path.Join(dir, name)
|
||||
item := fstest.Item{Path: filename, ModTime: modTime}
|
||||
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||
require.NotNil(t, obj)
|
||||
return obj, filename
|
||||
}
|
||||
|
||||
f.opt.FailHard = false
|
||||
file, fileName := newFile(f, "wreaker")
|
||||
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", ""))
|
||||
|
||||
f.opt.FailHard = false
|
||||
fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
|
||||
_, err := f.NewObject(ctx, fileName)
|
||||
assert.Error(t, err)
|
||||
|
||||
f.opt.FailHard = true
|
||||
_, err = f.List(ctx, dir)
|
||||
assert.Error(t, err)
|
||||
_, err = f.NewObject(ctx, fileName)
|
||||
assert.Error(t, err)
|
||||
|
||||
f.opt.FailHard = false
|
||||
_ = wreak.Remove(ctx)
|
||||
_ = file.Remove(ctx)
|
||||
}
|
||||
|
||||
func testMetadataInput(t *testing.T, f *Fs) {
|
||||
const minChunkForTest = 50
|
||||
if f.opt.ChunkSize < minChunkForTest {
|
||||
t.Skip("this test requires chunks that fit metadata")
|
||||
}
|
||||
|
||||
const dir = "usermeta"
|
||||
ctx := context.Background()
|
||||
saveOpt := f.opt
|
||||
defer func() {
|
||||
f.opt.FailHard = false
|
||||
_ = operations.Purge(ctx, f.base, dir)
|
||||
f.opt = saveOpt
|
||||
}()
|
||||
f.opt.FailHard = false
|
||||
|
||||
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||
|
||||
putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
|
||||
item := fstest.Item{Path: name, ModTime: modTime}
|
||||
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
|
||||
assert.NotNil(t, obj, message)
|
||||
return obj
|
||||
}
|
||||
|
||||
runSubtest := func(contents, name string) {
|
||||
description := fmt.Sprintf("file with %s metadata", name)
|
||||
filename := path.Join(dir, name)
|
||||
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
|
||||
|
||||
part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
|
||||
_ = putFile(f, filename, contents, "upload "+description, false)
|
||||
|
||||
obj, err := f.NewObject(ctx, filename)
|
||||
assert.NoError(t, err, "access "+description)
|
||||
assert.NotNil(t, obj)
|
||||
assert.Equal(t, int64(len(contents)), obj.Size(), "size "+description)
|
||||
|
||||
o, ok := obj.(*Object)
|
||||
assert.NotNil(t, ok)
|
||||
if o != nil {
|
||||
assert.True(t, o.isComposite() && len(o.chunks) == 1, description+" is forced composite")
|
||||
o = nil
|
||||
}
|
||||
|
||||
defer func() {
|
||||
_ = obj.Remove(ctx)
|
||||
_ = part.Remove(ctx)
|
||||
}()
|
||||
|
||||
r, err := obj.Open(ctx)
|
||||
assert.NoError(t, err, "open "+description)
|
||||
assert.NotNil(t, r, "open stream of "+description)
|
||||
if err == nil && r != nil {
|
||||
data, err := ioutil.ReadAll(r)
|
||||
assert.NoError(t, err, "read all of "+description)
|
||||
assert.Equal(t, contents, string(data), description+" contents is ok")
|
||||
_ = r.Close()
|
||||
}
|
||||
}
|
||||
|
||||
metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "")
|
||||
require.NoError(t, err)
|
||||
todaysMeta := string(metaData)
|
||||
runSubtest(todaysMeta, "today")
|
||||
|
||||
pastMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":1`)
|
||||
pastMeta = regexp.MustCompile(`"size":[0-9]+`).ReplaceAllLiteralString(pastMeta, `"size":0`)
|
||||
runSubtest(pastMeta, "past")
|
||||
|
||||
futureMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":999`)
|
||||
futureMeta = regexp.MustCompile(`"nchunks":[0-9]+`).ReplaceAllLiteralString(futureMeta, `"nchunks":0,"x":"y"`)
|
||||
runSubtest(futureMeta, "future")
|
||||
}
|
||||
|
||||
// InternalTest dispatches all internal tests
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("PutLarge", func(t *testing.T) {
|
||||
if *UploadKilobytes <= 0 {
|
||||
t.Skip("-upload-kilobytes is not set")
|
||||
}
|
||||
testPutLarge(t, f, *UploadKilobytes)
|
||||
})
|
||||
t.Run("ChunkNameFormat", func(t *testing.T) {
|
||||
testChunkNameFormat(t, f)
|
||||
})
|
||||
t.Run("SmallFileInternals", func(t *testing.T) {
|
||||
testSmallFileInternals(t, f)
|
||||
})
|
||||
t.Run("PreventCorruption", func(t *testing.T) {
|
||||
testPreventCorruption(t, f)
|
||||
})
|
||||
t.Run("ChunkNumberOverflow", func(t *testing.T) {
|
||||
testChunkNumberOverflow(t, f)
|
||||
})
|
||||
t.Run("MetadataInput", func(t *testing.T) {
|
||||
testMetadataInput(t, f)
|
||||
})
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
@@ -1,58 +0,0 @@
// Test the Chunker filesystem interface
package chunker_test

import (
	"flag"
	"os"
	"path/filepath"
	"testing"

	_ "github.com/rclone/rclone/backend/all" // for integration tests
	"github.com/rclone/rclone/backend/chunker"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
)

// Command line flags
var (
	// Invalid characters are not supported by some remotes, eg. Mailru.
	// We enable testing with invalid characters when -remote is not set, so
	// chunker overlays a local directory, but invalid characters are disabled
	// by default when -remote is set, eg. when test_all runs backend tests.
	// You can still test with invalid characters using the below flag.
	UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
)

// TestIntegration runs integration tests against a concrete remote
// set by the -remote flag. If the flag is not set, it creates a
// dynamic chunker overlay wrapping a local temporary directory.
func TestIntegration(t *testing.T) {
	opt := fstests.Opt{
		RemoteName:               *fstest.RemoteName,
		NilObject:                (*chunker.Object)(nil),
		SkipBadWindowsCharacters: !*UseBadChars,
		UnimplementableObjectMethods: []string{
			"MimeType",
			"GetTier",
			"SetTier",
		},
		UnimplementableFsMethods: []string{
			"PublicLink",
			"OpenWriterAt",
			"MergeDirs",
			"DirCacheFlush",
			"UserInfo",
			"Disconnect",
		},
	}
	if *fstest.RemoteName == "" {
		name := "TestChunker"
		opt.RemoteName = name + ":"
		tempDir := filepath.Join(os.TempDir(), "rclone-chunker-test-standard")
		opt.ExtraConfig = []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "chunker"},
			{Name: name, Key: "remote", Value: tempDir},
		}
	}
	fstests.Run(t, &opt)
}
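
As the comments in this file note, running it with no flags exercises a chunker overlay on a local temporary directory, e.g. `go test ./backend/chunker`, while `go test ./backend/chunker -remote myremote:` (where `myremote` stands for whatever remote you have configured; the name here is illustrative) points the same suite at a real backend, optionally with `-bad-chars` to re-enable the invalid-character cases.
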
@@ -208,6 +208,21 @@ func (c *cipher) putBlock(buf []byte) {
	c.buffers.Put(buf)
}

// check to see if the byte string is valid with no control characters
// from 0x00 to 0x1F and is a valid UTF-8 string
func checkValidString(buf []byte) error {
	for i := range buf {
		c := buf[i]
		if c >= 0x00 && c < 0x20 || c == 0x7F {
			return ErrorBadDecryptControlChar
		}
	}
	if !utf8.Valid(buf) {
		return ErrorBadDecryptUTF8
	}
	return nil
}

// encodeFileName encodes a filename using a modified version of
// standard base32 as described in RFC4648
//
@@ -279,6 +294,10 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
	if err != nil {
		return "", err
	}
	err = checkValidString(plaintext)
	if err != nil {
		return "", err
	}
	return string(plaintext), err
}

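
decryptSegment now rejects any decrypted name that contains C0 control characters or DEL, or that is not valid UTF-8, via checkValidString. The same rule as a self-contained program (identifiers here are illustrative, not the crypt package's):

package main

import (
	"errors"
	"fmt"
	"unicode/utf8"
)

var errBadName = errors.New("name contains control characters or invalid UTF-8")

// checkName mirrors the validation above: reject bytes 0x00-0x1F and 0x7F,
// then require the whole buffer to be well-formed UTF-8.
func checkName(buf []byte) error {
	for _, c := range buf {
		if c < 0x20 || c == 0x7F {
			return errBadName
		}
	}
	if !utf8.Valid(buf) {
		return errBadName
	}
	return nil
}

func main() {
	fmt.Println(checkName([]byte("hello.txt")))   // <nil>
	fmt.Println(checkName([]byte("bad\x01name"))) // error: control character
	fmt.Println(checkName([]byte{0xc3, 0x28}))    // error: invalid UTF-8
}
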
@@ -44,6 +44,69 @@ func TestNewNameEncryptionModeString(t *testing.T) {
	assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
}

func TestValidString(t *testing.T) {
	for _, test := range []struct {
		in       string
		expected error
	}{
		{"", nil},
		{"\x01", ErrorBadDecryptControlChar},
		{"a\x02", ErrorBadDecryptControlChar},
		{"abc\x03", ErrorBadDecryptControlChar},
		{"abc\x04def", ErrorBadDecryptControlChar},
		{"\x05d", ErrorBadDecryptControlChar},
		{"\x06def", ErrorBadDecryptControlChar},
		{"\x07", ErrorBadDecryptControlChar},
		{"\x08", ErrorBadDecryptControlChar},
		{"\x09", ErrorBadDecryptControlChar},
		{"\x0A", ErrorBadDecryptControlChar},
		{"\x0B", ErrorBadDecryptControlChar},
		{"\x0C", ErrorBadDecryptControlChar},
		{"\x0D", ErrorBadDecryptControlChar},
		{"\x0E", ErrorBadDecryptControlChar},
		{"\x0F", ErrorBadDecryptControlChar},
		{"\x10", ErrorBadDecryptControlChar},
		{"\x11", ErrorBadDecryptControlChar},
		{"\x12", ErrorBadDecryptControlChar},
		{"\x13", ErrorBadDecryptControlChar},
		{"\x14", ErrorBadDecryptControlChar},
		{"\x15", ErrorBadDecryptControlChar},
		{"\x16", ErrorBadDecryptControlChar},
		{"\x17", ErrorBadDecryptControlChar},
		{"\x18", ErrorBadDecryptControlChar},
		{"\x19", ErrorBadDecryptControlChar},
		{"\x1A", ErrorBadDecryptControlChar},
		{"\x1B", ErrorBadDecryptControlChar},
		{"\x1C", ErrorBadDecryptControlChar},
		{"\x1D", ErrorBadDecryptControlChar},
		{"\x1E", ErrorBadDecryptControlChar},
		{"\x1F", ErrorBadDecryptControlChar},
		{"\x20", nil},
		{"\x7E", nil},
		{"\x7F", ErrorBadDecryptControlChar},
		{"£100", nil},
		{`hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`, nil},
		{"£100", nil},
		// Following tests from https://secure.php.net/manual/en/reference.pcre.pattern.modifiers.php#54805
		{"a", nil}, // Valid ASCII
		{"\xc3\xb1", nil}, // Valid 2 Octet Sequence
		{"\xc3\x28", ErrorBadDecryptUTF8}, // Invalid 2 Octet Sequence
		{"\xa0\xa1", ErrorBadDecryptUTF8}, // Invalid Sequence Identifier
		{"\xe2\x82\xa1", nil}, // Valid 3 Octet Sequence
		{"\xe2\x28\xa1", ErrorBadDecryptUTF8}, // Invalid 3 Octet Sequence (in 2nd Octet)
		{"\xe2\x82\x28", ErrorBadDecryptUTF8}, // Invalid 3 Octet Sequence (in 3rd Octet)
		{"\xf0\x90\x8c\xbc", nil}, // Valid 4 Octet Sequence
		{"\xf0\x28\x8c\xbc", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 2nd Octet)
		{"\xf0\x90\x28\xbc", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 3rd Octet)
		{"\xf0\x28\x8c\x28", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 4th Octet)
		{"\xf8\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8}, // Valid 5 Octet Sequence (but not Unicode!)
		{"\xfc\xa1\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8}, // Valid 6 Octet Sequence (but not Unicode!)
	} {
		actual := checkValidString([]byte(test.in))
		assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
	}
}

func TestEncodeFileName(t *testing.T) {
	for _, test := range []struct {
		in string
@@ -147,6 +210,8 @@ func TestDecryptSegment(t *testing.T) {
		{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
		{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
		{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
		{c.encryptSegment("\x01"), ErrorBadDecryptControlChar},
		{c.encryptSegment("\xc3\x28"), ErrorBadDecryptUTF8},
	} {
		actual, actualErr := c.decryptSegment(test.in)
		assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
@@ -640,16 +705,16 @@ var (

// Test test infrastructure first!
func TestRandomSource(t *testing.T) {
	source := newRandomSource(1e8)
	sink := newRandomSource(1e8)
	source := newRandomSource(1E8)
	sink := newRandomSource(1E8)
	n, err := io.Copy(sink, source)
	assert.NoError(t, err)
	assert.Equal(t, int64(1e8), n)
	assert.Equal(t, int64(1E8), n)

	source = newRandomSource(1e8)
	source = newRandomSource(1E8)
	buf := make([]byte, 16)
	_, _ = source.Read(buf)
	sink = newRandomSource(1e8)
	sink = newRandomSource(1E8)
	_, err = io.Copy(sink, source)
	assert.Error(t, err, "Error in stream")
}
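
newRandomSource is internal to the crypt package: a deterministic byte stream that acts as both reader and writer, so encrypt/decrypt round trips can be verified without storing test data. A minimal stand-in under that assumption (names are mine, not the package's):

package main

import (
	"fmt"
	"io"
)

// detStream yields byte(pos) at each offset; as a writer it verifies that
// exactly that sequence comes back, failing fast on any corruption.
type detStream struct{ pos, size int64 }

func (s *detStream) Read(p []byte) (int, error) {
	if s.pos >= s.size {
		return 0, io.EOF
	}
	n := 0
	for n < len(p) && s.pos < s.size {
		p[n] = byte(s.pos)
		s.pos++
		n++
	}
	return n, nil
}

func (s *detStream) Write(p []byte) (int, error) {
	for n, c := range p {
		if c != byte(s.pos) {
			return n, fmt.Errorf("stream corrupted at offset %d", s.pos)
		}
		s.pos++
	}
	return len(p), nil
}

func main() {
	n, err := io.Copy(&detStream{size: 1e6}, &detStream{size: 1e6})
	fmt.Println(n, err) // 1000000 <nil>
}
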
@@ -689,23 +754,23 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
}

func TestEncryptDecrypt1(t *testing.T) {
	testEncryptDecrypt(t, 1, 1e7)
	testEncryptDecrypt(t, 1, 1E7)
}

func TestEncryptDecrypt32(t *testing.T) {
	testEncryptDecrypt(t, 32, 1e8)
	testEncryptDecrypt(t, 32, 1E8)
}

func TestEncryptDecrypt4096(t *testing.T) {
	testEncryptDecrypt(t, 4096, 1e8)
	testEncryptDecrypt(t, 4096, 1E8)
}

func TestEncryptDecrypt65536(t *testing.T) {
	testEncryptDecrypt(t, 65536, 1e8)
	testEncryptDecrypt(t, 65536, 1E8)
}

func TestEncryptDecrypt65537(t *testing.T) {
	testEncryptDecrypt(t, 65537, 1e8)
	testEncryptDecrypt(t, 65537, 1E8)
}

var (
@@ -738,7 +803,7 @@ func TestEncryptData(t *testing.T) {
	} {
		c, err := newCipher(NameEncryptionStandard, "", "", true)
		assert.NoError(t, err)
		c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
		c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator

		// Check encode works
		buf := bytes.NewBuffer(test.in)
@@ -761,7 +826,7 @@ func TestEncryptData(t *testing.T) {
func TestNewEncrypter(t *testing.T) {
	c, err := newCipher(NameEncryptionStandard, "", "", true)
	assert.NoError(t, err)
	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
	c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator

	z := &zeroes{}

@@ -788,7 +853,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
	fh, err := c.newEncrypter(in, nil)
	assert.NoError(t, err)

	n, err := io.CopyN(ioutil.Discard, fh, 1e6)
	n, err := io.CopyN(ioutil.Discard, fh, 1E6)
	assert.Equal(t, io.ErrUnexpectedEOF, err)
	assert.Equal(t, int64(32), n)
}
@@ -820,7 +885,7 @@ func (c *closeDetector) Close() error {
func TestNewDecrypter(t *testing.T) {
	c, err := newCipher(NameEncryptionStandard, "", "", true)
	assert.NoError(t, err)
	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
	c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator

	cd := newCloseDetector(bytes.NewBuffer(file0))
	fh, err := c.newDecrypter(cd)
@@ -871,7 +936,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
	fh, err := c.newDecrypter(in)
	assert.NoError(t, err)

	n, err := io.CopyN(ioutil.Discard, fh, 1e6)
	n, err := io.CopyN(ioutil.Discard, fh, 1E6)
	assert.Equal(t, io.ErrUnexpectedEOF, err)
	assert.Equal(t, int64(16), n)
}

@@ -63,7 +63,6 @@ func init() {
			Name:       "password",
			Help:       "Password or pass phrase for encryption.",
			IsPassword: true,
			Required:   true,
		}, {
			Name: "password2",
			Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",

@@ -10,7 +10,6 @@ package drive
import (
	"bytes"
	"context"
	"crypto/tls"
	"fmt"
	"io"
	"io/ioutil"
@@ -33,7 +32,6 @@ import (
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/encodings"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
@@ -49,8 +47,6 @@ import (
	"google.golang.org/api/googleapi"
)

const enc = encodings.Drive

// Constants
const (
	rcloneClientID = "202264815644.apps.googleusercontent.com"
@@ -160,7 +156,6 @@ func init() {
		Description: "Google Drive",
		NewFs:       NewFs,
		Config: func(name string, m configmap.Mapper) {
			ctx := context.TODO()
			// Parse config into Options struct
			opt := new(Options)
			err := configstruct.Set(m, opt)
@@ -182,7 +177,7 @@ func init() {
					log.Fatalf("Failed to configure token: %v", err)
				}
			}
			err = configTeamDrive(ctx, opt, m, name)
			err = configTeamDrive(opt, m, name)
			if err != nil {
				log.Fatalf("Failed to configure team drive: %v", err)
			}
@@ -214,15 +209,7 @@ func init() {
			}},
		}, {
			Name: "root_folder_id",
			Help: `ID of the root folder
Leave blank normally.

Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.

Note that if this is blank, the first time rclone runs it will fill it
in with the ID of the root folder.
`,
			Help: "ID of the root folder\nLeave blank normally.\nFill in to access \"Computers\" folders. (see docs).",
		}, {
			Name: "service_account_file",
			Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
@@ -326,17 +313,6 @@ Photos folder" option in your google drive settings. You can then copy
or move the photos locally and use the date the image was taken
(created) set as the modification date.`,
			Advanced: true,
		}, {
			Name:    "use_shared_date",
			Default: false,
			Help: `Use date file was shared instead of modified date.

Note that, as with "--drive-use-created-date", this flag may have
unexpected consequences when uploading/downloading files.

If both this flag and "--drive-use-created-date" are set, the created
date is used.`,
			Advanced: true,
		}, {
			Name:    "list_chunk",
			Default: 1000,
@@ -422,23 +398,9 @@ older versions that have been set to keep forever.`,

This can be useful if you wish to do a server side copy between two
different Google drives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
because it isn't easy to tell if it will work beween any two
configurations.`,
			Advanced: true,
		}, {
			Name:    "disable_http2",
			Default: true,
			Help: `Disable drive using http2

There is currently an unsolved issue with the google drive backend and
HTTP/2. HTTP/2 is therefore disabled by default for the drive backend
but can be re-enabled here. When the issue is solved this flag will
be removed.

See: https://github.com/rclone/rclone/issues/3631

`,
			Advanced: true,
		}},
	})

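
The disable_http2 option works by what the getClient function further down does to the transport: setting a non-nil, empty TLSNextProto map stops net/http from negotiating HTTP/2. A minimal standalone version of that idiom:

package main

import (
	"crypto/tls"
	"net/http"
)

// newHTTP1Client returns a client that will not upgrade to HTTP/2.
// A non-nil, empty TLSNextProto map disables the h2 negotiation hook,
// so all connections stay on HTTP/1.1.
func newHTTP1Client() *http.Client {
	t := &http.Transport{
		TLSNextProto: map[string]func(string, *tls.Conn) http.RoundTripper{},
	}
	return &http.Client{Transport: t}
}

func main() {
	_ = newHTTP1Client()
}
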
@@ -474,7 +436,6 @@ type Options struct {
	ImportExtensions        string      `config:"import_formats"`
	AllowImportNameChange   bool        `config:"allow_import_name_change"`
	UseCreatedDate          bool        `config:"use_created_date"`
	UseSharedDate           bool        `config:"use_shared_date"`
	ListChunk               int64       `config:"list_chunk"`
	Impersonate             string      `config:"impersonate"`
	AlternateExport         bool        `config:"alternate_export"`
@@ -487,7 +448,6 @@ type Options struct {
	PacerMinSleep           fs.Duration `config:"pacer_min_sleep"`
	PacerBurst              int         `config:"pacer_burst"`
	ServerSideAcrossConfigs bool        `config:"server_side_across_configs"`
	DisableHTTP2            bool        `config:"disable_http2"`
}

// Fs represents a remote drive server
@@ -601,23 +561,6 @@ func containsString(slice []string, s string) bool {
	return false
}

// getRootID returns the canonical ID for the "root" ID
func (f *Fs) getRootID() (string, error) {
	var info *drive.File
	var err error
	err = f.pacer.CallNoRetry(func() (bool, error) {
		info, err = f.svc.Files.Get("root").
			Fields("id").
			SupportsAllDrives(true).
			Do()
		return shouldRetry(err)
	})
	if err != nil {
		return "", errors.Wrap(err, "couldn't find root directory ID")
	}
	return info.Id, nil
}

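
Almost every network call in these backends shares one shape: f.pacer.Call (or CallNoRetry) runs a closure for a single attempt and shouldRetry classifies the failure. A schematic, self-contained sketch of that idiom (simplified; rclone's real pacer also rate-limits and sleeps with backoff between attempts):

package main

import (
	"errors"
	"fmt"
)

// callWithRetry shows the shape of the pacer pattern: run one attempt,
// let a classifier decide whether the failure is transient, and retry
// only then.
func callWithRetry(attempt func() error, transient func(error) bool, maxTries int) error {
	var err error
	for try := 0; try < maxTries; try++ {
		if err = attempt(); err == nil || !transient(err) {
			return err
		}
	}
	return err
}

var errBusy = errors.New("server busy") // stand-in for a 403/429/5xx style error

func main() {
	tries := 0
	err := callWithRetry(func() error {
		tries++
		if tries < 3 {
			return errBusy
		}
		return nil
	}, func(err error) bool { return errors.Is(err, errBusy) }, 10)
	fmt.Println(tries, err) // 3 <nil>
}
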
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
@@ -655,10 +598,11 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
	}
	var stems []string
	if title != "" {
		searchTitle := enc.FromStandardName(title)
		// Escaping the backslash isn't documented but seems to work
		searchTitle = strings.Replace(searchTitle, `\`, `\\`, -1)
		searchTitle := strings.Replace(title, `\`, `\\`, -1)
		searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)
		// Convert / to ／ for search
		searchTitle = strings.Replace(searchTitle, "/", "／", -1)

		var titleQuery bytes.Buffer
		_, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)
@@ -706,9 +650,6 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
	if f.opt.AuthOwnerOnly {
		fields += ",owners"
	}
	if f.opt.UseSharedDate {
		fields += ",sharedWithMeTime"
	}
	if f.opt.SkipChecksumGphotos {
		fields += ",spaces"
	}
@@ -722,16 +663,18 @@ OUTER:
	for {
		var files *drive.FileList
		err = f.pacer.Call(func() (bool, error) {
			files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
			files, err = list.Fields(googleapi.Field(fields)).Do()
			return shouldRetry(err)
		})
		if err != nil {
			return false, errors.Wrap(err, "couldn't list directory")
		}
		for _, item := range files.Files {
			item.Name = enc.ToStandardName(item.Name)
			// Convert ／ to / for listing purposes
			item.Name = strings.Replace(item.Name, "／", "/", -1)
			// Check the case of items is correct since
			// the `=` operator is case insensitive.

			if title != "" && title != item.Name {
				found := false
				for _, stem := range stems {
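
The fullwidth solidus ／ (U+FF0F) substitution above is how this pre-encoder version of the backend keeps literal slashes inside Drive file names distinct from rclone's path separator; the newer code path delegates that job to enc. A schematic round trip (the file name is invented):

package main

import (
	"fmt"
	"strings"
)

func main() {
	name := "report 2019/Q3.txt" // a Drive file whose name contains a slash
	sent := strings.Replace(name, "/", "／", -1) // U+FF0F on the wire
	back := strings.Replace(sent, "／", "/", -1) // restored when listing
	fmt.Println(sent)
	fmt.Println(back == name) // true
}
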
@@ -835,7 +778,7 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
}

// Figure out if the user wants to use a team drive
func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
	// Stop if we are running non-interactive config
	if fs.Config.AutoConfirm {
		return nil
@@ -845,7 +788,7 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
	} else {
		fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
	}
	if !config.Confirm(false) {
	if !config.Confirm() {
		return nil
	}
	client, err := createOAuthClient(opt, name, m)
@@ -863,7 +806,7 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
	for {
		var teamDrives *drive.TeamDriveList
		err = newPacer(opt).Call(func() (bool, error) {
			teamDrives, err = listTeamDrives.Context(ctx).Do()
			teamDrives, err = listTeamDrives.Do()
			return shouldRetry(err)
		})
		if err != nil {
@@ -896,18 +839,6 @@ func newPacer(opt *Options) *fs.Pacer {
	return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
}

// getClient makes an http client according to the options
func getClient(opt *Options) *http.Client {
	t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
		if opt.DisableHTTP2 {
			t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
		}
	})
	return &http.Client{
		Transport: t,
	}
}

func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
	scopes := driveScopes(opt.Scope)
	conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
@@ -917,7 +848,7 @@ func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client
	if opt.Impersonate != "" {
		conf.Subject = opt.Impersonate
	}
	ctxWithSpecialClient := oauthutil.Context(getClient(opt))
	ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}

@@ -939,7 +870,7 @@ func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Cli
		return nil, errors.Wrap(err, "failed to create oauth client from service account")
	}
	} else {
		oAuthClient, _, err = oauthutil.NewClientWithBaseClient(name, m, driveConfig, getClient(opt))
		oAuthClient, _, err = oauthutil.NewClient(name, m, driveConfig)
		if err != nil {
			return nil, errors.Wrap(err, "failed to create oauth client")
		}
@@ -1036,25 +967,15 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
	}

	// set root folder for a team drive or query the user root folder
	if opt.RootFolderID != "" {
		// override root folder if set or cached in the config
		f.rootFolderID = opt.RootFolderID
	} else if f.isTeamDrive {
	if f.isTeamDrive {
		f.rootFolderID = f.opt.TeamDriveID
	} else {
		// Look up the root ID and cache it in the config
		rootID, err := f.getRootID()
		if err != nil {
			if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
				// 404 means that this scope does not have permission to get the
				// root so just use "root"
				rootID = "root"
			} else {
				return nil, err
			}
		}
		f.rootFolderID = rootID
		m.Set("root_folder_id", rootID)
		f.rootFolderID = "root"
	}

	// override root folder if set in the config
	if opt.RootFolderID != "" {
		f.rootFolderID = opt.RootFolderID
	}

	f.dirCache = dircache.New(root, f.rootFolderID, f)
@@ -1110,8 +1031,6 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
	modifiedDate := info.ModifiedTime
	if f.opt.UseCreatedDate {
		modifiedDate = info.CreatedTime
	} else if f.opt.UseSharedDate && info.SharedWithMeTime != "" {
		modifiedDate = info.SharedWithMeTime
	}
	size := info.Size
	if f.opt.SizeAsQuota {
@@ -1290,7 +1209,6 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin

// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
	leaf = enc.FromStandardName(leaf)
	// fmt.Println("Making", path)
	// Define the metadata for the directory we are going to create.
	createInfo := &drive.File{
@@ -1480,14 +1398,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	if iErr != nil {
		return nil, iErr
	}
	// If listing the root of a teamdrive and got no entries,
	// double check we have access
	if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
		err = f.teamDriveOK(ctx)
		if err != nil {
			return nil, err
		}
	}
	return entries, nil
}

@@ -1546,10 +1456,6 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
	listRSlices{dirs, paths}.Sort()
	var iErr error
	_, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
		// shared with me items have no parents when at the root
		if f.opt.SharedWithMe && len(item.Parents) == 0 && len(paths) == 1 && paths[0] == "" {
			item.Parents = dirs
		}
		for _, parent := range item.Parents {
			// only handle parents that are in the requested dirs list
			i := sort.SearchStrings(dirs, parent)
@@ -1618,6 +1524,20 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
	if err != nil {
		return err
	}
	if directoryID == "root" {
		var info *drive.File
		err = f.pacer.CallNoRetry(func() (bool, error) {
			info, err = f.svc.Files.Get("root").
				Fields("id").
				SupportsAllDrives(true).
				Do()
			return shouldRetry(err)
		})
		if err != nil {
			return err
		}
		directoryID = info.Id
	}

	mu := sync.Mutex{} // protects in and overflow
	wg := sync.WaitGroup{}
@@ -1625,7 +1545,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
	out := make(chan error, fs.Config.Checkers)
	list := walk.NewListRHelper(callback)
	overflow := []listREntry{}
	listed := 0

	cb := func(entry fs.DirEntry) error {
		mu.Lock()
@@ -1638,7 +1557,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
				overflow = append(overflow, listREntry{d.ID(), d.Remote()})
			}
		}
		listed++
		return list.Add(entry)
	}

@@ -1695,21 +1613,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
		return err
	}

	err = list.Flush()
	if err != nil {
		return err
	}

	// If listing the root of a teamdrive and got no entries,
	// double check we have access
	if f.isTeamDrive && listed == 0 && f.root == "" && dir == "" {
		err = f.teamDriveOK(ctx)
		if err != nil {
			return err
		}
	}

	return nil
	return list.Flush()
}

// itemToDirEntry converts a drive.File to a fs.DirEntry.
@@ -1740,7 +1644,6 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
		return nil, err
	}

	leaf = enc.FromStandardName(leaf)
	// Define the metadata for the file we are going to create.
	createInfo := &drive.File{
		Name: leaf,
@@ -1831,7 +1734,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
		}
	} else {
		// Upload the file in chunks
		info, err = f.Upload(ctx, in, size, srcMimeType, "", remote, createInfo)
		info, err = f.Upload(in, size, srcMimeType, "", remote, createInfo)
		if err != nil {
			return nil, err
		}
@@ -2072,7 +1975,7 @@ func (f *Fs) Purge(ctx context.Context) error {
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
	err := f.pacer.Call(func() (bool, error) {
		err := f.svc.Files.EmptyTrash().Context(ctx).Do()
		err := f.svc.Files.EmptyTrash().Do()
		return shouldRetry(err)
	})

@@ -2082,37 +1985,16 @@ func (f *Fs) CleanUp(ctx context.Context) error {
	return nil
}

// teamDriveOK checks to see if we can access the team drive
func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
	if !f.isTeamDrive {
		return nil
	}
	var td *drive.Drive
	err = f.pacer.Call(func() (bool, error) {
		td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return errors.Wrap(err, "failed to get Team/Shared Drive info")
	}
	fs.Debugf(f, "read info from team drive %q", td.Name)
	return err
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	if f.isTeamDrive {
		err := f.teamDriveOK(ctx)
		if err != nil {
			return nil, err
		}
		// Teamdrives don't appear to have a usage API so just return empty
		return &fs.Usage{}, nil
	}
	var about *drive.About
	var err error
	err = f.pacer.Call(func() (bool, error) {
		about, err = f.svc.About.Get().Fields("storageQuota").Context(ctx).Do()
		about, err = f.svc.About.Get().Fields("storageQuota").Do()
		return shouldRetry(err)
	})
	if err != nil {
@@ -2371,7 +2253,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
		}
	}
	fs.Debugf(f, "Checking for changes on remote")
	startPageToken, err = f.changeNotifyRunner(ctx, notifyFunc, startPageToken)
	startPageToken, err = f.changeNotifyRunner(notifyFunc, startPageToken)
	if err != nil {
		fs.Infof(f, "Change notify listener failure: %s", err)
	}
@@ -2382,11 +2264,9 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
	var startPageToken *drive.StartPageToken
	err = f.pacer.Call(func() (bool, error) {
		changes := f.svc.Changes.GetStartPageToken().SupportsAllDrives(true)
		if f.isTeamDrive {
			changes.DriveId(f.opt.TeamDriveID)
		}
		startPageToken, err = changes.Do()
		startPageToken, err = f.svc.Changes.GetStartPageToken().
			SupportsAllDrives(true).
			Do()
		return shouldRetry(err)
	})
	if err != nil {
@@ -2395,7 +2275,7 @@ func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
	return startPageToken.StartPageToken, nil
}

func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), startPageToken string) (newStartPageToken string, err error) {
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), startPageToken string) (newStartPageToken string, err error) {
	pageToken := startPageToken
	for {
		var changeList *drive.ChangeList

@@ -2409,13 +2289,9 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
			changesCall.SupportsAllDrives(true)
			changesCall.IncludeItemsFromAllDrives(true)
			if f.isTeamDrive {
				changesCall.DriveId(f.opt.TeamDriveID)
				changesCall.TeamDriveId(f.opt.TeamDriveID)
			}
			// If using appDataFolder then need to add Spaces
			if f.rootFolderID == "appDataFolder" {
				changesCall.Spaces("appDataFolder")
			}
			changeList, err = changesCall.Context(ctx).Do()
			changeList, err = changesCall.Do()
			return shouldRetry(err)
		})
		if err != nil {
@@ -2439,7 +2315,6 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.

		// find the new path
		if change.File != nil {
			change.File.Name = enc.ToStandardName(change.File.Name)
			changeType := fs.EntryDirectory
			if change.File.MimeType != driveFolderType {
				changeType = fs.EntryObject
@@ -2624,7 +2499,7 @@ func (o *baseObject) Storable() bool {

// httpResponse gets an http.Response object for the object
// using the url and method passed in
func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
func (o *baseObject) httpResponse(url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
	if url == "" {
		return nil, nil, errors.New("forbidden to download - check sharing permission")
	}
@@ -2632,7 +2507,6 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
	if err != nil {
		return req, nil, err
	}
	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	if o.bytes == 0 {
		// Don't supply range requests for 0 length objects as they always fail
@@ -2703,8 +2577,8 @@ func isGoogleError(err error, what string) bool {
}

// open a url for reading
func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	_, res, err := o.httpResponse(ctx, url, "GET", options)
func (o *baseObject) open(url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	_, res, err := o.httpResponse(url, "GET", options)
	if err != nil {
		if isGoogleError(err, "cannotDownloadAbusiveFile") {
			if o.fs.opt.AcknowledgeAbuse {
@@ -2715,7 +2589,7 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt
				url += "?"
			}
			url += "acknowledgeAbuse=true"
			_, res, err = o.httpResponse(ctx, url, "GET", options)
			_, res, err = o.httpResponse(url, "GET", options)
		} else {
			err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
		}
@@ -2744,7 +2618,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
			o.v2Download = false
		}
	}
	return o.baseObject.open(ctx, o.url, options...)
	return o.baseObject.open(o.url, options...)
}
func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// Update the size with what we are reading as it can change from
@@ -2769,7 +2643,7 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
	if offset != 0 {
		return nil, errors.New("partial downloads are not supported while exporting Google Documents")
	}
	in, err = o.baseObject.open(ctx, o.url, options...)
	in, err = o.baseObject.open(o.url, options...)
	if in != nil {
		in = &openDocumentFile{o: o, in: in}
	}
@@ -2804,7 +2678,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
	return ioutil.NopCloser(bytes.NewReader(data)), nil
}

func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
func (o *baseObject) update(updateInfo *drive.File, uploadMimeType string, in io.Reader,
	src fs.ObjectInfo) (info *drive.File, err error) {
	// Make the API request to upload metadata and file data.
	size := src.Size()
@@ -2822,7 +2696,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
		return
	}
	// Upload the file in chunks
	return o.fs.Upload(ctx, in, size, uploadMimeType, o.id, o.remote, updateInfo)
	return o.fs.Upload(in, size, uploadMimeType, o.id, o.remote, updateInfo)
}

// Update the already existing object
@@ -2836,7 +2710,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		MimeType:     srcMimeType,
		ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
	}
	info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
	info, err := o.baseObject.update(updateInfo, srcMimeType, in, src)
	if err != nil {
		return err
	}
@@ -2873,7 +2747,7 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
	}
	updateInfo.MimeType = importMimeType

	info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
	info, err := o.baseObject.update(updateInfo, srcMimeType, in, src)
	if err != nil {
		return err
	}

@@ -11,7 +11,6 @@
package drive

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
@@ -51,7 +50,7 @@ type resumableUpload struct {
}

// Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
	params := url.Values{
		"alt":        {"json"},
		"uploadType": {"resumable"},
@@ -82,7 +81,6 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
		if err != nil {
			return false, err
		}
		req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
		googleapi.Expand(req.URL, map[string]string{
			"fileId": fileID,
		})
@@ -108,13 +106,12 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
		MediaType:     contentType,
		ContentLength: size,
	}
	return rx.Upload(ctx)
	return rx.Upload()
}

// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
	req, _ := http.NewRequest("POST", rx.URI, body)
	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
	req.ContentLength = reqSize
	if reqSize != 0 {
		req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
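
Each chunk of the resumable upload announces its byte span and the total size via Content-Range, exactly as makeRequest formats it above. A small worked example of the header values (chunk and file sizes are made up):

package main

import "fmt"

// contentRange formats the header for a chunk of reqSize bytes at offset
// start within a file of total bytes, matching the Sprintf call above.
func contentRange(start, reqSize, total int64) string {
	return fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, total)
}

func main() {
	const chunk = int64(8 << 20)  // 8 MiB chunks
	const total = int64(20 << 20) // of a 20 MiB file
	for start := int64(0); start < total; start += chunk {
		size := chunk
		if start+size > total {
			size = total - start // final, short chunk
		}
		fmt.Println(contentRange(start, size, total))
	}
}
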
@@ -132,8 +129,8 @@ var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus(ctx context.Context) (start int64, err error) {
	req := rx.makeRequest(ctx, 0, nil, 0)
func (rx *resumableUpload) transferStatus() (start int64, err error) {
	req := rx.makeRequest(0, nil, 0)
	res, err := rx.f.client.Do(req)
	if err != nil {
		return 0, err
@@ -160,9 +157,9 @@ func (rx *resumableUpload) transferStatus(ctx context.Context) (start int64, err
}

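
When asked for upload status, the server replies with a Range value whose numeric part has the form 0-N, which rangeRE above captures; the next chunk then resumes at N+1. A hedged sketch of that calculation (function name invented):

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)

// nextStart derives the resume offset from the numeric part of a
// "Range: bytes=0-12345" style header: last acknowledged byte + 1.
func nextStart(rangeValue string) (int64, error) {
	m := rangeRE.FindStringSubmatch(rangeValue)
	if m == nil {
		return 0, fmt.Errorf("unexpected range %q", rangeValue)
	}
	last, err := strconv.ParseInt(m[1], 10, 64)
	if err != nil {
		return 0, err
	}
	return last + 1, nil
}

func main() {
	fmt.Println(nextStart("0-1048575")) // 1048576 <nil>
}
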
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
	_, _ = chunk.Seek(0, io.SeekStart)
	req := rx.makeRequest(ctx, start, chunk, chunkSize)
	req := rx.makeRequest(start, chunk, chunkSize)
	res, err := rx.f.client.Do(req)
	if err != nil {
		return 599, err
@@ -195,7 +192,7 @@ func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk

// Upload uploads the chunks from the input
// It retries each chunk using the pacer and --low-level-retries
func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
func (rx *resumableUpload) Upload() (*drive.File, error) {
	start := int64(0)
	var StatusCode int
	var err error
@@ -210,7 +207,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
		// Transfer the chunk
		err = rx.f.pacer.Call(func() (bool, error) {
			fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
			StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
			StatusCode, err = rx.transferChunk(start, chunk, reqSize)
			again, err := shouldRetry(err)
			if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
				again = false

@@ -39,13 +39,11 @@ import (
	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/dropbox/dbhash"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/encodings"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/oauthutil"
@@ -54,8 +52,6 @@ import (
	"golang.org/x/oauth2"
)

const enc = encodings.Dropbox

// Constants
const (
	rcloneClientID = "5jcck7diasz0rqy"
@@ -106,14 +102,10 @@ var (
	// A regexp matching path names for files Dropbox ignores
	// See https://www.dropbox.com/en/help/145 - Ignored files
	ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)

	// DbHashType is the hash.Type for Dropbox
	DbHashType hash.Type
)

// Register with Fs
func init() {
	DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
	fs.Register(&fs.RegInfo{
		Name:        "dropbox",
		Description: "Dropbox",
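
The ignoredFiles pattern mirrors the names Dropbox itself refuses to sync; the later Update hunk turns a silent per-file skip into a hard non-retryable error when one is uploaded. How the regexp behaves on a few invented names:

package main

import (
	"fmt"
	"regexp"
)

var ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)

func main() {
	for _, name := range []string{"photos/Thumbs.db", "docs/.DS_Store", "notes.txt"} {
		// Case-insensitive, anchored at a path segment boundary.
		fmt.Printf("%-20q ignored=%v\n", name, ignoredFiles.MatchString(name))
	}
}
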
@@ -380,15 +372,14 @@ func (f *Fs) setRoot(root string) {
// getMetadata gets the metadata for a file or directory
func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) {
	err = f.pacer.Call(func() (bool, error) {
		entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
			Path: enc.FromStandardPath(objPath),
		})
		entry, err = f.srv.GetMetadata(&files.GetMetadataArg{Path: objPath})
		return shouldRetry(err)
	})
	if err != nil {
		switch e := err.(type) {
		case files.GetMetadataAPIError:
			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
			switch e.EndpointError.Path.Tag {
			case files.LookupErrorNotFound:
				notFound = true
				err = nil
			}
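
The enc.FromStandardPath / enc.ToStandardName pairs appearing throughout these hunks belong to rclone's encoder layer: names are mapped into the remote's safe character set on the way out and mapped back on the way in. A toy round trip under that idea (the two-character table here is invented; the real encodings tables are far larger):

package main

import (
	"fmt"
	"strings"
)

// A toy stand-in for the encoder: map a couple of awkward characters to
// safe lookalikes when sending, and reverse the mapping when listing.
var (
	toRemote   = strings.NewReplacer("\\", "＼", "\x7f", "␡")
	fromRemote = strings.NewReplacer("＼", "\\", "␡", "\x7f")
)

func main() {
	name := `odd\name.txt`
	sent := toRemote.Replace(name)   // what goes over the wire
	back := fromRemote.Replace(sent) // what listings hand back
	fmt.Println(sent, back == name)  // odd＼name.txt true
}
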
@@ -475,7 +466,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	for {
		if !started {
			arg := files.ListFolderArg{
				Path:      enc.FromStandardPath(root),
				Path:      root,
				Recursive: false,
			}
			if root == "/" {
@@ -488,7 +479,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
			if err != nil {
				switch e := err.(type) {
				case files.ListFolderAPIError:
					if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
					switch e.EndpointError.Path.Tag {
					case files.LookupErrorNotFound:
						err = fs.ErrorDirNotFound
					}
				}
@@ -525,7 +517,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e

			// Only the last element is reliably cased in PathDisplay
			entryPath := metadata.PathDisplay
			leaf := enc.ToStandardName(path.Base(entryPath))
			leaf := path.Base(entryPath)
			remote := path.Join(dir, leaf)
			if folderInfo != nil {
				d := fs.NewDir(remote, time.Now())
@@ -583,7 +575,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {

	// create it
	arg2 := files.CreateFolderArg{
		Path: enc.FromStandardPath(root),
		Path: root,
	}
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.srv.CreateFolderV2(&arg2)
@@ -609,7 +601,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
		return errors.Wrap(err, "Rmdir")
	}

	root = enc.FromStandardPath(root)
	// check directory empty
	arg := files.ListFolderArg{
		Path: root,
@@ -666,12 +657,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	}

	// Copy
	arg := files.RelocationArg{
		RelocationPath: files.RelocationPath{
			FromPath: enc.FromStandardPath(srcObj.remotePath()),
			ToPath:   enc.FromStandardPath(dstObj.remotePath()),
		},
	}
	arg := files.RelocationArg{}
	arg.FromPath = srcObj.remotePath()
	arg.ToPath = dstObj.remotePath()
	var err error
	var result *files.RelocationResult
	err = f.pacer.Call(func() (bool, error) {
@@ -703,9 +691,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
func (f *Fs) Purge(ctx context.Context) (err error) {
	// Let dropbox delete the filesystem tree
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.srv.DeleteV2(&files.DeleteArg{
			Path: enc.FromStandardPath(f.slashRoot),
		})
		_, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot})
		return shouldRetry(err)
	})
	return err
@@ -734,12 +720,9 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
	}

	// Do the move
	arg := files.RelocationArg{
		RelocationPath: files.RelocationPath{
			FromPath: enc.FromStandardPath(srcObj.remotePath()),
			ToPath:   enc.FromStandardPath(dstObj.remotePath()),
		},
	}
	arg := files.RelocationArg{}
	arg.FromPath = srcObj.remotePath()
	arg.ToPath = dstObj.remotePath()
	var err error
	var result *files.RelocationResult
	err = f.pacer.Call(func() (bool, error) {
@@ -764,7 +747,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
	absPath := enc.FromStandardPath(path.Join(f.slashRoot, remote))
	absPath := "/" + path.Join(f.Root(), remote)
	fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
	createArg := sharing.CreateSharedLinkWithSettingsArg{
		Path: absPath,
@@ -775,8 +758,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
		return shouldRetry(err)
	})

	if err != nil && strings.Contains(err.Error(),
		sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
	if err != nil && strings.Contains(err.Error(), sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
		fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
		listArg := sharing.ListSharedLinksArg{
			Path: absPath,
@@ -838,12 +820,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
	// ...apparently not necessary

	// Do the move
	arg := files.RelocationArg{
		RelocationPath: files.RelocationPath{
			FromPath: enc.FromStandardPath(srcPath),
			ToPath:   enc.FromStandardPath(dstPath),
		},
	}
	arg := files.RelocationArg{}
	arg.FromPath = srcPath
	arg.ToPath = dstPath
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.srv.MoveV2(&arg)
		return shouldRetry(err)
@@ -884,7 +863,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(DbHashType)
	return hash.Set(hash.Dropbox)
}

// ------------------------------------------------------------
@@ -909,7 +888,7 @@ func (o *Object) Remote() string {

// Hash returns the dropbox special hash
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != DbHashType {
	if t != hash.Dropbox {
		return "", hash.ErrUnsupported
	}
	err := o.readMetaData()
@@ -998,10 +977,7 @@ func (o *Object) Storable() bool {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	fs.FixRangeOption(options, o.bytes)
	headers := fs.OpenOptionHeaders(options)
	arg := files.DownloadArg{
		Path:         enc.FromStandardPath(o.remotePath()),
		ExtraHeaders: headers,
	}
	arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
	err = o.fs.pacer.Call(func() (bool, error) {
		_, in, err = o.fs.srv.Download(&arg)
		return shouldRetry(err)
@@ -1010,7 +986,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	switch e := err.(type) {
	case files.DownloadAPIError:
		// Don't attempt to retry copyright violation errors
		if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
		if e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
			return nil, fserrors.NoRetryError(err)
		}
	}
@@ -1128,9 +1104,10 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	remote := o.remotePath()
	if ignoredFiles.MatchString(remote) {
		return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
		fs.Logf(o, "File name disallowed - not uploading")
		return nil
	}
	commitInfo := files.NewCommitInfo(enc.FromStandardPath(o.remotePath()))
	commitInfo := files.NewCommitInfo(o.remotePath())
	commitInfo.Mode.Tag = "overwrite"
	// The Dropbox API only accepts timestamps in UTC with second precision.
	commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
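
Because Dropbox stores client_modified as UTC with one-second granularity, the modification time must be converted and rounded before upload, exactly as the line above does. A quick worked example (the timestamp is invented):

package main

import (
	"fmt"
	"time"
)

func main() {
	// A local time with sub-second precision...
	t := time.Date(2019, 10, 26, 12, 30, 45, 700e6, time.FixedZone("CEST", 2*3600))
	// ...becomes UTC, rounded to the nearest second, before upload.
	fmt.Println(t.UTC().Round(time.Second)) // 2019-10-26 10:30:46 +0000 UTC
}
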
@@ -1155,9 +1132,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
	err = o.fs.pacer.Call(func() (bool, error) {
		_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
			Path: enc.FromStandardPath(o.remotePath()),
		})
		_, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()})
		return shouldRetry(err)
	})
	return err

@@ -32,7 +32,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {

var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString

func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
func (f *Fs) getDownloadToken(url string) (*GetTokenResponse, error) {
	request := DownloadRequest{
		URL:    url,
		Single: 1,
@@ -44,7 +44,7 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons

	var token GetTokenResponse
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
		resp, err := f.rest.CallJSON(&opts, &request, &token)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -72,7 +72,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr

	var sharedFiles SharedFolderResponse
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
		resp, err := f.rest.CallJSON(&opts, nil, &sharedFiles)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -88,7 +88,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
	return entries, nil
}

func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesList, err error) {
func (f *Fs) listFiles(directoryID int) (filesList *FilesList, err error) {
	// fs.Debugf(f, "Requesting files for dir `%s`", directoryID)
	request := ListFilesRequest{
		FolderID: directoryID,
@@ -101,21 +101,17 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi

	filesList = &FilesList{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
		resp, err := f.rest.CallJSON(&opts, &request, filesList)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't list files")
	}
	for i := range filesList.Items {
		item := &filesList.Items[i]
		item.Filename = enc.ToStandardName(item.Filename)
	}

	return filesList, nil
}

func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *FoldersList, err error) {
func (f *Fs) listFolders(directoryID int) (foldersList *FoldersList, err error) {
	// fs.Debugf(f, "Requesting folders for id `%s`", directoryID)

	request := ListFolderRequest{
@@ -129,17 +125,12 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol

	foldersList = &FoldersList{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
		resp, err := f.rest.CallJSON(&opts, &request, foldersList)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't list folders")
	}
	foldersList.Name = enc.ToStandardName(foldersList.Name)
	for i := range foldersList.SubFolders {
		folder := &foldersList.SubFolders[i]
		folder.Name = enc.ToStandardName(folder.Name)
	}

	// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)

@@ -162,12 +153,12 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
		return nil, err
	}

	files, err := f.listFiles(ctx, folderID)
	files, err := f.listFiles(folderID)
	if err != nil {
		return nil, err
	}

	folders, err := f.listFolders(ctx, folderID)
	folders, err := f.listFolders(folderID)
	if err != nil {
		return nil, err
	}
@@ -175,6 +166,7 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
	entries = make([]fs.DirEntry, len(files.Items)+len(folders.SubFolders))

	for i, item := range files.Items {
		item.Filename = restoreReservedChars(item.Filename)
		entries[i] = f.newObjectFromFile(ctx, dir, item)
	}

@@ -184,6 +176,7 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
		return nil, err
	}

	folder.Name = restoreReservedChars(folder.Name)
	fullPath := getRemote(dir, folder.Name)
	folderID := strconv.Itoa(folder.ID)

@@ -212,8 +205,8 @@ func getRemote(dir, fileName string) string {
	return dir + "/" + fileName
}

func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (response *MakeFolderResponse, err error) {
	name := enc.FromStandardName(leaf)
func (f *Fs) makeFolder(leaf string, folderID int) (response *MakeFolderResponse, err error) {
	name := replaceReservedChars(leaf)
	// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)

	request := MakeFolderRequest{
@@ -228,7 +221,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons

	response = &MakeFolderResponse{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
		resp, err := f.rest.CallJSON(&opts, &request, response)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -240,7 +233,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
	return response, err
}

func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (response *GenericOKResponse, err error) {
func (f *Fs) removeFolder(name string, folderID int) (response *GenericOKResponse, err error) {
	// fs.Debugf(f, "Removing folder with id `%s`", directoryID)

	request := &RemoveFolderRequest{
@@ -255,7 +248,7 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
	response = &GenericOKResponse{}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rest.CallJSON(ctx, &opts, request, response)
		resp, err = f.rest.CallJSON(&opts, request, response)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -270,7 +263,7 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
	return response, nil
}

func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKResponse, err error) {
func (f *Fs) deleteFile(url string) (response *GenericOKResponse, err error) {
	request := &RemoveFileRequest{
		Files: []RmFile{
			{url},
@@ -284,7 +277,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes

	response = &GenericOKResponse{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(ctx, &opts, request, response)
		resp, err := f.rest.CallJSON(&opts, request, response)
		return shouldRetry(resp, err)
	})

@@ -297,7 +290,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
	return response, nil
}

func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
func (f *Fs) getUploadNode() (response *GetUploadNodeResponse, err error) {
	// fs.Debugf(f, "Requesting Upload node")

	opts := rest.Opts{
@@ -308,7 +301,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse

	response = &GetUploadNodeResponse{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
		resp, err := f.rest.CallJSON(&opts, nil, response)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -320,10 +313,10 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
	return response, err
}

func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
func (f *Fs) uploadFile(in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
	// fs.Debugf(f, "Uploading File `%s`", fileName)

	fileName = enc.FromStandardName(fileName)
	fileName = replaceReservedChars(fileName)

	if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
		return nil, errors.New("Invalid UploadID")
@@ -350,7 +343,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
|
||||
}
|
||||
|
||||
err = f.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
|
||||
resp, err := f.rest.CallJSON(&opts, nil, nil)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
|
||||
@@ -363,7 +356,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
|
||||
return response, err
|
||||
}
|
||||
|
||||
func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
|
||||
func (f *Fs) endUpload(uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
|
||||
// fs.Debugf(f, "Ending File Upload `%s`", uploadID)
|
||||
|
||||
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
|
||||
@@ -384,7 +377,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
|
||||
|
||||
response = &EndFileUploadResponse{}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
|
||||
resp, err := f.rest.CallJSON(&opts, nil, response)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/encodings"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/dircache"
|
||||
@@ -29,8 +28,6 @@ const (
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
)
|
||||
|
||||
const enc = encodings.Fichier
|
||||
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "fichier",
|
||||
@@ -77,7 +74,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
folders, err := f.listFolders(ctx, folderID)
|
||||
folders, err := f.listFolders(folderID)
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
@@ -98,7 +95,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
resp, err := f.makeFolder(ctx, leaf, folderID)
|
||||
resp, err := f.makeFolder(leaf, folderID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -144,7 +141,8 @@ func (f *Fs) Features() *fs.Features {
|
||||
//
|
||||
// On Windows avoid single character remote names as they can be mixed
|
||||
// up with drive letters.
|
||||
func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
|
||||
func NewFs(name string, rootleaf string, config configmap.Mapper) (fs.Fs, error) {
|
||||
root := replaceReservedChars(rootleaf)
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(config, opt)
|
||||
if err != nil {
|
||||
@@ -253,7 +251,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
files, err := f.listFiles(ctx, folderID)
|
||||
files, err := f.listFiles(folderID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -300,13 +298,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
// This will create a duplicate if we upload a new file without
|
||||
// checking to see if there is one already - use Put() for that.
|
||||
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
|
||||
if size > int64(100e9) {
|
||||
if size > int64(100E9) {
|
||||
return nil, errors.New("File too big, cant upload")
|
||||
} else if size == 0 {
|
||||
return nil, fs.ErrorCantUploadEmptyFiles
|
||||
}
|
||||
|
||||
nodeResponse, err := f.getUploadNode(ctx)
|
||||
nodeResponse, err := f.getUploadNode()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -316,12 +314,12 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
|
||||
_, err = f.uploadFile(in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fileUploadResponse, err := f.endUpload(ctx, nodeResponse.ID, nodeResponse.URL)
|
||||
fileUploadResponse, err := f.endUpload(nodeResponse.ID, nodeResponse.URL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -348,7 +346,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
|
||||
Date: time.Now().Format("2006-01-02 15:04:05"),
|
||||
Filename: link.Filename,
|
||||
Pass: 0,
|
||||
Size: fileSize,
|
||||
Size: int(fileSize),
|
||||
URL: link.Download,
|
||||
},
|
||||
}, nil
|
||||
@@ -395,7 +393,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = f.removeFolder(ctx, dir, folderID)
|
||||
_, err = f.removeFolder(dir, folderID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -43,7 +43,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
|
||||
// Size returns the size of the file
|
||||
func (o *Object) Size() int64 {
|
||||
return o.file.Size
|
||||
return int64(o.file.Size)
|
||||
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
@@ -74,8 +74,8 @@ func (o *Object) SetModTime(context.Context, time.Time) error {
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
fs.FixRangeOption(options, o.file.Size)
|
||||
downloadToken, err := o.fs.getDownloadToken(ctx, o.file.URL)
|
||||
fs.FixRangeOption(options, int64(o.file.Size))
|
||||
downloadToken, err := o.fs.getDownloadToken(o.file.URL)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -89,7 +89,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
|
||||
}
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.rest.Call(ctx, &opts)
|
||||
resp, err = o.fs.rest.Call(&opts)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
|
||||
@@ -131,7 +131,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
// fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL)
|
||||
|
||||
_, err := o.fs.deleteFile(ctx, o.file.URL)
|
||||
_, err := o.fs.deleteFile(o.file.URL)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
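
Every 1Fichier REST call in the hunks above is wrapped in f.pacer.Call, which keeps retrying the callback while it returns true. A minimal self-contained sketch of that retry-pacing pattern follows; the pacer type, retry limit, and sleep here are hypothetical stand-ins, not rclone's lib/pacer:

package main

import (
	"errors"
	"fmt"
	"time"
)

// pacer retries an operation with a fixed delay while the operation
// reports that a retry is worthwhile (e.g. on HTTP 429/5xx).
type pacer struct {
	retries int
	sleep   time.Duration
}

// Call invokes fn until it returns (false, _) or retries are exhausted.
func (p *pacer) Call(fn func() (bool, error)) error {
	var err error
	for try := 0; try < p.retries; try++ {
		var again bool
		again, err = fn()
		if !again {
			return err
		}
		time.Sleep(p.sleep)
	}
	return err
}

func main() {
	p := &pacer{retries: 3, sleep: 10 * time.Millisecond}
	calls := 0
	err := p.Call(func() (bool, error) {
		calls++
		if calls < 3 {
			return true, errors.New("temporary error") // ask for a retry
		}
		return false, nil // success, stop retrying
	})
	fmt.Println(calls, err) // 3 <nil>
}

The callback's boolean is what shouldRetry computes from the response status in the real code, so transient failures are retried and hard errors surface immediately.
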
71
backend/fichier/replace.go
Normal file
@@ -0,0 +1,71 @@
/*
Translate file names for 1fichier

1Fichier reserved characters

The following characters are 1Fichier reserved characters, and can't
be used in 1Fichier folder and file names.

*/

package fichier

import (
"regexp"
"strings"
)

// charMap holds replacements for characters
//
// 1Fichier has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
'\'': '＇', // FULLWIDTH APOSTROPHE
'$': '＄', // FULLWIDTH DOLLAR SIGN
'`': '｀', // FULLWIDTH GRAVE ACCENT
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
)

func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}

// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// file names can't start with space either
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Replace reserved characters
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}

// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}
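
A hypothetical round-trip illustration of the substitution above (the file name is made up); restoreReservedChars inverts replaceReservedChars for any name that contains no FULLWIDTH characters to begin with, and the test file that follows exercises the same property:

package fichier

import "fmt"

// ExampleReplaceRoundTrip is illustrative only; the name is made up.
func ExampleReplaceRoundTrip() {
	name := `backup "2019".tar`
	encoded := replaceReservedChars(name) // `"` becomes FULLWIDTH QUOTATION MARK
	decoded := restoreReservedChars(encoded)
	fmt.Println(decoded == name)
	// Output: true
}
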
24
backend/fichier/replace_test.go
Normal file
@@ -0,0 +1,24 @@
package fichier

import "testing"

func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{"\"'<>/\\$`", `＂＇＜＞/＼＄｀`},
{" leading space", "␠leading space"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}
@@ -69,7 +69,7 @@ type SharedFolderResponse []SharedFile
type SharedFile struct {
Filename string `json:"filename"`
Link string `json:"link"`
Size int64 `json:"size"`
Size int `json:"size"`
}

// EndFileUploadResponse is the response structure of the corresponding request
@@ -93,7 +93,7 @@ type File struct {
Date string `json:"date"`
Filename string `json:"filename"`
Pass int `json:"pass"`
Size int64 `json:"size"`
Size int `json:"size"`
URL string `json:"url"`
}
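
The Size fields differ between the two sides of these hunks (int64 versus int). The reason int64 matters: Go's int is only 32 bits on 32-bit platforms, so a size of 2 GiB or more overflows it. A quick sketch with a made-up size:

package main

import "fmt"

func main() {
	var fileSize int64 = 3 << 30 // 3 GiB, a hypothetical large file
	fmt.Println(fileSize)        // 3221225472: fits in int64 everywhere
	fmt.Println(int32(fileSize)) // -1073741824: truncated, as a 32-bit int would be
}
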
@@ -17,14 +17,11 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)

const enc = encodings.FTP

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -65,11 +62,6 @@ func init() {
Help: "Do not verify the TLS certificate of the server",
Default: false,
Advanced: true,
}, {
Name: "disable_epsv",
Help: "Disable using EPSV even if server advertises support",
Default: false,
Advanced: true,
},
},
})
@@ -84,7 +76,6 @@ type Options struct {
TLS bool `config:"tls"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
}

// Fs represents a remote FTP server
@@ -150,9 +141,6 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
}
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
}
c, err := ftp.Dial(f.dialAddr, ftpConfig...)
if err != nil {
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
@@ -307,25 +295,6 @@ func translateErrorDir(err error) error {
return err
}

// entryToStandard converts an incoming ftp.Entry to Standard encoding
func entryToStandard(entry *ftp.Entry) {
// Skip . and .. as we don't want these encoded
if entry.Name == "." || entry.Name == ".." {
return
}
entry.Name = enc.ToStandardName(entry.Name)
entry.Target = enc.ToStandardPath(entry.Target)
}

// dirFromStandardPath returns dir in encoded form.
func dirFromStandardPath(dir string) string {
// Skip . and .. as we don't want these encoded
if dir == "." || dir == ".." {
return dir
}
return enc.FromStandardPath(dir)
}

// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
@@ -345,13 +314,12 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
if err != nil {
return nil, errors.Wrap(err, "findItem")
}
files, err := c.List(dirFromStandardPath(dir))
files, err := c.List(dir)
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}
for _, file := range files {
entryToStandard(file)
if file.Name == base {
return file, nil
}
@@ -418,7 +386,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
resultchan := make(chan []*ftp.Entry, 1)
errchan := make(chan error, 1)
go func() {
result, err := c.List(dirFromStandardPath(path.Join(f.root, dir)))
result, err := c.List(path.Join(f.root, dir))
f.putFtpConnection(&c, err)
if err != nil {
errchan <- err
@@ -455,7 +423,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
for i := range files {
object := files[i]
entryToStandard(object)
newremote := path.Join(dir, object.Name)
switch object.Type {
case ftp.EntryTypeFolder:
@@ -525,21 +492,19 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
if err != nil {
return nil, errors.Wrap(err, "getInfo")
}
files, err := c.List(dirFromStandardPath(dir))
files, err := c.List(dir)
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}

for i := range files {
file := files[i]
entryToStandard(file)
if file.Name == base {
if files[i].Name == base {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
IsDir: file.Type == ftp.EntryTypeFolder,
Size: files[i].Size,
ModTime: files[i].Time,
IsDir: files[i].Type == ftp.EntryTypeFolder,
}
return info, nil
}
@@ -549,7 +514,6 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {

// mkdir makes the directory and parents using unrooted paths
func (f *Fs) mkdir(abspath string) error {
abspath = path.Clean(abspath)
if abspath == "." || abspath == "/" {
return nil
}
@@ -571,7 +535,7 @@ func (f *Fs) mkdir(abspath string) error {
if connErr != nil {
return errors.Wrap(connErr, "mkdir")
}
err = c.MakeDir(dirFromStandardPath(abspath))
err = c.MakeDir(abspath)
f.putFtpConnection(&c, err)
switch errX := err.(type) {
case *textproto.Error:
@@ -607,7 +571,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir")
}
err = c.RemoveDir(dirFromStandardPath(path.Join(f.root, dir)))
err = c.RemoveDir(path.Join(f.root, dir))
f.putFtpConnection(&c, err)
return translateErrorDir(err)
}
@@ -628,8 +592,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, errors.Wrap(err, "Move")
}
err = c.Rename(
enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
enc.FromStandardPath(path.Join(f.root, remote)),
path.Join(srcObj.fs.root, srcObj.remote),
path.Join(f.root, remote),
)
f.putFtpConnection(&c, err)
if err != nil {
@@ -682,8 +646,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return errors.Wrap(err, "DirMove")
}
err = c.Rename(
dirFromStandardPath(srcPath),
dirFromStandardPath(dstPath),
srcPath,
dstPath,
)
f.putFtpConnection(&c, err)
if err != nil {
@@ -809,7 +773,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
if err != nil {
return nil, errors.Wrap(err, "open")
}
fd, err := c.RetrFrom(enc.FromStandardPath(path), uint64(offset))
fd, err := c.RetrFrom(path, uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, errors.Wrap(err, "open")
@@ -844,7 +808,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return errors.Wrap(err, "Update")
}
err = c.Stor(enc.FromStandardPath(path), in)
err = c.Stor(path, in)
if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors
remove()
@@ -874,7 +838,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
if err != nil {
return errors.Wrap(err, "Remove")
}
err = c.Delete(enc.FromStandardPath(path))
err = c.Delete(path)
o.fs.putFtpConnection(&c, err)
}
return err
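
The pattern one side of these FTP hunks adds is to encode names just before they hit the wire (dirFromStandardPath, enc.FromStandardPath) and decode entries as they come back (entryToStandard), so everything inside rclone stays in the standard encoding. A self-contained sketch of that boundary discipline, with a stand-in codec in place of rclone's encoder:

package main

import (
	"fmt"
	"strings"
)

// codec is a stand-in for rclone's encoder: it swaps one problem
// character for a placeholder on the way out and back on the way in.
type codec struct{ from, to string }

func (c codec) FromStandard(s string) string { return strings.Replace(s, c.from, c.to, -1) }
func (c codec) ToStandard(s string) string   { return strings.Replace(s, c.to, c.from, -1) }

// sendToServer stands for any wire operation that needs encoded names.
func sendToServer(path string) { fmt.Println("server sees:", path) }

func main() {
	enc := codec{from: " ", to: "␠"}
	standard := "my dir/my file"
	wire := enc.FromStandard(standard) // encode at the boundary...
	sendToServer(wire)
	fmt.Println("rclone sees:", enc.ToStandard(wire)) // ...decode on the way back
}

Keeping the conversion at the connection boundary means no other code path needs to know which characters the server can't handle.
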
@@ -32,7 +32,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
@@ -69,8 +68,6 @@ var (
}
)

const enc = encodings.GoogleCloudStorage

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -352,8 +349,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return enc.FromStandardName(bucketName), enc.FromStandardPath(bucketPath)
return bucket.Split(path.Join(f.root, rootRelativePath))
}

// split returns bucket and bucketPath from the object
@@ -378,7 +374,6 @@ func (f *Fs) setRoot(root string) {

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
var oAuthClient *http.Client

// Parse config into Options struct
@@ -442,9 +437,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the object exists
encodedDirectory := enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
_, err = f.svc.Objects.Get(f.rootBucket, f.rootDirectory).Do()
return shouldRetry(err)
})
if err == nil {
@@ -463,7 +457,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage.Object) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -471,7 +465,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage
if info != nil {
o.setMetaData(info)
} else {
err := o.readMetaData(ctx) // reads info and meta, returning an error
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
return nil, err
}
@@ -482,7 +476,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
return f.newObjectWithInfo(remote, nil)
}

// listFn is called from list to handle an object.
@@ -510,7 +504,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
for {
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
objects, err = list.Context(ctx).Do()
objects, err = list.Do()
return shouldRetry(err)
})
if err != nil {
@@ -527,7 +521,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
if !strings.HasSuffix(remote, "/") {
continue
}
remote = enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
@@ -543,12 +536,11 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
}
for _, object := range objects.Items {
remote := enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) {
if !strings.HasPrefix(object.Name, prefix) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
remote = remote[len(prefix):]
remote := object.Name[len(prefix):]
isDirectory := strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
@@ -571,12 +563,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}

// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
return d, nil
}
o, err := f.newObjectWithInfo(ctx, remote, object)
o, err := f.newObjectWithInfo(remote, object)
if err != nil {
return nil, err
}
@@ -587,7 +579,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
// List the objects
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
@@ -613,14 +605,14 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
for {
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
buckets, err = listBuckets.Context(ctx).Do()
buckets, err = listBuckets.Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
for _, bucket := range buckets.Items {
d := fs.NewDir(enc.ToStandardName(bucket.Name), time.Time{})
d := fs.NewDir(bucket.Name, time.Time{})
entries = append(entries, d)
}
if buckets.NextPageToken == "" {
@@ -672,7 +664,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
@@ -739,7 +731,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
_, err = f.svc.Objects.List(bucket).MaxResults(1).Do()
return shouldRetry(err)
})
if err == nil {
@@ -774,7 +766,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Context(ctx).Do()
_, err = insertBucket.Do()
return shouldRetry(err)
})
}, nil)
@@ -791,7 +783,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
}
return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
err = f.svc.Buckets.Delete(bucket).Do()
return shouldRetry(err)
})
})
@@ -836,7 +828,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
if !f.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
newObject, err = copyObject.Do()
return shouldRetry(err)
})
if err != nil {
@@ -920,10 +912,10 @@ func (o *Object) setMetaData(info *storage.Object) {
}

// readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
func (o *Object) readObjectInfo() (object *storage.Object, err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Do()
return shouldRetry(err)
})
if err != nil {
@@ -940,11 +932,11 @@ func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, er
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) readMetaData() (err error) {
if !o.modTime.IsZero() {
return nil
}
object, err := o.readObjectInfo(ctx)
object, err := o.readObjectInfo()
if err != nil {
return err
}
@@ -957,7 +949,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
err := o.readMetaData()
if err != nil {
// fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -975,7 +967,7 @@ func metadataFromModTime(modTime time.Time) map[string]string {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
// read the complete existing object first
object, err := o.readObjectInfo(ctx)
object, err := o.readObjectInfo()
if err != nil {
return err
}
@@ -994,7 +986,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
if !o.fs.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
newObject, err = copyObject.Do()
return shouldRetry(err)
})
if err != nil {
@@ -1015,7 +1007,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
fs.FixRangeOption(options, o.bytes)
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
@@ -1063,7 +1054,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Context(ctx).Do()
newObject, err = insertObject.Do()
return shouldRetry(err)
})
if err != nil {
@@ -1078,7 +1069,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Do()
return shouldRetry(err)
})
return err
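
The split helper above divides the rooted path into a bucket name and an object path before (on one side of the hunk) each part goes through the encoder. A sketch of the split itself, assuming the simple first-slash rule that bucket.Split appears to implement:

package main

import (
	"fmt"
	"strings"
)

// split divides "bucket/dir/file" into ("bucket", "dir/file");
// a path with no slash is all bucket. This mirrors the assumed
// first-slash rule of bucket.Split.
func split(absPath string) (bucket, bucketPath string) {
	parts := strings.SplitN(absPath, "/", 2)
	if len(parts) == 1 {
		return parts[0], ""
	}
	return parts[0], parts[1]
}

func main() {
	b, p := split("mybucket/photos/2019/img.jpg")
	fmt.Println(b, p) // mybucket photos/2019/img.jpg
}
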
@@ -290,7 +290,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

// fetchEndpoint gets the openid endpoint named from the Google config
func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, err error) {
func (f *Fs) fetchEndpoint(name string) (endpoint string, err error) {
// Get openID config without auth
opts := rest.Opts{
Method: "GET",
@@ -298,7 +298,7 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
}
var openIDconfig map[string]interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
resp, err := f.unAuth.CallJSON(&opts, nil, &openIDconfig)
return shouldRetry(resp, err)
})
if err != nil {
@@ -316,7 +316,7 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e

// UserInfo fetches info about the current user with oauth2
func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err error) {
endpoint, err := f.fetchEndpoint(ctx, "userinfo_endpoint")
endpoint, err := f.fetchEndpoint("userinfo_endpoint")
if err != nil {
return nil, err
}
@@ -327,7 +327,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
RootURL: endpoint,
}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &userInfo)
resp, err := f.srv.CallJSON(&opts, nil, &userInfo)
return shouldRetry(resp, err)
})
if err != nil {
@@ -338,7 +338,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro

// Disconnect kills the token and refresh token
func (f *Fs) Disconnect(ctx context.Context) (err error) {
endpoint, err := f.fetchEndpoint(ctx, "revocation_endpoint")
endpoint, err := f.fetchEndpoint("revocation_endpoint")
if err != nil {
return err
}
@@ -358,7 +358,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
}
var res interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
resp, err := f.srv.CallJSON(&opts, nil, &res)
return shouldRetry(resp, err)
})
if err != nil {
@@ -423,7 +423,7 @@ func findID(name string) string {

// list the albums into an internal cache
// FIXME cache invalidation
func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err error) {
func (f *Fs) listAlbums(shared bool) (all *albums, err error) {
f.albumsMu.Lock()
defer f.albumsMu.Unlock()
all, ok := f.albums[shared]
@@ -445,7 +445,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
var result api.ListAlbums
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -482,7 +482,7 @@ type listFn func(remote string, object *api.MediaItem, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err error) {
func (f *Fs) list(filter api.SearchFilter, fn listFn) (err error) {
opts := rest.Opts{
Method: "POST",
Path: "/mediaItems:search",
@@ -494,7 +494,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
var result api.MediaItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &filter, &result)
resp, err = f.srv.CallJSON(&opts, &filter, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -543,7 +543,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *api.MediaI
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
// List the objects
err = f.list(ctx, filter, func(remote string, item *api.MediaItem, isDirectory bool) error {
err = f.list(filter, func(remote string, item *api.MediaItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, prefix+remote, item, isDirectory)
if err != nil {
return err
@@ -638,7 +638,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
var result api.Album
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
resp, err = f.srv.CallJSON(&opts, request, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -654,7 +654,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *api.Album, err error) {
f.createMu.Lock()
defer f.createMu.Unlock()
albums, err := f.listAlbums(ctx, false)
albums, err := f.listAlbums(false)
if err != nil {
return nil, err
}
@@ -708,7 +708,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
return err
}
albumTitle := match[1]
allAlbums, err := f.listAlbums(ctx, false)
allAlbums, err := f.listAlbums(false)
if err != nil {
return err
}
@@ -773,7 +773,7 @@ func (o *Object) Size() int64 {
RootURL: o.downloadURL(),
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -824,7 +824,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var item api.MediaItem
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &item)
resp, err = o.fs.srv.CallJSON(&opts, nil, &item)
return shouldRetry(resp, err)
})
if err != nil {
@@ -901,7 +901,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -954,7 +954,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var token []byte
var resp *http.Response
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
if err != nil {
return shouldRetry(resp, err)
}
@@ -986,7 +986,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
var result api.BatchCreateResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
resp, err = o.fs.srv.CallJSON(&opts, request, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1029,7 +1029,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
return shouldRetry(resp, err)
})
if err != nil {
@@ -20,7 +20,7 @@ import (
// file pattern parsing
type lister interface {
listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
listAlbums(ctx context.Context, shared bool) (all *albums, err error)
listAlbums(shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
}
@@ -296,7 +296,7 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
// is a prefix of another album, or actual files, or a combination of
// the two.
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
albums, err := f.listAlbums(ctx, shared)
albums, err := f.listAlbums(shared)
if err != nil {
return nil, err
}

@@ -44,7 +44,7 @@ func (f *testLister) listDir(ctx context.Context, prefix string, filter api.Sear
}

// mock listAlbums for testing
func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums, err error) {
func (f *testLister) listAlbums(shared bool) (all *albums, err error) {
return f.albums, nil
}
@@ -13,7 +13,6 @@ import (
"path"
"strconv"
"strings"
"sync"
"time"

"github.com/pkg/errors"
@@ -78,26 +77,6 @@ Note that this may cause rclone to confuse genuine HTML files with
directories.`,
Default: false,
Advanced: true,
}, {
Name: "no_head",
Help: `Don't use HEAD requests to find file sizes in dir listing

If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
directory listing to:

- find its size
- check it really exists
- check to see if it is a directory

If you set this option, rclone will not do the HEAD request. This will mean

- directory listings are much quicker
- rclone won't have the times or sizes of any files
- some files that don't exist may be in the listing
`,
Default: false,
Advanced: true,
}},
}
fs.Register(fsi)
@@ -107,7 +86,6 @@ If you set this option, rclone will not do the HEAD request. This will mean
type Options struct {
Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"`
NoHead bool `config:"no_head"`
Headers fs.CommaSepList `config:"headers"`
}

@@ -146,7 +124,6 @@ func statusError(res *http.Response, err error) error {
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -185,7 +162,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// check to see if points to a file
req, err := http.NewRequest("HEAD", u.String(), nil)
if err == nil {
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
@@ -261,7 +237,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fs: f,
remote: remote,
}
err := o.stat(ctx)
err := o.stat()
if err != nil {
return nil, err
}
@@ -379,7 +355,7 @@ func (f *Fs) addHeaders(req *http.Request) {
}

// Read the directory passed in
func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error) {
func (f *Fs) readDir(dir string) (names []string, err error) {
URL := f.url(dir)
u, err := url.Parse(URL)
if err != nil {
@@ -393,7 +369,6 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
if err != nil {
return nil, errors.Wrap(err, "readDir failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
f.addHeaders(req)
res, err := f.httpClient.Do(req)
if err == nil {
@@ -433,53 +408,34 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if !strings.HasSuffix(dir, "/") && dir != "" {
dir += "/"
}
names, err := f.readDir(ctx, dir)
names, err := f.readDir(dir)
if err != nil {
return nil, errors.Wrapf(err, "error listing %q", dir)
}
var (
entriesMu sync.Mutex // to protect entries
wg sync.WaitGroup
in = make(chan string, fs.Config.Checkers)
)
add := func(entry fs.DirEntry) {
entriesMu.Lock()
entries = append(entries, entry)
entriesMu.Unlock()
}
for i := 0; i < fs.Config.Checkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for remote := range in {
file := &Object{
fs: f,
remote: remote,
}
switch err := file.stat(ctx); err {
case nil:
add(file)
case fs.ErrorNotAFile:
// ...found a directory not a file
add(fs.NewDir(remote, timeUnset))
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
}
}()
}
for _, name := range names {
isDir := name[len(name)-1] == '/'
name = strings.TrimRight(name, "/")
remote := path.Join(dir, name)
if isDir {
add(fs.NewDir(remote, timeUnset))
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
} else {
in <- remote
file := &Object{
fs: f,
remote: remote,
}
switch err = file.stat(); err {
case nil:
entries = append(entries, file)
case fs.ErrorNotAFile:
// ...found a directory not a file
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
}
}
close(in)
wg.Wait()
return entries, nil
}
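
One side of the List hunk above fans the per-file stat calls out to fs.Config.Checkers goroutines: a channel feeds remotes to the workers, a mutex guards the shared entries slice, and a WaitGroup plus close(in) shuts the pool down. A self-contained sketch of the same fan-out shape (the stat step and checker count are placeholders):

package main

import (
	"fmt"
	"sync"
)

func main() {
	names := []string{"a.txt", "b.txt", "c.txt", "d.txt"}
	const checkers = 2 // placeholder for fs.Config.Checkers

	var (
		mu      sync.Mutex // protects results
		results []string
		wg      sync.WaitGroup
		in      = make(chan string, checkers)
	)
	for i := 0; i < checkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for name := range in {
				stat := name + ": ok" // placeholder for a per-file HEAD request
				mu.Lock()
				results = append(results, stat)
				mu.Unlock()
			}
		}()
	}
	for _, name := range names {
		in <- name // queue work for the pool
	}
	close(in) // no more work: workers drain and exit
	wg.Wait()
	fmt.Println(results)
}

Because each stat is an independent HTTP round trip, running them on a small pool of workers hides most of the per-request latency in a large directory listing.
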
@@ -536,19 +492,12 @@ func (o *Object) url() string {
}

// stat updates the info field in the Object
func (o *Object) stat(ctx context.Context) error {
if o.fs.opt.NoHead {
o.size = -1
o.modTime = timeUnset
o.contentType = fs.MimeType(ctx, o)
return nil
}
func (o *Object) stat() error {
url := o.url()
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return errors.Wrap(err, "stat failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
if err == nil && res.StatusCode == http.StatusNotFound {
@@ -597,7 +546,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, errors.Wrap(err, "Open failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext

// Add optional headers
for k, v := range fs.OpenOptionHeaders(options) {
@@ -1,7 +1,6 @@
package hubic

import (
"context"
"net/http"
"time"

@@ -27,7 +26,7 @@ func newAuth(f *Fs) *auth {
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO())
err = a.f.getCredentials()
if err == nil {
break
}
@@ -7,7 +7,6 @@ package hubic
// to be revisited after some actual experience.

import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -116,12 +115,11 @@ func (f *Fs) String() string {
// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
func (f *Fs) getCredentials() (err error) {
req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
resp, err := f.client.Do(req)
if err != nil {
return err
@@ -46,26 +46,13 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }

// LoginToken is struct representing the login token generated in the WebUI
type LoginToken struct {
Username string `json:"username"`
Realm string `json:"realm"`
WellKnownLink string `json:"well_known_link"`
AuthToken string `json:"auth_token"`
}

// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
RefreshExpiresIn int32 `json:"refresh_expires_in"`
RefreshToken string `json:"refresh_token"`
TokenType string `json:"token_type"`
IDToken string `json:"id_token"`
NotBeforePolicy int32 `json:"not-before-policy"`
SessionState string `json:"session_state"`
Scope string `json:"scope"`
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
}

// JSON structures returned by new API
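
TokenJSON mirrors the JSON body the token endpoint sends back, so it decodes directly with encoding/json. A sketch with a made-up response body, trimmed to the fields both sides of the hunk share:

package main

import (
	"encoding/json"
	"fmt"
)

// TokenJSON matches the shared fields of the struct above.
type TokenJSON struct {
	AccessToken  string `json:"access_token"`
	TokenType    string `json:"token_type"`
	RefreshToken string `json:"refresh_token"`
	ExpiresIn    int32  `json:"expires_in"`
}

func main() {
	// A made-up response body for illustration only.
	body := []byte(`{"access_token":"abc","token_type":"Bearer","refresh_token":"def","expires_in":3600}`)
	var tok TokenJSON
	if err := json.Unmarshal(body, &tok); err != nil {
		panic(err)
	}
	fmt.Println(tok.TokenType, tok.ExpiresIn) // Bearer 3600
}
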
@@ -4,13 +4,12 @@ import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"os"
@@ -26,7 +25,7 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
@@ -37,29 +36,31 @@ import (
"golang.org/x/oauth2"
)

const enc = encodings.JottaCloud

// Globals
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
tokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
cachePrefix = "rclone-jcmd5-"
configDevice = "device"
configMountpoint = "mountpoint"
configVersion = 1
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
tokenURL = "https://api.jottacloud.com/auth/v1/token"
registerURL = "https://api.jottacloud.com/auth/v1/register"
cachePrefix = "rclone-jcmd5-"
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
configClientID = "client_id"
configClientSecret = "client_secret"
configDevice = "device"
configMountpoint = "mountpoint"
charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
)

var (
// Description of how to auth for this app for a personal account
oauthConfig = &oauth2.Config{
ClientID: "jottacli",
Endpoint: oauth2.Endpoint{
AuthURL: tokenURL,
TokenURL: tokenURL,
@@ -76,40 +77,43 @@ func init() {
Description: "JottaCloud",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm() {
return
}
}

refresh := false
if version, ok := m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
srv := rest.NewClient(fshttp.NewClient(fs.Config))
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm() {
deviceRegistration, err := registerDevice(srv)
if err != nil {
log.Fatalf("Failed to parse config version - corrupted config")
log.Fatalf("Failed to register device: %v", err)
}
refresh = ver != configVersion
} else {
refresh = true

m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
}

if refresh {
fmt.Printf("Config outdated - refreshing\n")
} else {
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm(false) {
return
}
}
clientID, ok := m.Get(configClientID)
if !ok {
clientID = rcloneClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)

clientConfig := *fs.Config
clientConfig.UserAgent = "JottaCli 0.6.18626 windows-amd64"
srv := rest.NewClient(fshttp.NewClient(&clientConfig))
fmt.Printf("Username> ")
username := config.ReadLine()
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")

fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()

token, err := doAuth(ctx, srv, loginToken)
token, err := doAuth(srv, username, password)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
@@ -119,7 +123,7 @@ func init() {
}

fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
if config.Confirm() {
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
@@ -128,15 +132,13 @@ func init() {
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)

device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
device, mountpoint, err := setupMountpoint(srv, apiSrv)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}

m.Set("configVersion", strconv.Itoa(configVersion))
},
Options: []fs.Option{{
Name: "md5_memory_limit",
@@ -243,51 +245,67 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// doAuth runs the actual token request
func doAuth(ctx context.Context, srv *rest.Client, loginTokenBase64 string) (token oauth2.Token, err error) {
loginTokenBytes, err := base64.StdEncoding.DecodeString(loginTokenBase64)
if err != nil {
return token, err
// registerDevice registers a new device for use with the jottacloud API
func registerDevice(srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
// random generator to generate random device names
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
randonDeviceNamePartLength := 21
randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
for i := range randomDeviceNamePart {
randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
}
randomDeviceName := "rclone-" + string(randomDeviceNamePart)
fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)

var loginToken api.LoginToken
decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
err = decoder.Decode(&loginToken)
if err != nil {
return token, err
}
values := url.Values{}
values.Set("device_id", randomDeviceName)

// we don't seem to need any data from this link but the API is not happy if we skip it
opts := rest.Opts{
Method: "GET",
RootURL: loginToken.WellKnownLink,
NoResponse: true,
}
_, err = srv.Call(ctx, &opts)
if err != nil {
return token, err
Method: "POST",
RootURL: registerURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
}

var deviceRegistration *api.DeviceRegistrationResponse
_, err = srv.CallJSON(&opts, nil, &deviceRegistration)
return deviceRegistration, err
}

// doAuth runs the actual token request
func doAuth(srv *rest.Client, username, password string) (token oauth2.Token, err error) {
// prepare our token request with username and password
values := url.Values{}
values.Set("client_id", "jottacli")
values.Set("grant_type", "password")
values.Set("password", loginToken.AuthToken)
values.Set("scope", "offline_access+openid")
values.Set("username", loginToken.Username)
values.Encode()
opts = rest.Opts{
values.Set("grant_type", "PASSWORD")
values.Set("password", password)
values.Set("username", username)
values.Set("client_id", oauthConfig.ClientID)
values.Set("client_secret", oauthConfig.ClientSecret)
opts := rest.Opts{
Method: "POST",
RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded",
Body: strings.NewReader(values.Encode()),
Parameters: values,
}

// do the first request
var jsonToken api.TokenJSON
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
resp, err := srv.CallJSON(&opts, nil, &jsonToken)
if err != nil {
return token, err
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
if resp != nil {
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
fmt.Printf("Enter verification code> ")
authCode := config.ReadLine()

authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
|
||||
opts.ExtraHeaders = make(map[string]string)
|
||||
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
|
||||
resp, err = srv.CallJSON(&opts, nil, &jsonToken)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
token.AccessToken = jsonToken.AccessToken
|
||||
@@ -298,13 +316,13 @@ func doAuth(ctx context.Context, srv *rest.Client, loginTokenBase64 string) (tok
|
||||
}
|
||||
|
||||
// setupMountpoint sets up a custom device and mountpoint if desired by the user
|
||||
func setupMountpoint(ctx context.Context, srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
|
||||
cust, err := getCustomerInfo(ctx, apiSrv)
|
||||
func setupMountpoint(srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
|
||||
cust, err := getCustomerInfo(apiSrv)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
acc, err := getDriveInfo(ctx, srv, cust.Username)
|
||||
acc, err := getDriveInfo(srv, cust.Username)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
@@ -315,7 +333,7 @@ func setupMountpoint(ctx context.Context, srv *rest.Client, apiSrv *rest.Client)
|
||||
fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
|
||||
device = config.Choose("Devices", deviceNames, nil, false)
|
||||
|
||||
dev, err := getDeviceInfo(ctx, srv, path.Join(cust.Username, device))
|
||||
dev, err := getDeviceInfo(srv, path.Join(cust.Username, device))
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
@@ -333,13 +351,13 @@ func setupMountpoint(ctx context.Context, srv *rest.Client, apiSrv *rest.Client)
|
||||
}
|
||||
|
||||
// getCustomerInfo queries general information about the account
|
||||
func getCustomerInfo(ctx context.Context, srv *rest.Client) (info *api.CustomerInfo, err error) {
|
||||
func getCustomerInfo(srv *rest.Client) (info *api.CustomerInfo, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "account/v1/customer",
|
||||
}
|
||||
|
||||
_, err = srv.CallJSON(ctx, &opts, nil, &info)
|
||||
_, err = srv.CallJSON(&opts, nil, &info)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't get customer info")
|
||||
}
|
||||
@@ -348,13 +366,13 @@ func getCustomerInfo(ctx context.Context, srv *rest.Client) (info *api.CustomerI
|
||||
}
|
||||
|
||||
// getDriveInfo queries general information about the account and the available devices and mountpoints.
|
||||
func getDriveInfo(ctx context.Context, srv *rest.Client, username string) (info *api.DriveInfo, err error) {
|
||||
func getDriveInfo(srv *rest.Client, username string) (info *api.DriveInfo, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: username,
|
||||
}
|
||||
|
||||
_, err = srv.CallXML(ctx, &opts, nil, &info)
|
||||
_, err = srv.CallXML(&opts, nil, &info)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't get drive info")
|
||||
}
|
||||
@@ -363,13 +381,13 @@ func getDriveInfo(ctx context.Context, srv *rest.Client, username string) (info
|
||||
}
|
||||
|
||||
// getDeviceInfo queries Information about a jottacloud device
|
||||
func getDeviceInfo(ctx context.Context, srv *rest.Client, path string) (info *api.JottaDevice, err error) {
|
||||
func getDeviceInfo(srv *rest.Client, path string) (info *api.JottaDevice, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: urlPathEscape(path),
|
||||
}
|
||||
|
||||
_, err = srv.CallXML(ctx, &opts, nil, &info)
|
||||
_, err = srv.CallXML(&opts, nil, &info)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't get device info")
|
||||
}
|
||||
@@ -389,7 +407,7 @@ func (f *Fs) setEndpointURL() {
|
||||
}
|
||||
|
||||
// readMetaDataForPath reads the metadata from the path
|
||||
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.JottaFile, err error) {
|
||||
func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: f.filePath(path),
|
||||
@@ -397,7 +415,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
|
||||
var result api.JottaFile
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
|
||||
resp, err = f.srv.CallXML(&opts, nil, &result)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
|
||||
@@ -441,7 +459,7 @@ func urlPathEscape(in string) string {
|
||||
|
||||
// filePathRaw returns an unescaped file path (f.root, file)
|
||||
func (f *Fs) filePathRaw(file string) string {
|
||||
return path.Join(f.endpointURL, enc.FromStandardPath(path.Join(f.root, file)))
|
||||
return path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, file)))
|
||||
}
|
||||
|
||||
// filePath returns a escaped file path (f.root, file)
|
||||
@@ -449,9 +467,31 @@ func (f *Fs) filePath(file string) string {
|
||||
return urlPathEscape(f.filePathRaw(file))
|
||||
}
|
||||
|
||||
// Jottacloud requires the grant_type 'refresh_token' string
|
||||
// to be uppercase and throws a 400 Bad Request if we use the
|
||||
// lower case used by the oauth2 module
|
||||
//
|
||||
// This filter catches all refresh requests, reads the body,
|
||||
// changes the case and then sends it on
|
||||
func grantTypeFilter(req *http.Request) {
|
||||
if tokenURL == req.URL.String() {
|
||||
// read the entire body
|
||||
refreshBody, err := ioutil.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_ = req.Body.Close()
|
||||
|
||||
// make the refresh token upper case
|
||||
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
|
||||
|
||||
// set the new ReadCloser (with a dummy Close())
|
||||
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
|
||||
}
|
||||
}
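
// Editor's sketch (not part of the diff above): a tiny standalone illustration
// of the rewrite grantTypeFilter performs on a refresh body. The function name
// exampleGrantTypeRewrite is hypothetical; it only mirrors the strings.Replace
// call the filter uses.
func exampleGrantTypeRewrite() {
	body := "grant_type=refresh_token&refresh_token=xyz"
	// Jottacloud rejects the lower case spelling with a 400 Bad Request,
	// so only the grant_type value is upper cased before the request is sent.
	fixed := strings.Replace(body, "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1)
	fmt.Println(fixed) // grant_type=REFRESH_TOKEN&refresh_token=xyz
}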

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -459,23 +499,30 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}

var ok bool
var version string
if version, ok = m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
if err != nil {
return nil, errors.New("Failed to parse config version")
}
ok = ver == configVersion
}
if !ok {
return nil, errors.New("Outdated config - please reconfigure this backend")
}

rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)

clientID, ok := m.Get(configClientID)
if !ok {
clientID = rcloneClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)

// the oauth client for the api servers needs
// a filter to fix the grant_type issues (see above)
baseClient := fshttp.NewClient(fs.Config)
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
do.SetRequestFilter(grantTypeFilter)
} else {
fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
@@ -499,11 +546,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
_, err := f.readMetaDataForPath("")
return err
})

cust, err := getCustomerInfo(ctx, f.apiSrv)
cust, err := getCustomerInfo(f.apiSrv)
if err != nil {
return nil, err
}
@@ -535,7 +582,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.JottaFile) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -545,7 +592,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Jot
// Set info
err = o.setMetaData(info)
} else {
err = o.readMetaData(ctx, false) // reads info and meta, returning an error
err = o.readMetaData(false) // reads info and meta, returning an error
}
if err != nil {
return nil, err
@@ -556,11 +603,11 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Jot
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
return f.newObjectWithInfo(remote, nil)
}

// CreateDir makes a directory
func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, err error) {
func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
var resp *http.Response
opts := rest.Opts{
@@ -572,7 +619,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, e
opts.Parameters.Set("mkDir", "true")

err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &jf)
resp, err = f.srv.CallXML(&opts, nil, &jf)
return shouldRetry(resp, err)
})
if err != nil {
@@ -601,7 +648,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var resp *http.Response
var result api.JottaFolder
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.srv.CallXML(&opts, nil, &result)
return shouldRetry(resp, err)
})

@@ -624,7 +671,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if item.Deleted {
continue
}
remote := path.Join(dir, enc.ToStandardName(item.Name))
remote := path.Join(dir, restoreReservedChars(item.Name))
d := fs.NewDir(remote, time.Time(item.ModifiedAt))
entries = append(entries, d)
}
@@ -634,8 +681,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if item.Deleted || item.State != "COMPLETED" {
continue
}
remote := path.Join(dir, enc.ToStandardName(item.Name))
o, err := f.newObjectWithInfo(ctx, remote, item)
remote := path.Join(dir, restoreReservedChars(item.Name))
o, err := f.newObjectWithInfo(remote, item)
if err != nil {
continue
}
@@ -649,7 +696,7 @@ type listFileDirFn func(fs.DirEntry) error

// List the objects and directories into entries, from a
// special kind of JottaFolder representing a FileDirList
func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
pathPrefix := "/" + f.filePathRaw("") // Non-escaped prefix of API paths to be cut off, to be left with the remote path including the remoteStartPath
pathPrefixLength := len(pathPrefix)
startPath := path.Join(pathPrefix, remoteStartPath) // Non-escaped API path up to and including remoteStartPath, to decide if it should be created as a new dir object
@@ -659,7 +706,7 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
if folder.Deleted {
return nil
}
folderPath := enc.ToStandardPath(path.Join(folder.Path, folder.Name))
folderPath := restoreReservedChars(path.Join(folder.Path, folder.Name))
folderPathLength := len(folderPath)
var remoteDir string
if folderPathLength > pathPrefixLength {
@@ -677,8 +724,8 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
if file.Deleted || file.State != "COMPLETED" {
continue
}
remoteFile := path.Join(remoteDir, enc.ToStandardName(file.Name))
o, err := f.newObjectWithInfo(ctx, remoteFile, file)
remoteFile := path.Join(remoteDir, restoreReservedChars(file.Name))
o, err := f.newObjectWithInfo(remoteFile, file)
if err != nil {
return err
}
@@ -707,7 +754,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
var resp *http.Response
var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.srv.CallXML(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -720,7 +767,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return errors.Wrap(err, "couldn't list files")
}
list := walk.NewListRHelper(callback)
err = f.listFileDir(ctx, dir, &result, func(entry fs.DirEntry) error {
err = f.listFileDir(dir, &result, func(entry fs.DirEntry) error {
return list.Add(entry)
})
if err != nil {
@@ -774,7 +821,7 @@ func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.CreateDir(ctx, dir)
_, err := f.CreateDir(dir)
return err
}

@@ -813,7 +860,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)

var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -841,18 +888,18 @@ func (f *Fs) Purge(ctx context.Context) error {
}

// copyOrMove copies or moves directories or files depending on the method parameter
func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *api.JottaFile, err error) {
func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err error) {
opts := rest.Opts{
Method: "POST",
Path: src,
Parameters: url.Values{},
}

opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, enc.FromStandardPath(path.Join(f.root, dest))))
opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, dest))))

var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
resp, err = f.srv.CallXML(&opts, nil, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -881,13 +928,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
info, err := f.copyOrMove("cp", srcObj.filePath(), remote)

if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
}

return f.newObjectWithInfo(ctx, remote, info)
return f.newObjectWithInfo(remote, info)
//return f.newObjectWithInfo(remote, &result)
}

@@ -911,13 +958,13 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)
info, err := f.copyOrMove("mv", srcObj.filePath(), remote)

if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}

return f.newObjectWithInfo(ctx, remote, info)
return f.newObjectWithInfo(remote, info)
//return f.newObjectWithInfo(remote, result)
}

@@ -955,7 +1002,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return fs.ErrorDirExists
}

_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, enc.FromStandardPath(srcPath))+"/", dstRemote)
_, err = f.copyOrMove("mvDir", path.Join(f.endpointURL, replaceReservedChars(srcPath))+"/", dstRemote)

if err != nil {
return errors.Wrap(err, "couldn't move directory")
@@ -980,7 +1027,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
var resp *http.Response
var result api.JottaFile
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.srv.CallXML(&opts, nil, &result)
return shouldRetry(resp, err)
})

@@ -1011,7 +1058,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
info, err := getDriveInfo(ctx, f.srv, f.user)
info, err := getDriveInfo(f.srv, f.user)
if err != nil {
return nil, err
}
@@ -1066,8 +1113,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
ctx := context.TODO()
err := o.readMetaData(ctx, false)
err := o.readMetaData(false)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return 0
@@ -1091,11 +1137,11 @@ func (o *Object) setMetaData(info *api.JottaFile) (err error) {
}

// readMetaData reads and updates the metadata for an object
func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
func (o *Object) readMetaData(force bool) (err error) {
if o.hasMetaData && !force {
return nil
}
info, err := o.fs.readMetaDataForPath(ctx, o.remote)
info, err := o.fs.readMetaDataForPath(o.remote)
if err != nil {
return err
}
@@ -1110,7 +1156,7 @@ func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx, false)
err := o.readMetaData(false)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -1142,7 +1188,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
opts.Parameters.Set("mode", "bin")

err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1246,13 +1292,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Created: fileDate,
Modified: fileDate,
Md5: md5String,
Path: path.Join(o.fs.opt.Mountpoint, enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
Path: path.Join(o.fs.opt.Mountpoint, replaceReservedChars(path.Join(o.fs.root, o.remote))),
}

// send it
var response api.AllocateFileResponse
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
resp, err = o.fs.apiSrv.CallJSON(&opts, &request, &response)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1283,7 +1329,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

// send the remaining bytes
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, nil, &result)
resp, err = o.fs.apiSrv.CallJSON(&opts, nil, &result)
if err != nil {
return err
}
@@ -1295,7 +1341,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.modTime = time.Unix(result.Modified/1000, 0)
} else {
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
return o.readMetaData(ctx, true)
return o.readMetaData(true)
}

return nil
@@ -1317,7 +1363,7 @@ func (o *Object) Remove(ctx context.Context) error {
}

return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
resp, err := o.fs.srv.CallXML(&opts, nil, nil)
return shouldRetry(resp, err)
})
}

77
backend/jottacloud/replace.go
Normal file
@@ -0,0 +1,77 @@
/*
Translate file names for JottaCloud adapted from OneDrive


The following characters are JottaCloud reserved characters, and can't
be used in JottaCloud folder and file names.

jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"


*/

package jottacloud

import (
"regexp"
"strings"
)

// charMap holds replacements for characters
//
// OneDrive has a restricted set of characters compared to other cloud
// storage systems, so we need to map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '\', // FULLWIDTH REVERSE SOLIDUS
'*': '*', // FULLWIDTH ASTERISK
'<': '<', // FULLWIDTH LESS-THAN SIGN
'>': '>', // FULLWIDTH GREATER-THAN SIGN
'?': '?', // FULLWIDTH QUESTION MARK
':': ':', // FULLWIDTH COLON
';': ';', // FULLWIDTH SEMICOLON
'|': '|', // FULLWIDTH VERTICAL LINE
'"': '"', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
)

func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}

// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Filenames can't start with space
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Filenames can't end with space
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}

// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}
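
// Editor's sketch (not part of the diff above): a hypothetical round-trip
// demonstration of the two helpers; assumes fmt is imported. Reserved runes
// become their FULLWIDTH forms on the way out and are restored on the way back.
func exampleReservedCharsRoundTrip() {
	name := `report|2019?.txt`
	encoded := replaceReservedChars(name)  // "|" and "?" become FULLWIDTH runes
	decoded := restoreReservedChars(encoded)
	fmt.Println(encoded, decoded == name) // prints the encoded name and "true"
}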
28
backend/jottacloud/replace_test.go
Normal file
@@ -0,0 +1,28 @@
package jottacloud

import "testing"

func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\*<>?:;|"`, `\*<>?:;|"`},
{`\*<>?:;|"\*<>?:;|"`, `\*<>?:;|"\*<>?:;|"`},
{" leading space", "␠leading space"},
{"trailing space ", "trailing space␠"},
{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
{"trailing space /trailing space /trailing space ", "trailing space␠/trailing space␠/trailing space␠"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}
@@ -15,16 +15,12 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"

httpclient "github.com/koofr/go-httpclient"
koofrclient "github.com/koofr/go-koofrclient"
)

const enc = encodings.Koofr

// Register Fs with rclone
func init() {
fs.Register(&fs.RegInfo{
@@ -246,7 +242,7 @@ func (f *Fs) Hashes() hash.Set {

// fullPath constructs a full, absolute path from an Fs root-relative path.
func (f *Fs) fullPath(part string) string {
return enc.FromStandardPath(path.Join("/", f.root, part))
return path.Join("/", f.root, part)
}

// NewFs constructs a new filesystem given a root path and configuration options
@@ -260,9 +256,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
if err != nil {
return nil, err
}
httpClient := httpclient.New()
httpClient.Client = fshttp.NewClient(fs.Config)
client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
client := koofrclient.NewKoofrClient(opt.Endpoint, false)
basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
client.HTTPClient.Headers.Set("Authorization", basicAuth)
@@ -299,7 +293,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
}
return nil, errors.New("Failed to find mount " + opt.MountID)
}
rootFile, err := f.client.FilesInfo(f.mountID, enc.FromStandardPath("/"+f.root))
rootFile, err := f.client.FilesInfo(f.mountID, "/"+f.root)
if err == nil && rootFile.Type != "dir" {
f.root = dir(f.root)
err = fs.ErrorIsFile
@@ -317,14 +311,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
entries = make([]fs.DirEntry, len(files))
for i, file := range files {
remote := path.Join(dir, enc.ToStandardName(file.Name))
if file.Type == "dir" {
entries[i] = fs.NewDir(remote, time.Unix(0, 0))
entries[i] = fs.NewDir(path.Join(dir, file.Name), time.Unix(0, 0))
} else {
entries[i] = &Object{
fs: f,
info: file,
remote: remote,
remote: path.Join(dir, file.Name),
}
}
}

@@ -1,9 +0,0 @@
//+build darwin

package local

import (
"github.com/rclone/rclone/fs/encodings"
)

const enc = encodings.LocalMacOS
@@ -1,9 +0,0 @@
//+build !windows,!darwin

package local

import (
"github.com/rclone/rclone/fs/encodings"
)

const enc = encodings.LocalUnix
@@ -1,9 +0,0 @@
//+build windows

package local

import (
"github.com/rclone/rclone/fs/encodings"
)

const enc = encodings.LocalWindows
@@ -142,19 +142,19 @@ type Fs struct {
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
wmu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string

// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
dirNames *mapper // directory name mapping
objectHashesMu sync.Mutex // global lock for Object.hashes
}

// Object represents a local filesystem object
type Object struct {
fs *Fs // The Fs this object is part of
remote string // The remote path (encoded path)
path string // The local path (OS path)
remote string // The remote path - properly UTF-8 encoded - for rclone
path string // The local path - may not be properly UTF-8 encoded - for OS
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
@@ -183,13 +183,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

f := &Fs{
name: name,
opt: *opt,
warned: make(map[string]struct{}),
dev: devUnset,
lstat: os.Lstat,
name: name,
opt: *opt,
warned: make(map[string]struct{}),
dev: devUnset,
lstat: os.Lstat,
dirNames: newMapper(),
}
f.root = cleanRootPath(root, f.opt.NoUNC)
f.root = f.cleanPath(root)
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),
CanHaveEmptyDirectories: true,
@@ -234,12 +235,12 @@ func (f *Fs) Name() string {

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return enc.ToStandardPath(filepath.ToSlash(f.root))
return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Local file system at %s", f.Root())
return fmt.Sprintf("Local file system at %s", f.root)
}

// Features returns the optional features of this Fs
@@ -267,27 +268,33 @@ func (f *Fs) caseInsensitive() bool {
// and returns a new path, removing the suffix as needed.
// It also returns whether this is a translated link at all
//
// for regular files, localPath is returned unchanged
func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
// for regular files, dstPath is returned unchanged
func translateLink(remote, dstPath string) (newDstPath string, isTranslatedLink bool) {
isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
return newLocalPath, isTranslatedLink
newDstPath = strings.TrimSuffix(dstPath, linkSuffix)
return newDstPath, isTranslatedLink
}

// newObject makes a half completed Object
func (f *Fs) newObject(remote string) *Object {
//
// if dstPath is empty then it is made from remote
func (f *Fs) newObject(remote, dstPath string) *Object {
translatedLink := false
localPath := f.localPath(remote)

if dstPath == "" {
dstPath = f.cleanPath(filepath.Join(f.root, remote))
}
remote = f.cleanRemote(remote)

if f.opt.TranslateSymlinks {
// Possibly receive a new name for localPath
localPath, translatedLink = translateLink(remote, localPath)
// Possibly receive a new name for dstPath
dstPath, translatedLink = translateLink(remote, dstPath)
}

return &Object{
fs: f,
remote: remote,
path: localPath,
path: dstPath,
translatedLink: translatedLink,
}
}
@@ -295,8 +302,8 @@ func (f *Fs) newObject(remote string) *Object {
// Return an Object from a path
//
// May return nil if an error occurred
func (f *Fs) newObjectWithInfo(remote string, info os.FileInfo) (fs.Object, error) {
o := f.newObject(remote)
func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Object, error) {
o := f.newObject(remote, dstPath)
if info != nil {
o.setMetadata(info)
} else {
@@ -325,7 +332,7 @@ func (f *Fs) newObjectWithInfo(remote string, info os.FileInfo) (fs.Object, erro
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
return f.newObjectWithInfo(remote, "", nil)
}

// List the objects and directories in dir into entries. The
@@ -338,7 +345,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
fsDirPath := f.localPath(dir)

dir = f.dirNames.Load(dir)
fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
remote := f.cleanRemote(dir)
_, err = os.Stat(fsDirPath)
if err != nil {
return nil, fs.ErrorDirNotFound
@@ -350,7 +360,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
err = errors.Wrapf(err, "failed to open directory %q", dir)
fs.Errorf(dir, "%v", err)
if isPerm {
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
err = nil // ignore error but fail sync
}
return nil, err
@@ -386,7 +396,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if fierr != nil {
err = errors.Wrapf(err, "failed to read directory %q", namepath)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
}
fis = append(fis, fi)
@@ -400,16 +410,16 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for _, fi := range fis {
name := fi.Name()
mode := fi.Mode()
newRemote := f.cleanRemote(dir, name)
newRemote := path.Join(remote, name)
newPath := filepath.Join(fsDirPath, name)
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath)
fi, err = os.Stat(newPath)
if os.IsNotExist(err) {
// Skip bad symlinks
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
fs.Errorf(newRemote, "Listing error: %v", err)
err = accounting.Stats(ctx).Error(err)
accounting.Stats(ctx).Error(err)
continue
}
if err != nil {
@@ -421,7 +431,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
d := fs.NewDir(newRemote, fi.ModTime())
d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
entries = append(entries, d)
}
} else {
@@ -429,7 +439,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += linkSuffix
}
fso, err := f.newObjectWithInfo(newRemote, fi)
fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
if err != nil {
return nil, err
}
@@ -442,28 +452,67 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return entries, nil
}

func (f *Fs) cleanRemote(dir, filename string) (remote string) {
remote = path.Join(dir, enc.ToStandardName(filename))

if !utf8.ValidString(filename) {
f.warnedMu.Lock()
if _, ok := f.warned[remote]; !ok {
fs.Logf(f, "Replacing invalid UTF-8 characters in %q", remote)
f.warned[remote] = struct{}{}
// cleanRemote makes string a valid UTF-8 string for remote strings.
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
// It also normalises the UTF-8 and converts the slashes if necessary.
func (f *Fs) cleanRemote(name string) string {
if !utf8.ValidString(name) {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Logf(f, "Replacing invalid UTF-8 characters in %q", name)
f.warned[name] = struct{}{}
}
f.warnedMu.Unlock()
f.wmu.Unlock()
name = string([]rune(name))
}
return
name = filepath.ToSlash(name)
return name
}

func (f *Fs) localPath(name string) string {
return filepath.Join(f.root, filepath.FromSlash(enc.FromStandardPath(name)))
// mapper maps raw to cleaned directory names
type mapper struct {
mu sync.RWMutex // mutex to protect the below
m map[string]string // map of un-normalised directory names
}

func newMapper() *mapper {
return &mapper{
m: make(map[string]string),
}
}

// Lookup a directory name to make a local name (reverses
// cleanDirName)
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Load(in string) string {
m.mu.RLock()
out, ok := m.m[in]
m.mu.RUnlock()
if ok {
return out
}
return in
}

// Cleans a directory name, recording if it needed to be altered
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Save(in, out string) string {
if in != out {
m.mu.Lock()
m.m[out] = in
m.mu.Unlock()
}
return out
}
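
// Editor's sketch (not part of the diff above): how the mapper is intended to
// be used. The values are hypothetical; see TestMapper in
// local_internal_test.go further down for the real exercise of this type.
func exampleMapper() {
	m := newMapper()
	// Save records the raw -> cleaned pair while listing a directory...
	cleaned := m.Save("cafe\u0301", "café") // returns the cleaned name "café"
	// ...and Load recovers the raw OS name from the cleaned remote name later.
	raw := m.Load(cleaned) // returns "cafe\u0301" again
	_, _ = cleaned, raw
}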

// Put the Object to the local filesystem
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
// Temporary Object under construction - info filled in by Update()
o := f.newObject(src.Remote())
o := f.newObject(remote, "")
err := o.Update(ctx, in, src, options...)
if err != nil {
return nil, err
@@ -479,13 +528,13 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
localPath := f.localPath(dir)
err := os.MkdirAll(localPath, 0777)
root := f.cleanPath(filepath.Join(f.root, dir))
err := os.MkdirAll(root, 0777)
if err != nil {
return err
}
if dir == "" {
fi, err := f.lstat(localPath)
fi, err := f.lstat(root)
if err != nil {
return err
}
@@ -498,7 +547,8 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
//
// If it isn't empty it will return an error
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return os.Remove(f.localPath(dir))
root := f.cleanPath(filepath.Join(f.root, dir))
return os.Remove(root)
}

// Precision of the file system
@@ -594,7 +644,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// Temporary Object under construction
dstObj := f.newObject(remote)
dstObj := f.newObject(remote, "")

// Check it is a file if it exists
err := dstObj.lstat()
@@ -651,8 +701,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := srcFs.localPath(srcRemote)
dstPath := f.localPath(dstRemote)
srcPath := f.cleanPath(filepath.Join(srcFs.root, srcRemote))
dstPath := f.cleanPath(filepath.Join(f.root, dstRemote))

// Check if destination exists
_, err := os.Lstat(dstPath)
@@ -686,7 +736,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Supported()
return hash.Supported
}

// ------------------------------------------------------------
@@ -786,6 +836,13 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {

// Storable returns a boolean showing if this object is storable
func (o *Object) Storable() bool {
// Check for control characters in the remote name and report it as non storable
for _, c := range o.Remote() {
if c >= 0x00 && c < 0x20 || c == 0x7F {
fs.Logf(o.fs, "Can't store file with control characters: %q", o.Remote())
return false
}
}
mode := o.mode
if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
if !o.fs.opt.SkipSymlinks {
@@ -820,10 +877,10 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
return 0, errors.Wrap(err, "can't read status of source file while transferring")
}
if file.o.size != fi.Size() {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size()))
return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
}
if !file.o.modTime.Equal(fi.ModTime()) {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime()))
return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
}
}

@@ -1030,7 +1087,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
// Temporary Object under construction
o := f.newObject(remote)
o := f.newObject(remote, "")

err := o.mkdirAll()
if err != nil {
@@ -1082,7 +1139,29 @@ func (o *Object) Remove(ctx context.Context) error {
return remove(o.path)
}

func cleanRootPath(s string, noUNC bool) string {
// cleanPathFragment cleans an OS path fragment which is part of a
// bigger path and not necessarily absolute
func cleanPathFragment(s string) string {
if s == "" {
return s
}
s = filepath.Clean(s)
if runtime.GOOS == "windows" {
s = strings.Replace(s, `/`, `\`, -1)
}
return s
}

// cleanPath cleans and makes absolute the path passed in and returns
// an OS path.
//
// The input might be in OS form or rclone form or a mixture, but the
// output is in OS form.
//
// On windows it also makes the path UNC and replaces any characters
// Windows can't handle with their replacements.
func (f *Fs) cleanPath(s string) string {
s = cleanPathFragment(s)
if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
@@ -1090,24 +1169,19 @@ func cleanRootPath(s string, noUNC bool) string {
s = s2
}
}
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)

if !noUNC {
if !f.opt.NoUNC {
// Convert to UNC
s = uncPath(s)
}
return s
}
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
s = cleanWindowsName(f, s)
} else {
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
}
s = enc.FromStandardPath(s)
return s
}

@@ -1116,21 +1190,63 @@ var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)

// uncPath converts an absolute Windows path
// to a UNC long path.
func uncPath(l string) string {
func uncPath(s string) string {
// UNC can NOT use "/", so convert all to "\"
s = strings.Replace(s, `/`, `\`, -1)

// If prefix is "\\", we already have a UNC path or server.
if strings.HasPrefix(l, `\\`) {
if strings.HasPrefix(s, `\\`) {
// If already long path, just keep it
if strings.HasPrefix(l, `\\?\`) {
return l
if strings.HasPrefix(s, `\\?\`) {
return s
}

// Trim "\\" from path and add UNC prefix.
return `\\?\UNC\` + strings.TrimPrefix(l, `\\`)
return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
}
if isAbsWinDrive.MatchString(l) {
return `\\?\` + l
if isAbsWinDrive.MatchString(s) {
return `\\?\` + s
}
return l
return s
}
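
// Editor's note (not part of the diff above): concrete inputs and the outputs
// uncPath produces for them, following the branches in the function.
//
//	uncPath(`C:\Windows\Folder`)      -> `\\?\C:\Windows\Folder`
//	uncPath(`\\server\share\Desktop`) -> `\\?\UNC\server\share\Desktop`
//	uncPath(`\\?\C:\Windows\Folder`)  -> unchanged (already a long path)
//	uncPath(`relative\path`)          -> unchanged (not absolute, no UNC prefix)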

// cleanWindowsName will clean invalid Windows characters, replacing them with _
func cleanWindowsName(f *Fs, name string) string {
original := name
var name2 string
if strings.HasPrefix(name, `\\?\`) {
name2 = `\\?\`
name = strings.TrimPrefix(name, `\\?\`)
}
if strings.HasPrefix(name, `//?/`) {
name2 = `//?/`
name = strings.TrimPrefix(name, `//?/`)
}
// Colon is allowed as part of a drive name X:\
colonAt := strings.Index(name, ":")
if colonAt > 0 && colonAt < 3 && len(name) > colonAt+1 {
// Copy to name2, which is unfiltered
name2 += name[0 : colonAt+1]
name = name[colonAt+1:]
}

name2 += strings.Map(func(r rune) rune {
switch r {
case '<', '>', '"', '|', '?', '*', ':':
return '_'
}
return r
}, name)

if name2 != original && f != nil {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Logf(f, "Replacing invalid characters in %q to %q", name, name2)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()
}
return name2
}
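
// Editor's note (not part of the diff above): a worked example of what
// cleanWindowsName does - the `\\?\`, `//?/` and drive prefixes are copied
// through unfiltered, and every other '<', '>', '"', '|', '?', '*' or ':'
// in the remainder becomes '_'. Passing nil for f just skips the warning log.
//
//	cleanWindowsName(nil, `C:\Ba*d\P|a?t<h>\Folder`) -> `C:\Ba_d\P_a_t_h_\Folder`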

// Check the interfaces are satisfied

@@ -25,6 +25,19 @@ func TestMain(m *testing.M) {
fstest.TestMain(m)
}

func TestMapper(t *testing.T) {
m := newMapper()
assert.Equal(t, m.m, map[string]string{})
assert.Equal(t, "potato", m.Save("potato", "potato"))
assert.Equal(t, m.m, map[string]string{})
assert.Equal(t, "-r'áö", m.Save("-r?'a´o¨", "-r'áö"))
assert.Equal(t, m.m, map[string]string{
"-r'áö": "-r?'a´o¨",
})
assert.Equal(t, "potato", m.Load("potato"))
assert.Equal(t, "-r?'a´o¨", m.Load("-r'áö"))
}

// Test copy with source file that's updating
func TestUpdatingCheck(t *testing.T) {
r := fstest.NewRun(t)
@@ -44,7 +57,7 @@ func TestUpdatingCheck(t *testing.T) {
require.NoError(t, err)
o := &Object{size: fi.Size(), modTime: fi.ModTime(), fs: &Fs{}}
wrappedFd := readers.NewLimitedReadCloser(fd, -1)
hash, err := hash.NewMultiHasherTypes(hash.Supported())
hash, err := hash.NewMultiHasherTypes(hash.Supported)
require.NoError(t, err)
in := localOpenFile{
o: o,

@@ -1,26 +1,29 @@
package local

import (
"runtime"
"testing"
)

var uncTestPaths = []string{
`C:\Ba*d\P|a?t<h>\Windows\Folder`,
`C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\UNC\server\share\Desktop`,
`\\?\unC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`C:\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`C:\AbsoluteToRoot\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\server\share\Desktop`,
`\\?\UNC\\share\folder\Desktop`,
`\\server\share`,
"C:\\Ba*d\\P|a?t<h>\\Windows\\Folder",
"C:/Ba*d/P|a?t<h>/Windows\\Folder",
"C:\\Windows\\Folder",
"\\\\?\\C:\\Windows\\Folder",
"//?/C:/Windows/Folder",
"\\\\?\\UNC\\server\\share\\Desktop",
"\\\\?\\unC\\server\\share\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"\\\\server\\share\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"C:\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"C:\\AbsoluteToRoot\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"\\\\server\\share\\Desktop",
"\\\\?\\UNC\\\\share\\folder\\Desktop",
"\\\\server\\share",
}
var uncTestPathsResults = []string{
    `\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
    `\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
    `\\?\C:\Windows\Folder`,
    `\\?\C:\Windows\Folder`,
    `\\?\C:\Windows\Folder`,
    `\\?\UNC\server\share\Desktop`,
@@ -48,23 +51,38 @@ func TestUncPaths(t *testing.T) {
    }
}
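The tables above encode how the local backend converts plain Windows paths to extended-length (`\\?\`) form. As a rough sketch of that mapping — an illustration, not the rclone uncPath implementation — and assuming forward slashes have already been normalised to backslashes:

func toUNC(p string) string {
    if strings.HasPrefix(p, `\\?\`) {
        return p // already extended-length, pass through
    }
    if strings.HasPrefix(p, `\\`) {
        // UNC share: \\server\share -> \\?\UNC\server\share
        return `\\?\UNC\` + strings.TrimPrefix(p, `\\`)
    }
    return `\\?\` + p // drive path: C:\dir -> \\?\C:\dir
}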
// Test Windows character replacements
var testsWindows = [][2]string{
    {`c:\temp`, `c:\temp`},
    {`\\?\UNC\theserver\dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
    {`//?/UNC/theserver/dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
    {`c:/temp`, `c:\temp`},
    {`C:/temp/file.txt`, `C:\temp\file.txt`},
    {`c:\!\"#¤%&/()=;:*^?+-`, `c:\!\"#¤%&\()=;:*^?+-`},
    {`c:\<>"|?*:&\<>"|?*:&\<>"|?*:&`, `c:\<>"|?*:&\<>"|?*:&\<>"|?*:&`},
var utf8Tests = [][2]string{
    {"ABC", "ABC"},
    {string([]byte{0x80}), "�"},
    {string([]byte{'a', 0x80, 'b'}), "a�b"},
}

func TestCleanWindows(t *testing.T) {
    if runtime.GOOS != "windows" {
        t.Skipf("windows only")
    }
    for _, test := range testsWindows {
        got := cleanRootPath(test[0], true)
func TestCleanRemote(t *testing.T) {
    f := &Fs{}
    f.warned = make(map[string]struct{})
    for _, test := range utf8Tests {
        got := f.cleanRemote(test[0])
        expect := test[1]
        if got != expect {
            t.Fatalf("got %q, expected %q", got, expect)
        }
    }
}

// Test Windows character replacements
var testsWindows = [][2]string{
    {`c:\temp`, `c:\temp`},
    {`\\?\UNC\theserver\dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
    {`//?/UNC/theserver/dir\file.txt`, `//?/UNC/theserver/dir\file.txt`},
    {"c:/temp", "c:/temp"},
    {"/temp/file.txt", "/temp/file.txt"},
    {`!\"#¤%&/()=;:*^?+-`, "!\\_#¤%&/()=;__^_+-"},
    {`<>"|?*:&\<>"|?*:&\<>"|?*:&`, "_______&\\_______&\\_______&"},
}

func TestCleanWindows(t *testing.T) {
    for _, test := range testsWindows {
        got := cleanWindowsName(nil, test[0])
        expect := test[1]
        if got != expect {
            t.Fatalf("got %q, expected %q", got, expect)
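A minimal sketch of the character mapping the testsWindows expectations above imply: the characters that are invalid in Windows file names (<, >, ", |, ?, *, :) become underscores while path separators survive. The real cleanWindowsName also takes an *Fs so it can warn once per offending name; that part is omitted here.

func cleanName(name string) string {
    return strings.Map(func(r rune) rune {
        switch r {
        case '<', '>', '"', '|', '?', '*', ':':
            return '_' // invalid in Windows file names
        }
        return r
    }, name)
}

Applied to `!\"#¤%&/()=;:*^?+-` this yields `!\_#¤%&/()=;__^_+-`, matching the table.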
@@ -1,107 +0,0 @@
package api

// BIN protocol constants
const (
    BinContentType    = "application/x-www-form-urlencoded"
    TreeIDLength      = 12
    DunnoNodeIDLength = 16
)

// Operations in binary protocol
const (
    OperationAddFile           = 103 // 0x67
    OperationRename            = 105 // 0x69
    OperationCreateFolder      = 106 // 0x6A
    OperationFolderList        = 117 // 0x75
    OperationSharedFoldersList = 121 // 0x79
    // TODO investigate opcodes below
    Operation154MaybeItemInfo = 154 // 0x9A
    Operation102MaybeAbout    = 102 // 0x66
    Operation104MaybeDelete   = 104 // 0x68
)

// CreateDir protocol constants
const (
    MkdirResultOK                  = 0
    MkdirResultSourceNotExists     = 1
    MkdirResultAlreadyExists       = 4
    MkdirResultExistsDifferentCase = 9
    MkdirResultInvalidName         = 10
    MkdirResultFailed254           = 254
)

// Move result codes
const (
    MoveResultOK              = 0
    MoveResultSourceNotExists = 1
    MoveResultFailed002       = 2
    MoveResultAlreadyExists   = 4
    MoveResultFailed005       = 5
    MoveResultFailed254       = 254
)

// AddFile result codes
const (
    AddResultOK          = 0
    AddResultError01     = 1
    AddResultDunno04     = 4
    AddResultWrongPath   = 5
    AddResultNoFreeSpace = 7
    AddResultDunno09     = 9
    AddResultInvalidName = 10
    AddResultNotModified = 12
    AddResultFailedA     = 253
    AddResultFailedB     = 254
)

// List request options
const (
    ListOptTotalSpace  = 1
    ListOptDelete      = 2
    ListOptFingerprint = 4
    ListOptUnknown8    = 8
    ListOptUnknown16   = 16
    ListOptFolderSize  = 32
    ListOptUsedSpace   = 64
    ListOptUnknown128  = 128
    ListOptUnknown256  = 256
)

// ListOptDefaults ...
const ListOptDefaults = ListOptUnknown128 | ListOptUnknown256 | ListOptFolderSize | ListOptTotalSpace | ListOptUsedSpace

// List parse flags
const (
    ListParseDone      = 0
    ListParseReadItem  = 1
    ListParsePin       = 2
    ListParsePinUpper  = 3
    ListParseUnknown15 = 15
)

// List operation results
const (
    ListResultOK              = 0
    ListResultNotExists       = 1
    ListResultDunno02         = 2
    ListResultDunno03         = 3
    ListResultAlreadyExists04 = 4
    ListResultDunno05         = 5
    ListResultDunno06         = 6
    ListResultDunno07         = 7
    ListResultDunno08         = 8
    ListResultAlreadyExists09 = 9
    ListResultDunno10         = 10
    ListResultDunno11         = 11
    ListResultDunno12         = 12
    ListResultFailedB         = 253
    ListResultFailedA         = 254
)

// Directory item types
const (
    ListItemMountPoint   = 0
    ListItemFile         = 1
    ListItemFolder       = 2
    ListItemSharedFolder = 3
)
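The list options above are bit flags, so a request combines them with bitwise OR and a response handler probes them with bitwise AND. A small sketch (values follow the constants above; ListOptDefaults works out to 1|32|64|128|256 = 481):

opts := api.ListOptDefaults | api.ListOptFingerprint // ask for fingerprints too
if opts&api.ListOptFolderSize != 0 {
    // folder sizes were requested
}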
@@ -1,225 +0,0 @@
package api

// BIN protocol helpers

import (
    "bufio"
    "bytes"
    "encoding/binary"
    "io"
    "log"
    "time"

    "github.com/pkg/errors"
    "github.com/rclone/rclone/lib/readers"
)

// protocol errors
var (
    ErrorPrematureEOF  = errors.New("Premature EOF")
    ErrorInvalidLength = errors.New("Invalid length")
    ErrorZeroTerminate = errors.New("String must end with zero")
)

// BinWriter is a binary protocol writer
type BinWriter struct {
    b *bytes.Buffer // growing byte buffer
    a []byte        // temporary buffer for next varint
}

// NewBinWriter creates a binary protocol helper
func NewBinWriter() *BinWriter {
    return &BinWriter{
        b: new(bytes.Buffer),
        a: make([]byte, binary.MaxVarintLen64),
    }
}

// Bytes returns binary data
func (w *BinWriter) Bytes() []byte {
    return w.b.Bytes()
}

// Reader returns io.Reader with binary data
func (w *BinWriter) Reader() io.Reader {
    return bytes.NewReader(w.b.Bytes())
}

// WritePu16 writes a short as unsigned varint
func (w *BinWriter) WritePu16(val int) {
    if val < 0 || val > 65535 {
        log.Fatalf("Invalid UInt16 %v", val)
    }
    w.WritePu64(int64(val))
}

// WritePu32 writes a signed long as unsigned varint
func (w *BinWriter) WritePu32(val int64) {
    if val < 0 || val > 4294967295 {
        log.Fatalf("Invalid UInt32 %v", val)
    }
    w.WritePu64(val)
}

// WritePu64 writes an unsigned (actually, signed) long as unsigned varint
func (w *BinWriter) WritePu64(val int64) {
    if val < 0 {
        log.Fatalf("Invalid UInt64 %v", val)
    }
    w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}

// WriteString writes a zero-terminated string
func (w *BinWriter) WriteString(str string) {
    buf := []byte(str)
    w.WritePu64(int64(len(buf) + 1))
    w.b.Write(buf)
    w.b.WriteByte(0)
}

// Write writes a byte buffer
func (w *BinWriter) Write(buf []byte) {
    w.b.Write(buf)
}

// WriteWithLength writes a byte buffer prepended with its length as varint
func (w *BinWriter) WriteWithLength(buf []byte) {
    w.WritePu64(int64(len(buf)))
    w.b.Write(buf)
}

// BinReader is a binary protocol reader helper
type BinReader struct {
    b     *bufio.Reader
    count *readers.CountingReader
    err   error // keeps the first error encountered
}

// NewBinReader creates a binary protocol reader helper
func NewBinReader(reader io.Reader) *BinReader {
    r := &BinReader{}
    r.count = readers.NewCountingReader(reader)
    r.b = bufio.NewReader(r.count)
    return r
}

// Count returns number of bytes read
func (r *BinReader) Count() uint64 {
    return r.count.BytesRead()
}

// Error returns first encountered error or nil
func (r *BinReader) Error() error {
    return r.err
}

// check() keeps the first error encountered in a stream
func (r *BinReader) check(err error) bool {
    if err == nil {
        return true
    }
    if r.err == nil {
        // keep the first error
        r.err = err
    }
    if err != io.EOF {
        log.Fatalf("Error parsing response: %v", err)
    }
    return false
}

// ReadByteAsInt reads a single byte as uint32, returns -1 for EOF or errors
func (r *BinReader) ReadByteAsInt() int {
    if octet, err := r.b.ReadByte(); r.check(err) {
        return int(octet)
    }
    return -1
}

// ReadByteAsShort reads a single byte as uint16, returns -1 for EOF or errors
func (r *BinReader) ReadByteAsShort() int16 {
    if octet, err := r.b.ReadByte(); r.check(err) {
        return int16(octet)
    }
    return -1
}

// ReadIntSpl reads two bytes as little-endian uint16, returns -1 for EOF or errors
func (r *BinReader) ReadIntSpl() int {
    var val uint16
    if r.check(binary.Read(r.b, binary.LittleEndian, &val)) {
        return int(val)
    }
    return -1
}

// ReadULong returns uint64 equivalent of -1 for EOF or errors
func (r *BinReader) ReadULong() uint64 {
    if val, err := binary.ReadUvarint(r.b); r.check(err) {
        return val
    }
    return 0xffffffffffffffff
}

// ReadPu32 returns -1 for EOF or errors
func (r *BinReader) ReadPu32() int64 {
    if val, err := binary.ReadUvarint(r.b); r.check(err) {
        return int64(val)
    }
    return -1
}

// ReadNBytes reads given number of bytes, returns invalid data for EOF or errors
func (r *BinReader) ReadNBytes(len int) []byte {
    buf := make([]byte, len)
    n, err := r.b.Read(buf)
    if r.check(err) {
        return buf
    }
    if n != len {
        r.check(ErrorPrematureEOF)
    }
    return buf
}

// ReadBytesByLength reads buffer length and its bytes
func (r *BinReader) ReadBytesByLength() []byte {
    len := r.ReadPu32()
    if len < 0 {
        r.check(ErrorInvalidLength)
        return []byte{}
    }
    return r.ReadNBytes(int(len))
}

// ReadString reads a zero-terminated string with length
func (r *BinReader) ReadString() string {
    len := int(r.ReadPu32())
    if len < 1 {
        r.check(ErrorInvalidLength)
        return ""
    }
    buf := make([]byte, len-1)
    n, err := r.b.Read(buf)
    if !r.check(err) {
        return ""
    }
    if n != len-1 {
        r.check(ErrorPrematureEOF)
        return ""
    }
    zeroByte, err := r.b.ReadByte()
    if !r.check(err) {
        return ""
    }
    if zeroByte != 0 {
        r.check(ErrorZeroTerminate)
        return ""
    }
    return string(buf)
}

// ReadDate reads a Unix encoded time
func (r *BinReader) ReadDate() time.Time {
    return time.Unix(r.ReadPu32(), 0)
}
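A minimal round-trip sketch of the two helpers above (illustration only, not taken from the rclone sources): a request body is assembled with BinWriter and parsed back with BinReader, using the opcode and option constants from the previous file.

w := api.NewBinWriter()
w.WritePu16(api.OperationFolderList) // opcode 117 fits in one varint byte
w.WriteString("/backups")            // length-prefixed and zero-terminated
w.WritePu32(api.ListOptDefaults)     // option bitmask (481) as varint

r := api.NewBinReader(w.Reader())
opcode := r.ReadByteAsInt() // a single-byte varint can be read back as a raw byte
home := r.ReadString()
opts := r.ReadPu32()
fmt.Println(opcode, home, opts, r.Error()) // 117 /backups 481 <nil>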
@@ -1,248 +0,0 @@
package api

import (
    "fmt"
)

// M1 protocol constants and structures
const (
    APIServerURL      = "https://cloud.mail.ru"
    PublicLinkURL     = "https://cloud.mail.ru/public/"
    DispatchServerURL = "https://dispatcher.cloud.mail.ru"
    OAuthURL          = "https://o2.mail.ru/token"
    OAuthClientID     = "cloud-win"
)

// ServerErrorResponse represents erroneous API response.
type ServerErrorResponse struct {
    Message string `json:"body"`
    Time    int64  `json:"time"`
    Status  int    `json:"status"`
}

func (e *ServerErrorResponse) Error() string {
    return fmt.Sprintf("server error %d (%s)", e.Status, e.Message)
}

// FileErrorResponse represents erroneous API response for a file
type FileErrorResponse struct {
    Body struct {
        Home struct {
            Value string `json:"value"`
            Error string `json:"error"`
        } `json:"home"`
    } `json:"body"`
    Status  int    `json:"status"`
    Account string `json:"email,omitempty"`
    Time    int64  `json:"time,omitempty"`
    Message string // non-json, calculated field
}

func (e *FileErrorResponse) Error() string {
    return fmt.Sprintf("file error %d (%s)", e.Status, e.Body.Home.Error)
}

// UserInfoResponse contains account metadata
type UserInfoResponse struct {
    Body struct {
        AccountType     string `json:"account_type"`
        AccountVerified bool   `json:"account_verified"`
        Cloud struct {
            Beta struct {
                Allowed bool `json:"allowed"`
                Asked   bool `json:"asked"`
            } `json:"beta"`
            Billing struct {
                ActiveCostID string `json:"active_cost_id"`
                ActiveRateID string `json:"active_rate_id"`
                AutoProlong  bool   `json:"auto_prolong"`
                Basequota    int64  `json:"basequota"`
                Enabled      bool   `json:"enabled"`
                Expires      int    `json:"expires"`
                Prolong      bool   `json:"prolong"`
                Promocodes struct {
                } `json:"promocodes"`
                Subscription []interface{} `json:"subscription"`
                Version      string        `json:"version"`
            } `json:"billing"`
            Bonuses struct {
                CameraUpload bool `json:"camera_upload"`
                Complete     bool `json:"complete"`
                Desktop      bool `json:"desktop"`
                Feedback     bool `json:"feedback"`
                Links        bool `json:"links"`
                Mobile       bool `json:"mobile"`
                Registration bool `json:"registration"`
            } `json:"bonuses"`
            Enable struct {
                Sharing bool `json:"sharing"`
            } `json:"enable"`
            FileSizeLimit int64 `json:"file_size_limit"`
            Space struct {
                BytesTotal int64 `json:"bytes_total"`
                BytesUsed  int   `json:"bytes_used"`
                Overquota  bool  `json:"overquota"`
            } `json:"space"`
        } `json:"cloud"`
        Cloudflags struct {
            Exists bool `json:"exists"`
        } `json:"cloudflags"`
        Domain string `json:"domain"`
        Login  string `json:"login"`
        Newbie bool   `json:"newbie"`
        UI struct {
            ExpandLoader bool   `json:"expand_loader"`
            Kind         string `json:"kind"`
            Sidebar      bool   `json:"sidebar"`
            Sort struct {
                Order string `json:"order"`
                Type  string `json:"type"`
            } `json:"sort"`
            Thumbs bool `json:"thumbs"`
        } `json:"ui"`
    } `json:"body"`
    Email  string `json:"email"`
    Status int    `json:"status"`
    Time   int64  `json:"time"`
}

// ListItem ...
type ListItem struct {
    Count struct {
        Folders int `json:"folders"`
        Files   int `json:"files"`
    } `json:"count,omitempty"`
    Kind      string `json:"kind"`
    Type      string `json:"type"`
    Name      string `json:"name"`
    Home      string `json:"home"`
    Size      int64  `json:"size"`
    Mtime     int64  `json:"mtime,omitempty"`
    Hash      string `json:"hash,omitempty"`
    VirusScan string `json:"virus_scan,omitempty"`
    Tree      string `json:"tree,omitempty"`
    Grev      int    `json:"grev,omitempty"`
    Rev       int    `json:"rev,omitempty"`
}

// ItemInfoResponse ...
type ItemInfoResponse struct {
    Email  string   `json:"email"`
    Body   ListItem `json:"body"`
    Time   int64    `json:"time"`
    Status int      `json:"status"`
}

// FolderInfoResponse ...
type FolderInfoResponse struct {
    Body struct {
        Count struct {
            Folders int `json:"folders"`
            Files   int `json:"files"`
        } `json:"count"`
        Tree string `json:"tree"`
        Name string `json:"name"`
        Grev int    `json:"grev"`
        Size int64  `json:"size"`
        Sort struct {
            Order string `json:"order"`
            Type  string `json:"type"`
        } `json:"sort"`
        Kind string     `json:"kind"`
        Rev  int        `json:"rev"`
        Type string     `json:"type"`
        Home string     `json:"home"`
        List []ListItem `json:"list"`
    } `json:"body,omitempty"`
    Time   int64  `json:"time"`
    Status int    `json:"status"`
    Email  string `json:"email"`
}

// ShardInfoResponse ...
type ShardInfoResponse struct {
    Email string `json:"email"`
    Body struct {
        Video []struct {
            Count string `json:"count"`
            URL   string `json:"url"`
        } `json:"video"`
        ViewDirect []struct {
            Count string `json:"count"`
            URL   string `json:"url"`
        } `json:"view_direct"`
        WeblinkView []struct {
            Count string `json:"count"`
            URL   string `json:"url"`
        } `json:"weblink_view"`
        WeblinkVideo []struct {
            Count string `json:"count"`
            URL   string `json:"url"`
        } `json:"weblink_video"`
        WeblinkGet []struct {
            Count int    `json:"count"`
            URL   string `json:"url"`
        } `json:"weblink_get"`
        Stock []struct {
            Count string `json:"count"`
            URL   string `json:"url"`
        } `json:"stock"`
        WeblinkThumbnails []struct {
            Count string `json:"count"`
            URL   string `json:"url"`
        } `json:"weblink_thumbnails"`
        PublicUpload []struct {
            Count string `json:"count"`
            URL   string `json:"url"`
        } `json:"public_upload"`
        Auth []struct {
            Count string `json:"count"`
            URL   string `json:"url"`
        } `json:"auth"`
        Web []struct {
            Count string `json:"count"`
            URL   string `json:"url"`
        } `json:"web"`
        View []struct {
            Count string `json:"count"`
            URL   string `json:"url"`
        } `json:"view"`
        Upload []struct {
            Count string `json:"count"`
            URL   string `json:"url"`
        } `json:"upload"`
        Get []struct {
            Count string `json:"count"`
            URL   string `json:"url"`
        } `json:"get"`
        Thumbnails []struct {
            Count string `json:"count"`
            URL   string `json:"url"`
        } `json:"thumbnails"`
    } `json:"body"`
    Time   int64 `json:"time"`
    Status int   `json:"status"`
}

// CleanupResponse ...
type CleanupResponse struct {
    Email     string `json:"email"`
    Time      int64  `json:"time"`
    StatusStr string `json:"status"`
}

// GenericResponse ...
type GenericResponse struct {
    Email  string `json:"email"`
    Time   int64  `json:"time"`
    Status int    `json:"status"`
    // ignore other fields
}

// GenericBodyResponse ...
type GenericBodyResponse struct {
    Email  string `json:"email"`
    Body   string `json:"body"`
    Time   int64  `json:"time"`
    Status int    `json:"status"`
}
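A short sketch of how the error types above behave when fed a server payload (the JSON values here are invented for illustration):

var e api.ServerErrorResponse
_ = json.Unmarshal([]byte(`{"body":"user","time":1568816000,"status":403}`), &e)
fmt.Println(e.Error()) // server error 403 (user)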
File diff suppressed because it is too large
@@ -1,18 +0,0 @@
// Test Mailru filesystem interface
package mailru_test

import (
    "testing"

    "github.com/rclone/rclone/backend/mailru"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName:               "TestMailru:",
        NilObject:                (*mailru.Object)(nil),
        SkipBadWindowsCharacters: true,
    })
}
@@ -1,134 +0,0 @@
// Package mrhash implements the mailru hash, which is a modified SHA1.
// If file size is less than or equal to the SHA1 digest size (20 bytes),
// its hash is simply its data right-padded with zero bytes.
// Hash sum of a larger file is computed as a SHA1 sum of the constant
// prefix "mrCloud", the file data bytes, and a decimal representation
// of the data length.
package mrhash

import (
    "crypto/sha1"
    "encoding"
    "encoding/hex"
    "errors"
    "hash"
    "strconv"
)

const (
    // BlockSize of the checksum in bytes.
    BlockSize = sha1.BlockSize
    // Size of the checksum in bytes.
    Size = sha1.Size

    startString = "mrCloud"
    hashError   = "hash function returned error"
)

// Global errors
var (
    ErrorInvalidHash = errors.New("invalid hash")
)

type digest struct {
    total int       // bytes written into hash so far
    sha   hash.Hash // underlying SHA1
    small []byte    // small content
}

// New returns a new hash.Hash computing the Mailru checksum.
func New() hash.Hash {
    d := &digest{}
    d.Reset()
    return d
}

// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
func (d *digest) Write(p []byte) (n int, err error) {
    n, err = d.sha.Write(p)
    if err != nil {
        panic(hashError)
    }
    d.total += n
    if d.total <= Size {
        d.small = append(d.small, p...)
    }
    return n, nil
}

// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (d *digest) Sum(b []byte) []byte {
    // If content is small, return it padded to Size
    if d.total <= Size {
        padded := make([]byte, Size)
        copy(padded, d.small)
        return append(b, padded...)
    }
    endString := strconv.Itoa(d.total)
    copy, err := cloneSHA1(d.sha)
    if err == nil {
        _, err = copy.Write([]byte(endString))
    }
    if err != nil {
        panic(hashError)
    }
    return copy.Sum(b)
}

// cloneSHA1 clones state of SHA1 hash
func cloneSHA1(orig hash.Hash) (clone hash.Hash, err error) {
    state, err := orig.(encoding.BinaryMarshaler).MarshalBinary()
    if err != nil {
        return nil, err
    }
    clone = sha1.New()
    err = clone.(encoding.BinaryUnmarshaler).UnmarshalBinary(state)
    return
}

// Reset resets the Hash to its initial state.
func (d *digest) Reset() {
    d.sha = sha1.New()
    _, _ = d.sha.Write([]byte(startString))
    d.total = 0
}

// Size returns the number of bytes Sum will return.
func (d *digest) Size() int {
    return Size
}

// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (d *digest) BlockSize() int {
    return BlockSize
}

// Sum returns the Mailru checksum of the data.
func Sum(data []byte) []byte {
    var d digest
    d.Reset()
    _, _ = d.Write(data)
    return d.Sum(nil)
}

// DecodeString converts a string to the Mailru hash
func DecodeString(s string) ([]byte, error) {
    b, err := hex.DecodeString(s)
    if err != nil || len(b) != Size {
        return nil, ErrorInvalidHash
    }
    return b, nil
}

// must implement this interface
var (
    _ hash.Hash = (*digest)(nil)
)
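Restating the algorithm from the package comment: for inputs longer than 20 bytes, the sum is a plain SHA1 over the "mrCloud" prefix, the data, and the decimal length. A sketch that reproduces mrhash.Sum by hand:

data := bytes.Repeat([]byte{'A'}, 21)
h := sha1.New()
h.Write([]byte("mrCloud"))               // constant prefix written by Reset
h.Write(data)                            // the file contents
h.Write([]byte(strconv.Itoa(len(data)))) // decimal length, here "21"
manual := hex.EncodeToString(h.Sum(nil))
// manual == hex.EncodeToString(mrhash.Sum(data)); compare the {21, ...}
// row in the test table that follows.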
@@ -1,81 +0,0 @@
package mrhash_test

import (
    "encoding/hex"
    "fmt"
    "testing"

    "github.com/rclone/rclone/backend/mailru/mrhash"
    "github.com/stretchr/testify/assert"
)

func testChunk(t *testing.T, chunk int) {
    data := make([]byte, chunk)
    for i := 0; i < chunk; i++ {
        data[i] = 'A'
    }
    for _, test := range []struct {
        n    int
        want string
    }{
        {0, "0000000000000000000000000000000000000000"},
        {1, "4100000000000000000000000000000000000000"},
        {2, "4141000000000000000000000000000000000000"},
        {19, "4141414141414141414141414141414141414100"},
        {20, "4141414141414141414141414141414141414141"},
        {21, "eb1d05e78a18691a5aa196a6c2b60cd40b5faafb"},
        {22, "037e6d960601118a0639afbeff30fe716c66ed2d"},
        {4096, "45a16aa192502b010280fb5b44274c601a91fd9f"},
        {4194303, "fa019d5bd26498cf6abe35e0d61801bf19bf704b"},
        {4194304, "5ed0e07aa6ea5c1beb9402b4d807258f27d40773"},
        {4194305, "67bd0b9247db92e0e7d7e29a0947a50fedcb5452"},
        {8388607, "41a8e2eb044c2e242971b5445d7be2a13fc0dd84"},
        {8388608, "267a970917c624c11fe624276ec60233a66dc2c0"},
        {8388609, "37b60b308d553d2732aefb62b3ea88f74acfa13f"},
    } {
        d := mrhash.New()
        var toWrite int
        for toWrite = test.n; toWrite >= chunk; toWrite -= chunk {
            n, err := d.Write(data)
            assert.Nil(t, err)
            assert.Equal(t, chunk, n)
        }
        n, err := d.Write(data[:toWrite])
        assert.Nil(t, err)
        assert.Equal(t, toWrite, n)
        got1 := hex.EncodeToString(d.Sum(nil))
        assert.Equal(t, test.want, got1, fmt.Sprintf("when testing length %d", n))
        got2 := hex.EncodeToString(d.Sum(nil))
        assert.Equal(t, test.want, got2, fmt.Sprintf("when testing length %d (2nd sum)", n))
    }
}

func TestHashChunk16M(t *testing.T)  { testChunk(t, 16*1024*1024) }
func TestHashChunk8M(t *testing.T)   { testChunk(t, 8*1024*1024) }
func TestHashChunk4M(t *testing.T)   { testChunk(t, 4*1024*1024) }
func TestHashChunk2M(t *testing.T)   { testChunk(t, 2*1024*1024) }
func TestHashChunk1M(t *testing.T)   { testChunk(t, 1*1024*1024) }
func TestHashChunk64k(t *testing.T)  { testChunk(t, 64*1024) }
func TestHashChunk32k(t *testing.T)  { testChunk(t, 32*1024) }
func TestHashChunk2048(t *testing.T) { testChunk(t, 2048) }
func TestHashChunk2047(t *testing.T) { testChunk(t, 2047) }

func TestSumCalledTwice(t *testing.T) {
    d := mrhash.New()
    assert.NotPanics(t, func() { d.Sum(nil) })
    d.Reset()
    assert.NotPanics(t, func() { d.Sum(nil) })
    assert.NotPanics(t, func() { d.Sum(nil) })
    _, _ = d.Write([]byte{1})
    assert.NotPanics(t, func() { d.Sum(nil) })
}

func TestSize(t *testing.T) {
    d := mrhash.New()
    assert.Equal(t, 20, d.Size())
}

func TestBlockSize(t *testing.T) {
    d := mrhash.New()
    assert.Equal(t, 64, d.BlockSize())
}
@@ -29,7 +29,6 @@ import (
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/config/obscure"
    "github.com/rclone/rclone/fs/encodings"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/pacer"
@@ -37,8 +36,6 @@ import (
    mega "github.com/t3rm1n4l/go-mega"
)

const enc = encodings.Mega

const (
    minSleep = 10 * time.Millisecond
    maxSleep = 2 * time.Second
@@ -248,15 +245,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

// splitNodePath splits nodePath into / separated parts, returning nil if it
// should refer to the root.
// It also encodes the parts into backend specific encoding
// should refer to the root
func splitNodePath(nodePath string) (parts []string) {
    nodePath = path.Clean(nodePath)
    if nodePath == "." || nodePath == "/" {
    parts = strings.Split(nodePath, "/")
    if len(parts) == 1 && (parts[0] == "." || parts[0] == "/") {
        return nil
    }
    nodePath = enc.FromStandardPath(nodePath)
    return strings.Split(nodePath, "/")
    return parts
}

// findNode looks up the node for the path of the name given from the root given
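The intended behaviour of splitNodePath, per its comment above, assuming the Mega encoding is a no-op for these names:

// splitNodePath("/")    -> nil (root)
// splitNodePath(".")    -> nil (root)
// splitNodePath("a/b/") -> []string{"a", "b"} (path.Clean drops the trailing slash)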
@@ -422,7 +418,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
    errors := 0
    // similar to f.deleteNode(trash) but with HardDelete as true
    for _, item := range items {
        fs.Debugf(f, "Deleting trash %q", enc.ToStandardName(item.GetName()))
        fs.Debugf(f, "Deleting trash %q", item.GetName())
        deleteErr := f.pacer.Call(func() (bool, error) {
            err := f.srv.Delete(item, true)
            return shouldRetry(err)
@@ -504,7 +500,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
    }
    var iErr error
    _, err = f.list(ctx, dirNode, func(info *mega.Node) bool {
        remote := path.Join(dir, enc.ToStandardName(info.GetName()))
        remote := path.Join(dir, info.GetName())
        switch info.GetType() {
        case mega.FOLDER, mega.ROOT, mega.INBOX, mega.TRASH:
            d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash())
@@ -726,7 +722,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
    if srcLeaf != dstLeaf {
        //log.Printf("rename %q to %q", srcLeaf, dstLeaf)
        err = f.pacer.Call(func() (bool, error) {
            err = f.srv.Rename(info, enc.FromStandardName(dstLeaf))
            err = f.srv.Rename(info, dstLeaf)
            return shouldRetry(err)
        })
        if err != nil {
@@ -875,13 +871,13 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
    }
    // move them into place
    for _, info := range infos {
        fs.Infof(srcDir, "merging %q", enc.ToStandardName(info.GetName()))
        fs.Infof(srcDir, "merging %q", info.GetName())
        err = f.pacer.Call(func() (bool, error) {
            err = f.srv.Move(info, dstDirNode)
            return shouldRetry(err)
        })
        if err != nil {
            return errors.Wrapf(err, "MergeDirs move failed on %q in %v", enc.ToStandardName(info.GetName()), srcDir)
            return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.GetName(), srcDir)
        }
    }
    // rmdir (into trash) the now empty source directory
@@ -1124,7 +1120,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

    var u *mega.Upload
    err = o.fs.pacer.Call(func() (bool, error) {
        u, err = o.fs.srv.NewUpload(dirNode, enc.FromStandardName(leaf), size)
        u, err = o.fs.srv.NewUpload(dirNode, leaf, size)
        return shouldRetry(err)
    })
    if err != nil {
@@ -15,18 +15,17 @@ import (
    "strings"
    "time"

    "github.com/rclone/rclone/lib/atexit"

    "github.com/pkg/errors"
    "github.com/rclone/rclone/backend/onedrive/api"
    "github.com/rclone/rclone/backend/onedrive/quickxorhash"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/config/obscure"
    "github.com/rclone/rclone/fs/encodings"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/atexit"
    "github.com/rclone/rclone/lib/dircache"
    "github.com/rclone/rclone/lib/oauthutil"
    "github.com/rclone/rclone/lib/pacer"
@@ -35,8 +34,6 @@ import (
    "golang.org/x/oauth2"
)

const enc = encodings.OneDrive

const (
    rcloneClientID              = "b15665d9-eda6-4092-8539-0eec376afd59"
    rcloneEncryptedClientSecret = "_JUdzh3LnKNqSPcf4Wu5fgMFIQOI8glZu_akYgR8yf6egowNBg-R"
@@ -66,20 +63,15 @@ var (
        ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
        RedirectURL:  oauthutil.RedirectLocalhostURL,
    }

    // QuickXorHashType is the hash.Type for OneDrive
    QuickXorHashType hash.Type
)

// Register with Fs
func init() {
    QuickXorHashType = hash.RegisterHash("QuickXorHash", 40, quickxorhash.New)
    fs.Register(&fs.RegInfo{
        Name:        "onedrive",
        Description: "Microsoft OneDrive",
        NewFs:       NewFs,
        Config: func(name string, m configmap.Mapper) {
            ctx := context.TODO()
            err := oauthutil.Config("onedrive", name, m, oauthConfig)
            if err != nil {
                log.Fatalf("Failed to configure token: %v", err)
@@ -151,7 +143,7 @@ func init() {
    }

    sites := siteResponse{}
    _, err := srv.CallJSON(ctx, &opts, nil, &sites)
    _, err := srv.CallJSON(&opts, nil, &sites)
    if err != nil {
        log.Fatalf("Failed to query available sites: %v", err)
    }
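The init function above registers the backend-specific checksum with rclone's hash registry. A condensed sketch of the pattern, with names as in the diff (the width 40 is the hex-encoded length of the 20-byte digest):

// register once at startup...
QuickXorHashType = hash.RegisterHash("QuickXorHash", 40, quickxorhash.New)

// ...then advertise it from the backend:
func (f *Fs) Hashes() hash.Set {
    return hash.Set(QuickXorHashType)
}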
@@ -180,7 +172,7 @@ func init() {
    // query Microsoft Graph
    if finalDriveID == "" {
        drives := drivesResponse{}
        _, err := srv.CallJSON(ctx, &opts, nil, &drives)
        _, err := srv.CallJSON(&opts, nil, &drives)
        if err != nil {
            log.Fatalf("Failed to query available drives: %v", err)
        }
@@ -202,7 +194,7 @@ func init() {
        RootURL: graphURL,
        Path:    "/drives/" + finalDriveID + "/root"}
    var rootItem api.Item
    _, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
    _, err = srv.CallJSON(&opts, nil, &rootItem)
    if err != nil {
        log.Fatalf("Failed to query root for drive %s: %v", finalDriveID, err)
    }
@@ -225,9 +217,9 @@ func init() {
    Help: "Microsoft App Client Secret\nLeave blank normally.",
}, {
    Name: "chunk_size",
    Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
    Help: `Chunk size to upload files with - must be multiple of 320k.

Above this size files will be chunked - must be multiple of 320k (327,680 bytes). Note
Above this size files will be chunked - must be multiple of 320k. Note
that the chunks will be buffered into memory.`,
    Default:  defaultChunkSize,
    Advanced: true,
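The 320k constraint above means 320 KiB, i.e. 327,680 bytes. A hedged sketch of the multiple-of-320k check a config validator would perform (not the verbatim rclone checker):

const chunkSizeMultiple = 320 * 1024 // 327,680 bytes

func checkChunkSize(size int64) error {
    if size <= 0 || size%chunkSizeMultiple != 0 {
        return fmt.Errorf("chunk size %d is not a positive multiple of 320k", size)
    }
    return nil
}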
@@ -351,15 +343,10 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// instead of simply using `drives/driveID/root:/itemPath` because it works for
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
//
// If `relPath` == '', do not append the slash (See #3664)
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
    if relPath != "" {
        relPath = "/" + withTrailingColon(rest.URLPathEscape(enc.FromStandardPath(relPath)))
    }
    opts := newOptsCall(normalizedID, "GET", ":"+relPath)
func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
    opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(replaceReservedChars(relPath))))
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
        resp, err = f.srv.CallJSON(&opts, nil, &info)
        return shouldRetry(resp, err)
    })

@@ -380,11 +367,11 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
    } else {
        opts = rest.Opts{
            Method: "GET",
            Path:   "/root:/" + rest.URLPathEscape(enc.FromStandardPath(path)),
            Path:   "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
        }
    }
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
        resp, err = f.srv.CallJSON(&opts, nil, &info)
        return shouldRetry(resp, err)
    })
    return info, resp, err
@@ -439,7 +426,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
    }
}

    return f.readMetaDataForPathRelativeToID(ctx, baseNormalizedID, relPath)
    return f.readMetaDataForPathRelativeToID(baseNormalizedID, relPath)
}

// errorHandler parses a non 2xx error response into an error
@@ -605,7 +592,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
    if !ok {
        return "", false, errors.New("couldn't find parent ID")
    }
    info, resp, err := f.readMetaDataForPathRelativeToID(ctx, pathID, leaf)
    info, resp, err := f.readMetaDataForPathRelativeToID(pathID, leaf)
    if err != nil {
        if resp != nil && resp.StatusCode == http.StatusNotFound {
            return "", false, nil
@@ -628,11 +615,11 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
    var info *api.Item
    opts := newOptsCall(dirID, "POST", "/children")
    mkdir := api.CreateItemRequest{
        Name:             enc.FromStandardName(leaf),
        Name:             replaceReservedChars(leaf),
        ConflictBehavior: "fail",
    }
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
        resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
        return shouldRetry(resp, err)
    })
    if err != nil {
@@ -655,7 +642,7 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
    // Top parameter asks for bigger pages of data
    // https://dev.onedrive.com/odata/optional-query-parameters.htm
    opts := newOptsCall(dirID, "GET", "/children?$top=1000")
@@ -664,7 +651,7 @@ OUTER:
    var result api.ListChildrenResponse
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
        resp, err = f.srv.CallJSON(&opts, nil, &result)
        return shouldRetry(resp, err)
    })
    if err != nil {
@@ -688,7 +675,7 @@ OUTER:
    if item.Deleted != nil {
        continue
    }
    item.Name = enc.ToStandardName(item.GetName())
    item.Name = restoreReservedChars(item.GetName())
    if fn(item) {
        found = true
        break OUTER
@@ -722,7 +709,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
        return nil, err
    }
    var iErr error
    _, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
    _, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
        if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
            fs.Debugf(info.Name, "OneNote file not shown in directory listing")
            return false
@@ -806,12 +793,12 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}

// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
func (f *Fs) deleteObject(id string) error {
    opts := newOptsCall(id, "DELETE", "")
    opts.NoResponse = true

    return f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.Call(ctx, &opts)
        resp, err := f.srv.Call(&opts)
        return shouldRetry(resp, err)
    })
}
@@ -834,7 +821,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
    }
    if check {
        // check to see if there are any items
        found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool {
        found, err := f.listAll(rootID, false, false, func(item *api.Item) bool {
            return true
        })
        if err != nil {
@@ -844,7 +831,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
            return fs.ErrorDirectoryNotEmpty
        }
    }
    err = f.deleteObject(ctx, rootID)
    err = f.deleteObject(rootID)
    if err != nil {
        return err
    }
@@ -925,8 +912,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
        return nil, err
    }

    srcPath := srcObj.rootPath()
    dstPath := f.rootPath(remote)
    srcPath := srcObj.fs.rootSlash() + srcObj.remote
    dstPath := f.rootSlash() + remote
    if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
        return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
    }
@@ -944,7 +931,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

    id, dstDriveID, _ := parseNormalizedID(directoryID)

    replacedLeaf := enc.FromStandardName(leaf)
    replacedLeaf := replaceReservedChars(leaf)
    copyReq := api.CopyItemRequest{
        Name: &replacedLeaf,
        ParentReference: api.ItemReference{
@@ -954,7 +941,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    }
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, &copyReq, nil)
        resp, err = f.srv.CallJSON(&opts, &copyReq, nil)
        return shouldRetry(resp, err)
    })
    if err != nil {
@@ -1028,7 +1015,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
    opts := newOptsCall(srcObj.id, "PATCH", "")

    move := api.MoveItemRequest{
        Name: enc.FromStandardName(leaf),
        Name: replaceReservedChars(leaf),
        ParentReference: &api.ItemReference{
            DriveID: dstDriveID,
            ID:      id,
@@ -1042,7 +1029,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
    var resp *http.Response
    var info api.Item
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
        resp, err = f.srv.CallJSON(&opts, &move, &info)
        return shouldRetry(resp, err)
    })
    if err != nil {
@@ -1135,7 +1122,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    }

    // Get timestamps of src so they can be preserved
    srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(ctx, srcID, "")
    srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(srcID, "")
    if err != nil {
        return err
    }
@@ -1143,7 +1130,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    // Do the move
    opts := newOptsCall(srcID, "PATCH", "")
    move := api.MoveItemRequest{
        Name: enc.FromStandardName(leaf),
        Name: replaceReservedChars(leaf),
        ParentReference: &api.ItemReference{
            DriveID: dstDriveID,
            ID:      parsedDstDirID,
@@ -1157,7 +1144,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    var resp *http.Response
    var info api.Item
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
        resp, err = f.srv.CallJSON(&opts, &move, &info)
        return shouldRetry(resp, err)
    })
    if err != nil {
@@ -1183,7 +1170,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
    }
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, nil, &drive)
        resp, err = f.srv.CallJSON(&opts, nil, &drive)
        return shouldRetry(resp, err)
    })
    if err != nil {
@@ -1204,12 +1191,12 @@ func (f *Fs) Hashes() hash.Set {
    if f.driveType == driveTypePersonal {
        return hash.Set(hash.SHA1)
    }
    return hash.Set(QuickXorHashType)
    return hash.Set(hash.QuickXorHash)
}

// PublicLink returns a link for downloading without account.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
    info, _, err := f.readMetaDataForPath(ctx, f.rootPath(remote))
    info, _, err := f.readMetaDataForPath(ctx, f.srvPath(remote))
    if err != nil {
        return "", err
    }
@@ -1223,7 +1210,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
    var resp *http.Response
    var result api.CreateShareLinkResponse
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, &share, &result)
        resp, err = f.srv.CallJSON(&opts, &share, &result)
        return shouldRetry(resp, err)
    })
    if err != nil {
@@ -1253,19 +1240,9 @@ func (o *Object) Remote() string {
    return o.remote
}

// rootPath returns a path for use in server given a remote
func (f *Fs) rootPath(remote string) string {
    return f.rootSlash() + remote
}

// rootPath returns a path for use in local functions
func (o *Object) rootPath() string {
    return o.fs.rootPath(o.remote)
}

// srvPath returns a path for use in server given a remote
func (f *Fs) srvPath(remote string) string {
    return enc.FromStandardPath(f.rootSlash() + remote)
    return replaceReservedChars(f.rootSlash() + remote)
}

// srvPath returns a path for use in server
@@ -1280,7 +1257,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
        return o.sha1, nil
    }
    } else {
        if t == QuickXorHashType {
        if t == hash.QuickXorHash {
            return o.quickxorhash, nil
        }
    }
@@ -1342,7 +1319,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
    if o.hasMetaData {
        return nil
    }
    info, _, err := o.fs.readMetaDataForPath(ctx, o.rootPath())
    info, _, err := o.fs.readMetaDataForPath(ctx, o.srvPath())
    if err != nil {
        if apiErr, ok := err.(*api.Error); ok {
            if apiErr.ErrorInfo.Code == "itemNotFound" {
@@ -1377,7 +1354,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
    opts = rest.Opts{
        Method:  "PATCH",
        RootURL: rootURL,
        Path:    "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(enc.FromStandardName(leaf))),
        Path:    "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(leaf)),
    }
} else {
    opts = rest.Opts{
@@ -1393,7 +1370,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
    }
    var info *api.Item
    err := o.fs.pacer.Call(func() (bool, error) {
        resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
        resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
        return shouldRetry(resp, err)
    })
    return info, err
@@ -1428,7 +1405,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
    opts.Options = options

    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.Call(ctx, &opts)
        resp, err = o.fs.srv.Call(&opts)
        return shouldRetry(resp, err)
    })
    if err != nil {
@@ -1451,8 +1428,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
    opts = rest.Opts{
        Method:  "POST",
        RootURL: rootURL,
        Path: fmt.Sprintf("/%s/items/%s:/%s:/createUploadSession",
            drive, id, rest.URLPathEscape(enc.FromStandardName(leaf))),
        Path: "/" + drive + "/items/" + id + ":/" + rest.URLPathEscape(replaceReservedChars(leaf)) + ":/createUploadSession",
    }
} else {
    opts = rest.Opts{
@@ -1465,7 +1441,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
    createRequest.Item.FileSystemInfo.LastModifiedDateTime = api.Timestamp(modTime)
    var resp *http.Response
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.CallJSON(ctx, &opts, &createRequest, &response)
        resp, err = o.fs.srv.CallJSON(&opts, &createRequest, &response)
        if apiErr, ok := err.(*api.Error); ok {
            if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
                // Make the error more user-friendly
@@ -1478,7 +1454,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
    }

// uploadFragment uploads a part
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
func (o *Object) uploadFragment(url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
    opts := rest.Opts{
        Method:  "PUT",
        RootURL: url,
@@ -1491,7 +1467,7 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
    var body []byte
    err = o.fs.pacer.Call(func() (bool, error) {
        _, _ = chunk.Seek(0, io.SeekStart)
        resp, err = o.fs.srv.Call(ctx, &opts)
        resp, err = o.fs.srv.Call(&opts)
        if err != nil {
            return shouldRetry(resp, err)
        }
@@ -1511,7 +1487,7 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
}

// cancelUploadSession cancels an upload session
func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error) {
func (o *Object) cancelUploadSession(url string) (err error) {
    opts := rest.Opts{
        Method:  "DELETE",
        RootURL: url,
@@ -1519,7 +1495,7 @@ func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error
    }
    var resp *http.Response
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.Call(ctx, &opts)
        resp, err = o.fs.srv.Call(&opts)
        return shouldRetry(resp, err)
    })
    return
@@ -1541,7 +1517,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
    }

    fs.Debugf(o, "Cancelling multipart upload")
    cancelErr := o.cancelUploadSession(ctx, uploadURL)
    cancelErr := o.cancelUploadSession(uploadURL)
    if cancelErr != nil {
        fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
    }
@@ -1577,7 +1553,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
    }
    seg := readers.NewRepeatableReader(io.LimitReader(in, n))
    fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
    info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n)
    info, err = o.uploadFragment(uploadURL, position, size, seg, n)
    if err != nil {
        return nil, err
    }
@@ -1604,7 +1580,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
    opts = rest.Opts{
        Method:        "PUT",
        RootURL:       rootURL,
        Path:          "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(enc.FromStandardName(leaf)) + ":/content",
        Path:          "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf) + ":/content",
        ContentLength: &size,
        Body:          in,
    }
@@ -1618,7 +1594,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
    }

    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
        resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
        if apiErr, ok := err.(*api.Error); ok {
            if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
                // Make the error more user-friendly
@@ -1670,7 +1646,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
    return o.fs.deleteObject(ctx, o.id)
    return o.fs.deleteObject(o.id)
}

// MimeType of an Object if known, "" otherwise
91
backend/onedrive/replace.go
Normal file
@@ -0,0 +1,91 @@
/*
Translate file names for one drive

OneDrive reserved characters

The following characters are OneDrive reserved characters, and can't
be used in OneDrive folder and file names.

onedrive-reserved = "/" / "\" / "*" / "<" / ">" / "?" / ":" / "|"
onedrive-business-reserved
= "/" / "\" / "*" / "<" / ">" / "?" / ":" / "|" / "#" / "%"

Note: Folder names can't end with a period (.).

Note: OneDrive for Business file or folder names cannot begin with a
tilde ('~').

*/

package onedrive

import (
"regexp"
"strings"
)

// charMap holds replacements for characters
//
// Onedrive has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'*': '＊', // FULLWIDTH ASTERISK
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'?': '？', // FULLWIDTH QUESTION MARK
':': '：', // FULLWIDTH COLON
'|': '｜', // FULLWIDTH VERTICAL LINE
'#': '＃', // FULLWIDTH NUMBER SIGN
'%': '％', // FULLWIDTH PERCENT SIGN
'"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
'.': '．', // FULLWIDTH FULL STOP
'~': '～', // FULLWIDTH TILDE
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixEndingInPeriod = regexp.MustCompile(`\.(/|$)`)
fixStartingWithTilde = regexp.MustCompile(`(/|^)~`)
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
)

func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}

// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Folder names can't end with a period '.'
in = fixEndingInPeriod.ReplaceAllString(in, string(charMap['.'])+"$1")
// OneDrive for Business file or folder names cannot begin with a tilde '~'
in = fixStartingWithTilde.ReplaceAllString(in, "$1"+string(charMap['~']))
// Apparently file names can't start with space either
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Replace reserved characters
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != '.' && c != '~' && c != ' ' {
return replacement
}
return c
}, in)
}

// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}
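The new file above hinges on the substitution being invertible: charMap rewrites each reserved rune to its FULLWIDTH look-alike, init() builds the inverse map for the way back, and the regexes handle the positional rules (trailing period, leading tilde/space) before the rune-by-rune pass. A minimal standalone sketch of the same round-trip, with a simplified two-entry map rather than the rclone code:

package main

import (
	"fmt"
	"strings"
)

// Pared-down version of the charMap idea: map reserved ASCII runes to
// FULLWIDTH look-alikes on the way in, invert the map on the way out.
var charMap = map[rune]rune{
	'*': '＊', // FULLWIDTH ASTERISK
	'?': '？', // FULLWIDTH QUESTION MARK
}

var invCharMap = func() map[rune]rune {
	m := make(map[rune]rune, len(charMap))
	for k, v := range charMap {
		m[v] = k
	}
	return m
}()

func replace(in string) string {
	return strings.Map(func(c rune) rune {
		if r, ok := charMap[c]; ok {
			return r
		}
		return c
	}, in)
}

func restore(in string) string {
	return strings.Map(func(c rune) rune {
		if r, ok := invCharMap[c]; ok {
			return r
		}
		return c
	}, in)
}

func main() {
	name := "report*final?.txt"
	encoded := replace(name)
	fmt.Println(encoded)                // report＊final？.txt
	fmt.Println(restore(encoded) == name) // true
}

Note the scheme assumes incoming names don't already contain the FULLWIDTH characters; if one does, restoreReservedChars will map it back to the reserved ASCII form, so the round trip is only exact for names that avoid them.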
30
backend/onedrive/replace_test.go
Normal file
@@ -0,0 +1,30 @@
package onedrive

import "testing"

func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\*<>?:|#%".~`, `＼＊＜＞？：｜＃％＂．～`},
{`\*<>?:|#%".~/\*<>?:|#%".~`, `＼＊＜＞？：｜＃％＂．～/＼＊＜＞？：｜＃％＂．～`},
{" leading space", "␠leading space"},
{"~leading tilde", "～leading tilde"},
{"trailing dot.", "trailing dot．"},
{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
{"~leading tilde/~leading tilde/~leading tilde", "～leading tilde/～leading tilde/～leading tilde"},
{"trailing dot./trailing dot./trailing dot.", "trailing dot．/trailing dot．/trailing dot．"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}
@@ -16,7 +16,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
@@ -26,8 +25,6 @@ import (
"github.com/rclone/rclone/lib/rest"
)

const enc = encodings.OpenDrive

const (
defaultEndpoint = "https://dev.opendrive.com/api/v1"
minSleep = 10 * time.Millisecond
@@ -164,7 +161,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
Method: "POST",
Path: "/session/login.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &account, &f.session)
resp, err = f.srv.CallJSON(&opts, &account, &f.session)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -249,7 +246,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}

// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
func (f *Fs) deleteObject(id string) error {
return f.pacer.Call(func() (bool, error) {
removeDirData := removeFolder{SessionID: f.session.SessionID, FolderID: id}
opts := rest.Opts{
@@ -257,7 +254,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
NoResponse: true,
Path: "/folder/remove.json",
}
resp, err := f.srv.CallJSON(ctx, &opts, &removeDirData, nil)
resp, err := f.srv.CallJSON(&opts, &removeDirData, nil)
return f.shouldRetry(resp, err)
})
}
@@ -278,14 +275,14 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
if err != nil {
return err
}
item, err := f.readMetaDataForFolderID(ctx, rootID)
item, err := f.readMetaDataForFolderID(rootID)
if err != nil {
return err
}
if check && len(item.Files) != 0 {
return errors.New("folder not empty")
}
err = f.deleteObject(ctx, rootID)
err = f.deleteObject(rootID)
if err != nil {
return err
}
@@ -356,7 +353,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Method: "POST",
Path: "/file/move_copy.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &copyFileData, &response)
resp, err = f.srv.CallJSON(&opts, &copyFileData, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -413,7 +410,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
Method: "POST",
Path: "/file/move_copy.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &copyFileData, &response)
resp, err = f.srv.CallJSON(&opts, &copyFileData, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -512,7 +509,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
Method: "POST",
Path: "/folder/move_copy.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &moveFolderData, &response)
resp, err = f.srv.CallJSON(&opts, &moveFolderData, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -588,18 +585,18 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
fs: f,
remote: remote,
}
return o, enc.FromStandardName(leaf), directoryID, nil
return o, leaf, directoryID, nil
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForFolderID(ctx context.Context, id string) (info *FolderList, err error) {
func (f *Fs) readMetaDataForFolderID(id string) (info *FolderList, err error) {
var resp *http.Response
opts := rest.Opts{
Method: "GET",
Path: "/folder/list.json/" + f.session.SessionID + "/" + id,
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
resp, err = f.srv.CallJSON(&opts, nil, &info)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -639,16 +636,12 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
var resp *http.Response
response := createFileResponse{}
err := o.fs.pacer.Call(func() (bool, error) {
createFileData := createFile{
SessionID: o.fs.session.SessionID,
FolderID: directoryID,
Name: leaf,
}
createFileData := createFile{SessionID: o.fs.session.SessionID, FolderID: directoryID, Name: replaceReservedChars(leaf)}
opts := rest.Opts{
Method: "POST",
Path: "/upload/create_file.json",
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, &createFileData, &response)
resp, err = o.fs.srv.CallJSON(&opts, &createFileData, &response)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -690,7 +683,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
err = f.pacer.Call(func() (bool, error) {
createDirData := createFolder{
SessionID: f.session.SessionID,
FolderName: enc.FromStandardName(leaf),
FolderName: replaceReservedChars(leaf),
FolderSubParent: pathID,
FolderIsPublic: 0,
FolderPublicUpl: 0,
@@ -701,7 +694,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Method: "POST",
Path: "/folder.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &createDirData, &response)
resp, err = f.srv.CallJSON(&opts, &createDirData, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -729,15 +722,15 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
Method: "GET",
Path: "/folder/list.json/" + f.session.SessionID + "/" + pathID,
}
resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList)
resp, err = f.srv.CallJSON(&opts, nil, &folderList)
return f.shouldRetry(resp, err)
})
if err != nil {
return "", false, errors.Wrap(err, "failed to get folder list")
}

leaf = enc.FromStandardName(leaf)
for _, folder := range folderList.Folders {
folder.Name = restoreReservedChars(folder.Name)
// fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)

if leaf == folder.Name {
@@ -776,7 +769,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
folderList := FolderList{}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList)
resp, err = f.srv.CallJSON(&opts, nil, &folderList)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -784,7 +777,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}

for _, folder := range folderList.Folders {
folder.Name = enc.ToStandardName(folder.Name)
folder.Name = restoreReservedChars(folder.Name)
// fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)
remote := path.Join(dir, folder.Name)
// cache the directory ID for later lookups
@@ -795,7 +788,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}

for _, file := range folderList.Files {
file.Name = enc.ToStandardName(file.Name)
file.Name = restoreReservedChars(file.Name)
// fs.Debugf(nil, "File: %s (%s)", file.Name, file.FileID)
remote := path.Join(dir, file.Name)
o, err := f.newObjectWithInfo(ctx, remote, &file)
@@ -858,13 +851,9 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
NoResponse: true,
Path: "/file/filesettings.json",
}
update := modTimeFile{
SessionID: o.fs.session.SessionID,
FileID: o.id,
FileModificationTime: strconv.FormatInt(modTime.Unix(), 10),
}
update := modTimeFile{SessionID: o.fs.session.SessionID, FileID: o.id, FileModificationTime: strconv.FormatInt(modTime.Unix(), 10)}
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, nil)
resp, err := o.fs.srv.CallJSON(&opts, &update, nil)
return o.fs.shouldRetry(resp, err)
})

@@ -884,7 +873,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -903,7 +892,7 @@ func (o *Object) Remove(ctx context.Context) error {
NoResponse: true,
Path: "/file.json/" + o.fs.session.SessionID + "/" + o.id,
}
resp, err := o.fs.srv.Call(ctx, &opts)
resp, err := o.fs.srv.Call(&opts)
return o.fs.shouldRetry(resp, err)
})
}
@@ -931,7 +920,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Method: "POST",
Path: "/upload/open_file_upload.json",
}
resp, err := o.fs.srv.CallJSON(ctx, &opts, &openUploadData, &openResponse)
resp, err := o.fs.srv.CallJSON(&opts, &openUploadData, &openResponse)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -977,7 +966,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
MultipartFileName: o.remote, // ..name of the file for the attached file

}
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &reply)
resp, err = o.fs.srv.CallJSON(&opts, nil, &reply)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -1000,7 +989,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Method: "POST",
Path: "/upload/close_file_upload.json",
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, &closeUploadData, &closeResponse)
resp, err = o.fs.srv.CallJSON(&opts, &closeUploadData, &closeResponse)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -1026,7 +1015,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
NoResponse: true,
Path: "/file/access.json",
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, &update, nil)
resp, err = o.fs.srv.CallJSON(&opts, &update, nil)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -1049,10 +1038,9 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
opts := rest.Opts{
Method: "GET",
Path: fmt.Sprintf("/folder/itembyname.json/%s/%s?name=%s",
o.fs.session.SessionID, directoryID, url.QueryEscape(enc.FromStandardName(leaf))),
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + url.QueryEscape(replaceReservedChars(leaf)),
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &folderList)
resp, err = o.fs.srv.CallJSON(&opts, nil, &folderList)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
78
backend/opendrive/replace.go
Normal file
@@ -0,0 +1,78 @@
/*
Translate file names for OpenDrive

OpenDrive reserved characters

The following characters are OpenDrive reserved characters, and can't
be used in OpenDrive folder and file names.

\ / : * ? " < > |

OpenDrive files and folders also can't have leading or trailing spaces.

*/

package opendrive

import (
"regexp"
"strings"
)

// charMap holds replacements for characters
//
// OpenDrive has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
':': '：', // FULLWIDTH COLON
'*': '＊', // FULLWIDTH ASTERISK
'?': '？', // FULLWIDTH QUESTION MARK
'"': '＂', // FULLWIDTH QUOTATION MARK
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'|': '｜', // FULLWIDTH VERTICAL LINE
' ': '␠', // SYMBOL FOR SPACE
}
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
invCharMap map[rune]rune
)

func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}

// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Filenames can't start with space
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Filenames can't end with space
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}

// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}
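The two regexes anchor on (/|^) and (/|$), so the space fix applies per path component rather than only at the ends of the whole path. A standalone sketch using the same patterns as the diff (not rclone code):

package main

import (
	"fmt"
	"regexp"
)

// Same patterns as in the diff: the alternations make the fix apply to
// every path component, not just the first and last characters.
var (
	fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
	fixEndingWithSpace   = regexp.MustCompile(` (/|$)`)
)

func main() {
	in := " a / b /c "
	out := fixStartingWithSpace.ReplaceAllString(in, "$1␠")
	out = fixEndingWithSpace.ReplaceAllString(out, "␠$1")
	fmt.Println(out) // ␠a␠/␠b␠/c␠
}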
28
backend/opendrive/replace_test.go
Normal file
@@ -0,0 +1,28 @@
package opendrive

import "testing"

func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\*<>?:|#%".~`, `＼＊＜＞？：｜#%＂.~`},
{`\*<>?:|#%".~/\*<>?:|#%".~`, `＼＊＜＞？：｜#%＂.~/＼＊＜＞？：｜#%＂.~`},
{" leading space", "␠leading space"},
{" path/ leading spaces", "␠path/␠ leading spaces"},
{"trailing space ", "trailing space␠"},
{"trailing spaces /path ", "trailing spaces ␠/path␠"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}
@@ -26,7 +26,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
@@ -36,8 +35,6 @@ import (
"golang.org/x/oauth2"
)

const enc = encodings.Pcloud

const (
rcloneClientID = "DnONSzyJXpm"
rcloneEncryptedClientSecret = "ej1OIF39VOQQ0PXaSdK9ztkLw3tdLNscW2157TKNQdQKkICR4uU7aFg4eFM"
@@ -178,6 +175,21 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return doRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// substitute reserved characters for pcloud
//
// Generally all characters are allowed in filenames, except the NULL
// byte, forward slash and backslash (/, \ and \0)
func replaceReservedChars(x string) string {
// Backslash for FULLWIDTH REVERSE SOLIDUS
return strings.Replace(x, "\\", "＼", -1)
}

// restore reserved characters for pcloud
func restoreReservedChars(x string) string {
// FULLWIDTH REVERSE SOLIDUS for Backslash
return strings.Replace(x, "＼", "\\", -1)
}

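Of the three reserved characters the comment lists, only the backslash can actually occur in a leaf name (the slash is the path separator and NUL can't be typed), so a plain strings.Replace pair suffices here; the -1 count means replace every occurrence. A minimal round-trip sketch under those assumptions, not the rclone code:

package main

import (
	"fmt"
	"strings"
)

// pcloud-style one-character substitution: "\" becomes FULLWIDTH
// REVERSE SOLIDUS ("＼") on the way in and is restored on the way out.
func main() {
	name := `back\slash\name`
	encoded := strings.Replace(name, `\`, "＼", -1)
	decoded := strings.Replace(encoded, "＼", `\`, -1)
	fmt.Println(encoded)         // back＼slash＼name
	fmt.Println(decoded == name) // true
}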
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
@@ -189,7 +201,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err
}

found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
found, err := f.listAll(directoryID, false, true, func(item *api.Item) bool {
if item.Name == leaf {
info = item
return true
@@ -322,7 +334,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
pathIDOut = item.ID
return true
@@ -342,10 +354,10 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Path: "/createfolder",
Parameters: url.Values{},
}
opts.Parameters.Set("name", enc.FromStandardName(leaf))
opts.Parameters.Set("name", replaceReservedChars(leaf))
opts.Parameters.Set("folderid", dirIDtoNumber(pathID))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -388,7 +400,7 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/listfolder",
@@ -400,7 +412,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
var result api.ItemResult
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -418,7 +430,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
continue
}
}
item.Name = enc.ToStandardName(item.Name)
item.Name = restoreReservedChars(item.Name)
if fn(item) {
found = true
break
@@ -446,7 +458,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.IsFolder {
// cache the directory ID for later lookups
@@ -551,7 +563,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
var resp *http.Response
var result api.ItemResult
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -610,13 +622,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Parameters: url.Values{},
}
opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
opts.Parameters.Set("toname", enc.FromStandardName(leaf))
opts.Parameters.Set("toname", replaceReservedChars(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("mtime", fmt.Sprintf("%d", srcObj.modTime.Unix()))
var resp *http.Response
var result api.ItemResult
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -654,7 +666,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
var resp *http.Response
var result api.Error
return f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Update(err)
return shouldRetry(resp, err)
})
@@ -689,12 +701,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
Parameters: url.Values{},
}
opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
opts.Parameters.Set("toname", enc.FromStandardName(leaf))
opts.Parameters.Set("toname", replaceReservedChars(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
var resp *http.Response
var result api.ItemResult
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -786,12 +798,12 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
Parameters: url.Values{},
}
opts.Parameters.Set("folderid", dirIDtoNumber(srcID))
opts.Parameters.Set("toname", enc.FromStandardName(leaf))
opts.Parameters.Set("toname", replaceReservedChars(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
var resp *http.Response
var result api.ItemResult
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -818,7 +830,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var resp *http.Response
var q api.UserInfo
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &q)
resp, err = f.srv.CallJSON(&opts, nil, &q)
err = q.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -869,7 +881,7 @@ func (o *Object) getHashes(ctx context.Context) (err error) {
}
opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = o.fs.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -972,7 +984,7 @@ func (o *Object) Storable() bool {
}

// downloadURL fetches the download link
func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {
func (o *Object) downloadURL() (URL string, err error) {
if o.id == "" {
return "", errors.New("can't download - no id")
}
@@ -988,7 +1000,7 @@ func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {
}
opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = o.fs.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -1004,7 +1016,7 @@ func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
url, err := o.downloadURL(ctx)
url, err := o.downloadURL()
if err != nil {
return nil, err
}
@@ -1015,7 +1027,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1066,7 +1078,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
}
leaf = enc.FromStandardName(leaf)
leaf = replaceReservedChars(leaf)
opts.Parameters.Set("filename", leaf)
opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("nopartial", "1")
@@ -1092,7 +1104,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = o.fs.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -1122,7 +1134,7 @@ func (o *Object) Remove(ctx context.Context) error {
var result api.ItemResult
opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &result)
resp, err := o.fs.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -2,7 +2,9 @@
// object storage system.
package premiumizeme

/*
/* FIXME
escaping needs fixing

Run of rclone info
stringNeedsEscaping = []rune{
0x00, 0x0A, 0x0D, 0x22, 0x2F, 0x5C, 0xBF, 0xFE
@@ -34,7 +36,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
@@ -46,8 +47,6 @@ import (
"golang.org/x/oauth2"
)

const enc = encodings.PremiumizeMe

const (
rcloneClientID = "658922194"
rcloneEncryptedClientSecret = "B5YIvQoRIhcpAYs8HYeyjb9gK-ftmZEbqdh_gNfc4RgO9Q"
@@ -171,6 +170,24 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// substitute reserved characters
func replaceReservedChars(x string) string {
// Backslash for FULLWIDTH REVERSE SOLIDUS
x = strings.Replace(x, "\\", "＼", -1)
// Double quote for FULLWIDTH QUOTATION MARK
x = strings.Replace(x, `"`, "＂", -1)
return x
}

// restore reserved characters
func restoreReservedChars(x string) string {
// FULLWIDTH QUOTATION MARK for Double quote
x = strings.Replace(x, "＂", `"`, -1)
// FULLWIDTH REVERSE SOLIDUS for Backslash
x = strings.Replace(x, "＼", "\\", -1)
return x
}

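Same idea for premiumize.me, with two characters. Because the two mappings touch disjoint characters, the order of the Replace calls is immaterial here; it would only matter if one mapping's output could be another's input. A quick round-trip check (standalone sketch, not rclone code):

package main

import (
	"fmt"
	"strings"
)

// Two-character substitution mirroring the diff above.
func replace(x string) string {
	x = strings.Replace(x, `\`, "＼", -1)
	x = strings.Replace(x, `"`, "＂", -1)
	return x
}

func restore(x string) string {
	x = strings.Replace(x, "＂", `"`, -1)
	x = strings.Replace(x, "＼", `\`, -1)
	return x
}

func main() {
	name := `say "hi"\there`
	fmt.Println(replace(name))                  // say ＂hi＂＼there
	fmt.Println(restore(replace(name)) == name) // true
}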
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
@@ -183,7 +200,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, directoriesOn
}

lcLeaf := strings.ToLower(leaf)
found, err := f.listAll(ctx, directoryID, directoriesOnly, filesOnly, func(item *api.Item) bool {
found, err := f.listAll(directoryID, directoriesOnly, filesOnly, func(item *api.Item) bool {
if strings.ToLower(item.Name) == lcLeaf {
info = item
return true
@@ -344,7 +361,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
pathIDOut = item.ID
return true
@@ -364,12 +381,12 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Path: "/folder/create",
Parameters: f.baseParams(),
MultipartParams: url.Values{
"name": {enc.FromStandardName(leaf)},
"name": {replaceReservedChars(leaf)},
"parent_id": {pathID},
},
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -394,7 +411,7 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folder/list",
@@ -406,7 +423,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
var result api.FolderListResponse
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -429,7 +446,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
item.Name = enc.ToStandardName(item.Name)
item.Name = restoreReservedChars(item.Name)
if fn(item) {
found = true
break
@@ -458,7 +475,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.Type == api.ItemTypeFolder {
// cache the directory ID for later lookups
@@ -572,7 +589,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {

// need to check if empty as it will delete recursively by default
if check {
found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool {
found, err := f.listAll(rootID, false, false, func(item *api.Item) bool {
return true
})
if err != nil {
@@ -594,7 +611,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
var resp *http.Response
var result api.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -637,8 +654,8 @@ func (f *Fs) Purge(ctx context.Context) error {
// between directories and a separate one to rename them. We try to
// call the minimum number of API calls.
func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDirectoryID, newDirectoryID string) (err error) {
newLeaf = enc.FromStandardName(newLeaf)
oldLeaf = enc.FromStandardName(oldLeaf)
newLeaf = replaceReservedChars(newLeaf)
oldLeaf = replaceReservedChars(oldLeaf)
doRenameLeaf := oldLeaf != newLeaf
doMove := oldDirectoryID != newDirectoryID

@@ -669,11 +686,11 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
} else {
opts.MultipartParams.Set("folders[]", id)
}
//replacedLeaf := enc.FromStandardName(leaf)
//replacedLeaf := replaceReservedChars(leaf)
var resp *http.Response
var result api.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -843,7 +860,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
Parameters: f.baseParams(),
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -891,7 +908,7 @@ func (o *Object) Remote() string {

// srvPath returns a path for use in server
func (o *Object) srvPath() string {
return enc.FromStandardPath(o.fs.rootSlash() + o.remote)
return replaceReservedChars(o.fs.rootSlash() + o.remote)
}

// Hash returns the SHA-1 of an object returning a lowercase hex string
@@ -975,7 +992,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1006,7 +1023,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
leaf = enc.FromStandardName(leaf)
leaf = replaceReservedChars(leaf)

var resp *http.Response
var info api.FolderUploadinfoResponse
@@ -1019,7 +1036,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
},
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
if err != nil {
return shouldRetry(resp, err)
}
@@ -1079,7 +1096,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
var result api.Response
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = o.fs.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1121,7 +1138,7 @@ func (f *Fs) renameLeaf(ctx context.Context, isFile bool, id string, newLeaf str
var resp *http.Response
var result api.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1146,7 +1163,7 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
var resp *http.Response
var result api.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1,43 +0,0 @@
package putio

import (
"fmt"
"net/http"

"github.com/putdotio/go-putio/putio"
"github.com/rclone/rclone/fs/fserrors"
)

func checkStatusCode(resp *http.Response, expected int) error {
if resp.StatusCode != expected {
return &statusCodeError{response: resp}
}
return nil
}

type statusCodeError struct {
response *http.Response
}

func (e *statusCodeError) Error() string {
return fmt.Sprintf("unexpected status code (%d) response while doing %s to %s", e.response.StatusCode, e.response.Request.Method, e.response.Request.URL.String())
}

func (e *statusCodeError) Temporary() bool {
return e.response.StatusCode == 429 || e.response.StatusCode >= 500
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(err error) (bool, error) {
if err == nil {
return false, nil
}
if perr, ok := err.(*putio.ErrorResponse); ok {
err = &statusCodeError{response: perr.Response}
}
if fserrors.ShouldRetry(err) {
return true, err
}
return false, err
}
@@ -17,6 +17,7 @@ import (
"github.com/putdotio/go-putio/putio"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
@@ -57,6 +58,23 @@ func (f *Fs) Features() *fs.Features {
return f.features
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(err error) (bool, error) {
if err == nil {
return false, nil
}
if fserrors.ShouldRetry(err) {
return true, err
}
if perr, ok := err.(*putio.ErrorResponse); ok {
if perr.Response.StatusCode == 429 || perr.Response.StatusCode >= 500 {
return true, err
}
}
return false, err
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
// defer log.Trace(name, "root=%v", root)("f=%+v, err=%v", &f, &err)
@@ -127,7 +145,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
var entry putio.File
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "creating folder. part: %s, parentID: %d", leaf, parentID)
entry, err = f.client.Files.CreateFolder(ctx, enc.FromStandardName(leaf), parentID)
entry, err = f.client.Files.CreateFolder(ctx, leaf, parentID)
return shouldRetry(err)
})
return itoa(entry.ID), err
@@ -154,11 +172,11 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
return
}
for _, child := range children {
if enc.ToStandardName(child.Name) == leaf {
if child.Name == leaf {
found = true
pathIDOut = itoa(child.ID)
if !child.IsDir() {
err = fs.ErrorIsFile
err = fs.ErrorNotAFile
}
return
}
@@ -196,7 +214,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return
}
for _, child := range children {
remote := path.Join(dir, enc.ToStandardName(child.Name))
remote := path.Join(dir, child.Name)
// fs.Debugf(f, "child: %s", remote)
if child.IsDir() {
f.dirCache.Put(remote, itoa(child.ID))
@@ -271,10 +289,9 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
if err != nil {
return false, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
req.Header.Set("upload-length", strconv.FormatInt(size, 10))
b64name := base64.StdEncoding.EncodeToString([]byte(enc.FromStandardName(name)))
b64name := base64.StdEncoding.EncodeToString([]byte(name))
b64true := base64.StdEncoding.EncodeToString([]byte("true"))
b64parentID := base64.StdEncoding.EncodeToString([]byte(parentID))
b64modifiedAt := base64.StdEncoding.EncodeToString([]byte(modTime.Format(time.RFC3339)))
@@ -300,133 +317,73 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
}

func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.Reader) (fileID int64, err error) {
// defer log.Trace(f, "location=%v, size=%v", location, size)("fileID=%v, err=%v", &fileID, &err)
// defer log.Trace(f, "location=%v, size=%v", location, size)("fileID=%v, err=%v", fileID, &err)
if size == 0 {
err = f.pacer.Call(func() (bool, error) {
fs.Debugf(f, "Sending zero length chunk")
_, fileID, err = f.transferChunk(ctx, location, 0, bytes.NewReader([]byte{}), 0)
fileID, err = f.transferChunk(ctx, location, 0, bytes.NewReader([]byte{}), 0)
return shouldRetry(err)
})
return
}
var clientOffset int64
var offsetMismatch bool
var start int64
buf := make([]byte, defaultChunkSize)
for clientOffset < size {
chunkSize := size - clientOffset
if chunkSize >= int64(defaultChunkSize) {
chunkSize = int64(defaultChunkSize)
for start < size {
reqSize := size - start
if reqSize >= int64(defaultChunkSize) {
reqSize = int64(defaultChunkSize)
}
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
chunkStart := clientOffset
reqSize := chunkSize
transferOffset := clientOffset
fs.Debugf(f, "chunkStart: %d, reqSize: %d", chunkStart, reqSize)
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, reqSize)

// Transfer the chunk
err = f.pacer.Call(func() (bool, error) {
if offsetMismatch {
// Get file offset and seek to the position
offset, err := f.getServerOffset(ctx, location)
if err != nil {
return shouldRetry(err)
}
sentBytes := offset - chunkStart
fs.Debugf(f, "sentBytes: %d", sentBytes)
_, err = chunk.Seek(sentBytes, io.SeekStart)
if err != nil {
return shouldRetry(err)
}
transferOffset = offset
reqSize = chunkSize - sentBytes
offsetMismatch = false
}
fs.Debugf(f, "Sending chunk. transferOffset: %d length: %d", transferOffset, reqSize)
var serverOffset int64
serverOffset, fileID, err = f.transferChunk(ctx, location, transferOffset, chunk, reqSize)
if cerr, ok := err.(*statusCodeError); ok && cerr.response.StatusCode == 409 {
offsetMismatch = true
return true, err
}
if serverOffset != (transferOffset + reqSize) {
offsetMismatch = true
return true, errors.New("connection broken")
}
fs.Debugf(f, "Sending chunk. start: %d length: %d", start, reqSize)
// TODO get file offset and seek to the position
fileID, err = f.transferChunk(ctx, location, start, chunk, reqSize)
return shouldRetry(err)
})
if err != nil {
return
}

clientOffset += chunkSize
start += reqSize
}
return
}

func (f *Fs) getServerOffset(ctx context.Context, location string) (offset int64, err error) {
// defer log.Trace(f, "location=%v", location)("offset=%v, err=%v", &offset, &err)
req, err := f.makeUploadHeadRequest(ctx, location)
func (f *Fs) transferChunk(ctx context.Context, location string, start int64, chunk io.ReadSeeker, chunkSize int64) (fileID int64, err error) {
// defer log.Trace(f, "location=%v, start=%v, chunkSize=%v", location, start, chunkSize)("fileID=%v, err=%v", fileID, &err)
_, _ = chunk.Seek(0, io.SeekStart)
req, err := f.makeUploadPatchRequest(location, chunk, start, chunkSize)
if err != nil {
return 0, err
}
resp, err := f.oAuthClient.Do(req)
req = req.WithContext(ctx)
res, err := f.oAuthClient.Do(req)
if err != nil {
return 0, err
}
err = checkStatusCode(resp, 200)
if err != nil {
return 0, err
}
return strconv.ParseInt(resp.Header.Get("upload-offset"), 10, 64)
}

func (f *Fs) transferChunk(ctx context.Context, location string, start int64, chunk io.ReadSeeker, chunkSize int64) (serverOffset, fileID int64, err error) {
// defer log.Trace(f, "location=%v, start=%v, chunkSize=%v", location, start, chunkSize)("fileID=%v, err=%v", &fileID, &err)
req, err := f.makeUploadPatchRequest(ctx, location, chunk, start, chunkSize)
if err != nil {
return
}
resp, err := f.oAuthClient.Do(req)
if err != nil {
return
}
defer func() {
_ = resp.Body.Close()
_ = res.Body.Close()
}()
err = checkStatusCode(resp, 204)
if err != nil {
return
if res.StatusCode != 204 {
return 0, fmt.Errorf("unexpected status code while transferring chunk: %d", res.StatusCode)
}
serverOffset, err = strconv.ParseInt(resp.Header.Get("upload-offset"), 10, 64)
if err != nil {
return
}
sfid := resp.Header.Get("putio-file-id")
sfid := res.Header.Get("putio-file-id")
if sfid != "" {
fileID, err = strconv.ParseInt(sfid, 10, 64)
if err != nil {
return
return 0, err
}
}
return
return fileID, nil
}

func (f *Fs) makeUploadHeadRequest(ctx context.Context, location string) (*http.Request, error) {
req, err := http.NewRequest("HEAD", location, nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
return req, nil
}

func (f *Fs) makeUploadPatchRequest(ctx context.Context, location string, in io.Reader, offset, length int64) (*http.Request, error) {
func (f *Fs) makeUploadPatchRequest(location string, in io.Reader, offset, length int64) (*http.Request, error) {
req, err := http.NewRequest("PATCH", location, in)
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
req.Header.Set("upload-offset", strconv.FormatInt(offset, 10))
req.Header.Set("content-length", strconv.FormatInt(length, 10))
@@ -546,7 +503,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
params := url.Values{}
params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
params.Set("parent_id", directoryID)
params.Set("name", enc.FromStandardName(leaf))
params.Set("name", leaf)
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/copy", strings.NewReader(params.Encode()))
if err != nil {
return false, err
@@ -585,7 +542,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
params := url.Values{}
params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
params.Set("parent_id", directoryID)
params.Set("name", enc.FromStandardName(leaf))
params.Set("name", leaf)
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode()))
if err != nil {
return false, err
@@ -674,7 +631,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
params := url.Values{}
params.Set("file_id", srcID)
params.Set("parent_id", dstDirectoryID)
params.Set("name", enc.FromStandardName(leaf))
params.Set("name", leaf)
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode()))
if err != nil {
return false, err
@@ -137,7 +137,7 @@ func (o *Object) readEntry(ctx context.Context) (f *putio.File, err error) {
}
err = o.fs.pacer.Call(func() (bool, error) {
// fs.Debugf(o, "requesting child. directoryID: %s, name: %s", directoryID, leaf)
req, err := o.fs.client.NewRequest(ctx, "GET", "/v2/files/"+directoryID+"/child?name="+url.QueryEscape(enc.FromStandardName(leaf)), nil)
req, err := o.fs.client.NewRequest(ctx, "GET", "/v2/files/"+directoryID+"/child?name="+url.PathEscape(leaf), nil)
if err != nil {
return false, err
}
@@ -147,12 +147,6 @@ func (o *Object) readEntry(ctx context.Context) (f *putio.File, err error) {
}
return shouldRetry(err)
})
if err != nil {
return nil, err
}
if resp.File.IsDir() {
return nil, fs.ErrorNotAFile
}
return &resp.File, err
}

@@ -229,11 +223,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var resp *http.Response
headers := fs.OpenOptionHeaders(options)
err = o.fs.pacer.Call(func() (bool, error) {
req, err := http.NewRequest(http.MethodGet, storageURL, nil)
if err != nil {
return shouldRetry(err)
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req, _ := http.NewRequest(http.MethodGet, storageURL, nil)
req.Header.Set("User-Agent", o.fs.client.UserAgent)

// merge headers with extra headers

@@ -8,25 +8,11 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)

/*
// TestPutio
stringNeedsEscaping = []rune{
'/', '\x00'
}
maxFileLength = 255
canWriteUnnormalized = true
canReadUnnormalized = true
canReadRenormalized = true
canStream = false
*/
const enc = encodings.Putio

// Constants
const (
rcloneClientID = "4131"

@@ -20,7 +20,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
@@ -30,8 +29,6 @@ import (
qs "github.com/yunify/qingstor-sdk-go/v3/service"
)

const enc = encodings.QingStor

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -187,8 +184,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return enc.FromStandardName(bucketName), enc.FromStandardPath(bucketPath)
return bucket.Split(path.Join(f.root, rootRelativePath))
}

// split returns bucket and bucketPath from the object
@@ -269,7 +265,7 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
cf.Protocol = protocol
cf.Host = host
cf.Port = port
// unsupported in v3.1: cf.ConnectionRetries = opt.ConnectionRetries
cf.ConnectionRetries = opt.ConnectionRetries
cf.Connection = fshttp.NewClient(fs.Config)

return qs.Init(cf)
@@ -357,8 +353,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
encodedDirectory := enc.FromStandardPath(f.rootDirectory)
_, err = bucketInit.HeadObject(encodedDirectory, &qs.HeadObjectInput{})
_, err = bucketInit.HeadObject(f.rootDirectory, &qs.HeadObjectInput{})
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
@@ -555,7 +550,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
remote := *commonPrefix
remote = enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
@@ -575,13 +569,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}

for _, object := range resp.Keys {
remote := qs.StringValue(object.Key)
remote = enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
key := qs.StringValue(object.Key)
if !strings.HasPrefix(key, prefix) {
fs.Logf(f, "Odd name received %q", key)
continue
}
remote = remote[len(prefix):]
remote := key[len(prefix):]
if addBucket {
remote = path.Join(bucket, remote)
}
@@ -653,7 +646,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
}

for _, bucket := range resp.Buckets {
d := fs.NewDir(enc.ToStandardName(qs.StringValue(bucket.Name)), qs.TimeValue(bucket.Created))
d := fs.NewDir(qs.StringValue(bucket.Name), qs.TimeValue(bucket.Created))
entries = append(entries, d)
}
return entries, nil

261
backend/s3/s3.go
@@ -17,16 +17,12 @@ import (
"context"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"sync"
"time"

"github.com/aws/aws-sdk-go/aws"
@@ -45,7 +41,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
@@ -55,8 +50,6 @@ import (
"github.com/rclone/rclone/lib/rest"
)

const enc = encodings.S3

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -694,37 +687,16 @@ The minimum is 0 and the maximum is 5GB.`,
Name: "chunk_size",
Help: `Chunk size to use for uploading.

When uploading files larger than upload_cutoff or files with unknown
size (eg from "rclone rcat" or uploaded with "rclone mount" or google
photos or google docs) they will be uploaded as multipart uploads
using this chunk size.
When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.

Note that "--s3-upload-concurrency" chunks of this size are buffered
in memory per transfer.

If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.

Rclone will automatically increase the chunk size when uploading a
large file of known size to stay below the 10,000 chunks limit.

Files of unknown size are uploaded with the configured
chunk_size. Since the default chunk size is 5MB and there can be at
most 10,000 chunks, this means that by default the maximum size of
file you can stream upload is 48GB. If you wish to stream upload
larger files then you will need to increase chunk_size.`,
enough memory, then increasing this will speed up the transfers.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy

Any files larger than this that need to be server side copied will be
copied in chunks of this size.

The minimum is 0 and the maximum is 5GB.`,
Default: fs.SizeSuffix(maxSizeForCopy),
Advanced: true,
}, {
Name: "disable_checksum",
Help: "Don't store MD5 checksum with object metadata",
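A quick check of the 48GB figure quoted in the removed chunk_size help above (a sketch, not rclone code): with 5MB parts and S3's limit of 10,000 parts per multipart upload, the largest streamable file is 5MB * 10000, which is about 48.8GiB.

package main

import "fmt"

func main() {
	const (
		partSize = 5 * 1024 * 1024 // default chunk size (s3manager.MinUploadPartSize)
		maxParts = 10000           // S3 multipart upload part limit
	)
	limit := int64(partSize) * maxParts
	// prints: max streamed upload: 52428800000 bytes (~48.8 GiB)
	fmt.Printf("max streamed upload: %d bytes (~%.1f GiB)\n",
		limit, float64(limit)/(1<<30))
}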
@@ -776,28 +748,18 @@ Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`,
Default: false,
Advanced: true,
}, {
Name: "leave_parts_on_error",
Provider: "AWS",
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.

It should be set to true for resuming uploads across different sessions.

WARNING: Storing parts of an incomplete multipart upload counts towards space usage on S3 and will add additional costs if not cleaned up.
`,
Default: false,
Advanced: true,
}},
})
}

// Constants
const (
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
listChunkSize = 1000 // number of items to read at once
maxRetries = 10 // number of retries to make of operations
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
listChunkSize = 1000 // number of items to read at once
maxRetries = 10 // number of retries to make of operations
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
@@ -819,7 +781,6 @@ type Options struct {
SSEKMSKeyID string `config:"sse_kms_key_id"`
StorageClass string `config:"storage_class"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableChecksum bool `config:"disable_checksum"`
SessionToken string `config:"session_token"`
@@ -827,7 +788,6 @@ type Options struct {
ForcePathStyle bool `config:"force_path_style"`
V2Auth bool `config:"v2_auth"`
UseAccelerateEndpoint bool `config:"use_accelerate_endpoint"`
LeavePartsOnError bool `config:"leave_parts_on_error"`
}

// Fs represents a remote s3 server
@@ -939,8 +899,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return enc.FromStandardName(bucketName), enc.FromStandardPath(bucketPath)
return bucket.Split(path.Join(f.root, rootRelativePath))
}

// split returns bucket and bucketPath from the object
@@ -983,7 +942,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
Client: ec2metadata.New(session.New(), &aws.Config{
HTTPClient: lowTimeoutClient,
}),
ExpiryWindow: 3 * time.Minute,
ExpiryWindow: 3,
},
}
cred := credentials.NewChainCredentials(providers)
@@ -1136,10 +1095,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}).Fill(f)
if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the object exists
encodedDirectory := enc.FromStandardPath(f.rootDirectory)
req := s3.HeadObjectInput{
Bucket: &f.rootBucket,
Key: &encodedDirectory,
Key: &f.rootDirectory,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.HeadObject(&req)
@@ -1260,22 +1218,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
delimiter = "/"
}
var marker *string
// URL encode the listings so we can use control characters in object names
// See: https://github.com/aws/aws-sdk-go/issues/1914
//
// However this doesn't work perfectly under Ceph (and hence DigitalOcean/Dreamhost) because
// it doesn't encode CommonPrefixes.
// See: https://tracker.ceph.com/issues/41870
//
// This also does not work under IBM COS: see https://github.com/rclone/rclone/issues/3345
// though maybe it does on some versions.
//
// This does work with minio, but support was only added relatively recently
// https://github.com/minio/minio/pull/7265
//
// So we enable it only on providers we know support it properly; all others can retry when an
// XML syntax error is detected.
var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba")
for {
// FIXME need to implement ALL loop
req := s3.ListObjectsInput{
@@ -1285,26 +1227,10 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
MaxKeys: &maxKeys,
Marker: marker,
}
if urlEncodeListings {
req.EncodingType = aws.String(s3.EncodingTypeUrl)
}
var resp *s3.ListObjectsOutput
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListObjectsWithContext(ctx, &req)
if err != nil && !urlEncodeListings {
if awsErr, ok := err.(awserr.RequestFailure); ok {
if origErr := awsErr.OrigErr(); origErr != nil {
if _, ok := origErr.(*xml.SyntaxError); ok {
// Retry the listing with URL encoding as there were characters that XML can't encode
urlEncodeListings = true
req.EncodingType = aws.String(s3.EncodingTypeUrl)
fs.Debugf(f, "Retrying listing because of characters which can't be XML encoded")
return true, err
}
}
}
}
return f.shouldRetry(err)
})
if err != nil {
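A standalone sketch of the error unwrapping used by the listing retry above: an XML syntax error surfacing through the AWS SDK's request failure means the response contained characters the server could not emit as valid XML, so the caller switches to URL-encoded listings and retries. isXMLSyntaxError is an illustrative helper name, not part of rclone.

package sketch

import (
	"encoding/xml"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// isXMLSyntaxError reports whether err wraps an *xml.SyntaxError from
// decoding an S3 listing response.
func isXMLSyntaxError(err error) bool {
	if awsErr, ok := err.(awserr.RequestFailure); ok {
		if origErr := awsErr.OrigErr(); origErr != nil {
			_, ok := origErr.(*xml.SyntaxError)
			return ok
		}
	}
	return false
}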
@@ -1333,14 +1259,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
remote := *commonPrefix.Prefix
if urlEncodeListings {
remote, err = url.QueryUnescape(remote)
if err != nil {
fs.Logf(f, "failed to URL decode %q in listing common prefix: %v", *commonPrefix.Prefix, err)
continue
}
}
remote = enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
@@ -1360,14 +1278,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
for _, object := range resp.Contents {
remote := aws.StringValue(object.Key)
if urlEncodeListings {
remote, err = url.QueryUnescape(remote)
if err != nil {
fs.Logf(f, "failed to URL decode %q in listing: %v", aws.StringValue(object.Key), err)
continue
}
}
remote = enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
@@ -1398,12 +1308,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
} else {
marker = resp.NextMarker
}
if urlEncodeListings {
*marker, err = url.QueryUnescape(*marker)
if err != nil {
return errors.Wrapf(err, "failed to URL decode NextMarker %q", *marker)
}
}
}
return nil
}
@@ -1458,7 +1362,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
return nil, err
}
for _, bucket := range resp.Buckets {
bucketName := enc.ToStandardName(aws.StringValue(bucket.Name))
bucketName := aws.StringValue(bucket.Name)
f.cache.MarkOK(bucketName)
d := fs.NewDir(bucketName, aws.TimeValue(bucket.CreationDate))
entries = append(entries, d)
@@ -1654,7 +1558,7 @@ func pathEscape(s string) string {
//
// It adds the boilerplate to the req passed in and calls the s3
// method
func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, srcSize int64) error {
func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string) error {
req.Bucket = &dstBucket
req.ACL = &f.opt.ACL
req.Key = &dstPath
@@ -1669,113 +1573,12 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
if req.StorageClass == nil && f.opt.StorageClass != "" {
req.StorageClass = &f.opt.StorageClass
}

if srcSize >= int64(f.opt.CopyCutoff) {
return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, srcSize)
}
return f.pacer.Call(func() (bool, error) {
_, err := f.c.CopyObjectWithContext(ctx, req)
return f.shouldRetry(err)
})
}

func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
start := partIndex * partSize
var ends string
if partIndex == numParts-1 {
if totalSize >= 1 {
ends = strconv.FormatInt(totalSize-1, 10)
}
} else {
ends = strconv.FormatInt(start+partSize-1, 10)
}
return fmt.Sprintf("bytes=%v-%v", start, ends)
}
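A worked example for calculateRange above (a sketch that inlines the same logic; the real function lives in backend/s3/s3.go). The ranges are inclusive and together cover exactly [0, totalSize-1], with the last part absorbing the remainder:

package main

import (
	"fmt"
	"strconv"
)

func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
	start := partIndex * partSize
	var ends string
	if partIndex == numParts-1 {
		if totalSize >= 1 {
			ends = strconv.FormatInt(totalSize-1, 10)
		}
	} else {
		ends = strconv.FormatInt(start+partSize-1, 10)
	}
	return fmt.Sprintf("bytes=%v-%v", start, ends)
}

func main() {
	var partSize, totalSize int64 = 4 << 20, 10 << 20 // 4MiB parts, 10MiB object
	numParts := (totalSize-1)/partSize + 1            // -> 3 parts
	for i := int64(0); i < numParts; i++ {
		fmt.Println(calculateRange(partSize, i, numParts, totalSize))
	}
	// Output:
	// bytes=0-4194303
	// bytes=4194304-8388607
	// bytes=8388608-10485759
}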

func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, srcSize int64) (err error) {
var cout *s3.CreateMultipartUploadOutput
if err := f.pacer.Call(func() (bool, error) {
var err error
cout, err = f.c.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
Bucket: &dstBucket,
Key: &dstPath,
})
return f.shouldRetry(err)
}); err != nil {
return err
}
uid := cout.UploadId

defer func() {
if err != nil {
// We can try to abort the upload, but ignore the error.
_ = f.pacer.Call(func() (bool, error) {
_, err := f.c.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
Bucket: &dstBucket,
Key: &dstPath,
UploadId: uid,
RequestPayer: req.RequestPayer,
})
return f.shouldRetry(err)
})
}
}()

partSize := int64(f.opt.CopyCutoff)
numParts := (srcSize-1)/partSize + 1

var parts []*s3.CompletedPart
for partNum := int64(1); partNum <= numParts; partNum++ {
if err := f.pacer.Call(func() (bool, error) {
partNum := partNum
uploadPartReq := &s3.UploadPartCopyInput{
Bucket: &dstBucket,
Key: &dstPath,
PartNumber: &partNum,
UploadId: uid,
CopySourceRange: aws.String(calculateRange(partSize, partNum-1, numParts, srcSize)),
// Args copy from req
CopySource: req.CopySource,
CopySourceIfMatch: req.CopySourceIfMatch,
CopySourceIfModifiedSince: req.CopySourceIfModifiedSince,
CopySourceIfNoneMatch: req.CopySourceIfNoneMatch,
CopySourceIfUnmodifiedSince: req.CopySourceIfUnmodifiedSince,
CopySourceSSECustomerAlgorithm: req.CopySourceSSECustomerAlgorithm,
CopySourceSSECustomerKey: req.CopySourceSSECustomerKey,
CopySourceSSECustomerKeyMD5: req.CopySourceSSECustomerKeyMD5,
RequestPayer: req.RequestPayer,
SSECustomerAlgorithm: req.SSECustomerAlgorithm,
SSECustomerKey: req.SSECustomerKey,
SSECustomerKeyMD5: req.SSECustomerKeyMD5,
}
uout, err := f.c.UploadPartCopyWithContext(ctx, uploadPartReq)
if err != nil {
return f.shouldRetry(err)
}
parts = append(parts, &s3.CompletedPart{
PartNumber: &partNum,
ETag: uout.CopyPartResult.ETag,
})
return false, nil
}); err != nil {
return err
}
}

return f.pacer.Call(func() (bool, error) {
_, err := f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
Bucket: &dstBucket,
Key: &dstPath,
MultipartUpload: &s3.CompletedMultipartUpload{
Parts: parts,
},
RequestPayer: req.RequestPayer,
UploadId: uid,
})
return f.shouldRetry(err)
})
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
@@ -1800,7 +1603,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
req := s3.CopyObjectInput{
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
}
err = f.copy(ctx, &req, dstBucket, dstPath, srcBucket, srcPath, srcObj.Size())
err = f.copy(ctx, &req, dstBucket, dstPath, srcBucket, srcPath)
if err != nil {
return nil, err
}
@@ -1900,9 +1703,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
o.etag = aws.StringValue(resp.ETag)
o.bytes = size
o.meta = resp.Metadata
if o.meta == nil {
o.meta = map[string]*string{}
}
o.storageClass = aws.StringValue(resp.StorageClass)
if resp.LastModified == nil {
fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
@@ -1949,6 +1749,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
}
o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))

if o.bytes >= maxSizeForCopy {
fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
return nil
}

// Can't update metadata here, so return this error to force a recopy
if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" {
return fs.ErrorCantSetModTime
@@ -1961,7 +1766,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
Metadata: o.meta,
MetadataDirective: aws.String(s3.MetadataDirectiveReplace), // replace metadata with that passed in
}
return o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o.bytes)
return o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath)
}

// Storable returns a boolean indicating if this object is storable
@@ -2005,8 +1810,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return resp.Body, nil
}

var warnStreamUpload sync.Once

// Update the Object from in with modTime and size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split()
@@ -2022,18 +1825,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if multipart {
uploader = s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
u.Concurrency = o.fs.opt.UploadConcurrency
u.LeavePartsOnError = o.fs.opt.LeavePartsOnError
u.LeavePartsOnError = false
u.S3 = o.fs.c
u.PartSize = int64(o.fs.opt.ChunkSize)

// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 5MB). With a maximum number of parts (10,000) this will be a file of
// 48GB which seems like a not too unreasonable limit.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(o.fs, "Streaming uploads using chunk size %v will have maximum file size of %v",
o.fs.opt.ChunkSize, fs.SizeSuffix(u.PartSize*s3manager.MaxUploadParts))
})
// Make parts as small as possible while still being able to upload to the
// S3 file size limit. Rounded up to nearest MB.
u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
return
}
// Adjust PartSize until the number of parts is small enough.
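A back-of-envelope check of the removed stream part-size formula above (a sketch, not rclone code): dividing the 5TiB maximum object size by the 10,000-part limit and rounding up to the next MiB gives the part size needed to stream a file of unknown size all the way to the S3 limit.

package main

import "fmt"

func main() {
	const (
		maxFileSize    = int64(5) * 1024 * 1024 * 1024 * 1024 // 5 TiB, largest S3 object
		maxUploadParts = 10000                                // s3manager.MaxUploadParts
	)
	// Same expression as the deleted line: round up to the nearest MiB.
	partSize := (((maxFileSize / maxUploadParts) >> 20) + 1) << 20
	// prints: part size: 550502400 bytes (= 525 MiB)
	fmt.Printf("part size: %d bytes (= %d MiB)\n", partSize, partSize>>20)
}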
@@ -2052,7 +1851,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// read the md5sum if available for non multipart and if
// disable checksum isn't present.
var md5sum string
if !multipart && !o.fs.opt.DisableChecksum {
if !multipart || !o.fs.opt.DisableChecksum {
hash, err := src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(hash) {
hashBytes, err := hex.DecodeString(hash)
@@ -2126,10 +1925,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.Wrap(err, "s3 upload: sign request")
}

if o.fs.opt.V2Auth && headers == nil {
headers = putObj.HTTPRequest.Header
}

// Set request to nil if empty so as not to make chunked encoding
if size == 0 {
in = nil
@@ -2140,7 +1935,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return errors.Wrap(err, "s3 upload: new request")
}
httpReq = httpReq.WithContext(ctx) // go1.13 can use NewRequestWithContext
httpReq = httpReq.WithContext(ctx)

// set the headers we signed and the length
httpReq.Header = headers
@@ -2205,7 +2000,7 @@ func (o *Object) SetTier(tier string) (err error) {
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
StorageClass: aws.String(tier),
}
err = o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o.bytes)
err = o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath)
if err != nil {
return err
}

@@ -29,17 +29,15 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
sshagent "github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
"golang.org/x/time/rate"
)

const (
connectionsPerSecond = 10 // don't make more than this many ssh connections/s
hashCommandNotSupported = "none"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)

var (
@@ -88,19 +86,8 @@ requested from the ssh-agent. This allows to avoid ` + "`Too many authentication
when the ssh-agent contains many keys.`,
Default: false,
}, {
Name: "use_insecure_cipher",
Help: `Enable the use of insecure ciphers and key exchange methods.

This enables the use of the following insecure ciphers and key exchange methods:

- aes128-cbc
- aes192-cbc
- aes256-cbc
- 3des-cbc
- diffie-hellman-group-exchange-sha256
- diffie-hellman-group-exchange-sha1

Those algorithms are insecure and may allow plaintext data to be recovered by an attacker.`,
Name: "use_insecure_cipher",
Help: "Enable the use of the aes128-cbc cipher and diffie-hellman-group-exchange-sha256, diffie-hellman-group-exchange-sha1 key exchange. Those algorithms are insecure and may allow plaintext data to be recovered by an attacker.",
Default: false,
Examples: []fs.OptionExample{
{
@@ -116,14 +103,9 @@ Those algorithms are insecure and may allow plaintext data to be recovered by an
Default: false,
Help: "Disable the execution of SSH commands to determine if remote file hashing is available.\nLeave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
}, {
Name: "ask_password",
Default: false,
Help: `Allow asking for SFTP password when needed.

If this is set and no password is supplied then rclone will:
- ask for a password
- not contact the ssh agent
`,
Name: "ask_password",
Default: false,
Help: "Allow asking for SFTP password when needed.",
Advanced: true,
}, {
Name: "path_override",
@@ -156,11 +138,6 @@ Home directory can be found in a shared folder called "home"
Default: "",
Help: "The command used to read sha1 hashes. Leave blank for autodetect.",
Advanced: true,
}, {
Name: "skip_links",
Default: false,
Help: "Set to skip any symlinks and any other non regular files.",
Advanced: true,
}},
}
fs.Register(fsi)
@@ -182,7 +159,6 @@ type Options struct {
SetModTime bool `config:"set_modtime"`
Md5sumCommand string `config:"md5sum_command"`
Sha1sumCommand string `config:"sha1sum_command"`
SkipLinks bool `config:"skip_links"`
}

// Fs stores the interface to the remote SFTP files
@@ -198,7 +174,7 @@ type Fs struct {
cachedHashes *hash.Set
poolMu sync.Mutex
pool []*conn
pacer *fs.Pacer // pacer for operations
connLimit *rate.Limiter // for limiting number of connections per second
}

// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -278,6 +254,10 @@ func (c *conn) closed() error {
// Open a new connection to the SFTP server.
func (f *Fs) sftpConnection() (c *conn, err error) {
// Rate limit rate of new connections
err = f.connLimit.Wait(context.Background())
if err != nil {
return nil, errors.Wrap(err, "limiter failed in connect")
}
c = &conn{
err: make(chan error, 1),
}
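A standalone sketch of the connection throttle introduced above: a golang.org/x/time/rate limiter with a burst of 1 makes Wait block, so new SSH dials start at most connectionsPerSecond times per second. dialThrottled is an illustrative wrapper, not rclone's API.

package sketch

import (
	"context"

	"golang.org/x/time/rate"
)

const connectionsPerSecond = 10

var connLimit = rate.NewLimiter(rate.Limit(connectionsPerSecond), 1)

// dialThrottled waits for a limiter token before dialing; dial is any
// function that opens the real connection.
func dialThrottled(ctx context.Context, dial func() error) error {
	if err := connLimit.Wait(ctx); err != nil {
		return err
	}
	return dial()
}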
@@ -311,14 +291,7 @@ func (f *Fs) getSftpConnection() (c *conn, err error) {
if c != nil {
return c, nil
}
err = f.pacer.Call(func() (bool, error) {
c, err = f.sftpConnection()
if err != nil {
return true, err
}
return false, nil
})
return c, err
return f.sftpConnection()
}

// Return an SFTP connection to the pool
@@ -385,13 +358,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

if opt.UseInsecureCipher {
sshConfig.Config.SetDefaults()
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc", "aes192-cbc", "aes256-cbc", "3des-cbc")
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc")
sshConfig.Config.KeyExchanges = append(sshConfig.Config.KeyExchanges, "diffie-hellman-group-exchange-sha1", "diffie-hellman-group-exchange-sha256")
}

keyFile := env.ShellExpand(opt.KeyFile)
// Add ssh agent-auth if no password or file specified
if (opt.Pass == "" && keyFile == "" && !opt.AskPassword) || opt.KeyUseAgent {
if (opt.Pass == "" && keyFile == "") || opt.KeyUseAgent {
sshAgentClient, _, err := sshagent.New()
if err != nil {
return nil, errors.Wrap(err, "couldn't connect to ssh-agent")
@@ -476,7 +449,7 @@ func NewFsWithConnection(ctx context.Context, name string, root string, m config
config: sshConfig,
url: "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
mkdirLock: newStringLock(),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
connLimit: rate.NewLimiter(rate.Limit(connectionsPerSecond), 1),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
@@ -606,16 +579,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
remote := path.Join(dir, info.Name())
// If file is a symlink (not a regular file is the best cross platform test we can do), do a stat to
// pick up the size and type of the destination, instead of the size and type of the symlink.
if !info.Mode().IsRegular() && !info.IsDir() {
if f.opt.SkipLinks {
// skip non regular file if SkipLinks is set
continue
}
if !info.Mode().IsRegular() {
oldInfo := info
info, err = f.stat(remote)
if err != nil {
if !os.IsNotExist(err) {
fs.Errorf(remote, "stat of non-regular file failed: %v", err)
fs.Errorf(remote, "stat of non-regular file/dir failed: %v", err)
}
info = oldInfo
}
@@ -976,7 +945,6 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
escapedPath = shellEscape(path.Join(o.fs.opt.PathOverride, o.remote))
}
err = session.Run(hashCmd + " " + escapedPath)
fs.Debugf(nil, "sftp cmd = %s", escapedPath)
if err != nil {
_ = session.Close()
fs.Debugf(o, "Failed to calculate %v hash: %v (%s)", r, err, bytes.TrimSpace(stderr.Bytes()))
@@ -984,10 +952,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
}

_ = session.Close()
b := stdout.Bytes()
fs.Debugf(nil, "sftp output = %q", b)
str := parseHash(b)
fs.Debugf(nil, "sftp hash = %q", str)
str := parseHash(stdout.Bytes())
if r == hash.MD5 {
o.md5sum = &str
} else if r == hash.SHA1 {
@@ -996,7 +961,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return str, nil
}

var shellEscapeRegex = regexp.MustCompile("[^A-Za-z0-9_.,:/\\@\u0080-\uFFFFFFFF\n-]")
var shellEscapeRegex = regexp.MustCompile(`[^A-Za-z0-9_.,:/@\n-]`)

// Escape a string s.t. it cannot cause unintended behavior
// when sending it to a shell.
@@ -1009,9 +974,7 @@ func shellEscape(str string) string {
// an invocation of md5sum/sha1sum to a hash string
// as expected by the rest of this application
func parseHash(bytes []byte) string {
// For strings with backslash *sum writes a leading \
// https://unix.stackexchange.com/q/313733/94054
return strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0] // Split at hash / filename separator
return strings.Split(string(bytes), " ")[0] // Split at hash / filename separator
}

// Parses the byte array output from the SSH session

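A sketch of the case the parseHash fix above handles: GNU md5sum/sha1sum prefix the whole output line with a backslash when the filename contains one, so the first field is "\<hash>" unless the leading backslash is trimmed (see https://unix.stackexchange.com/q/313733/94054):

package main

import (
	"fmt"
	"strings"
)

// parseHash mirrors the fixed version above: strip any leading
// backslash, then take the first space-separated field.
func parseHash(b []byte) string {
	return strings.Split(strings.TrimLeft(string(b), "\\"), " ")[0]
}

func main() {
	plain := []byte("d41d8cd98f00b204e9800998ecf8427e  file.txt\n")
	escaped := []byte("\\d41d8cd98f00b204e9800998ecf8427e  back\\\\slash.txt\n")
	fmt.Println(parseHash(plain))   // d41d8cd98f00b204e9800998ecf8427e
	fmt.Println(parseHash(escaped)) // same hash, leading backslash stripped
}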
@@ -1,152 +0,0 @@
// Package api contains definitions for using the premiumize.me API
package api

import (
"fmt"
"time"

"github.com/pkg/errors"
)

// ListRequestSelect should be used in $select for Items/Children
const ListRequestSelect = "odata.count,FileCount,Name,FileName,CreationDate,IsHidden,FileSizeBytes,odata.type,Id,Hash,ClientModifiedDate"

// ListResponse is returned from the Items/Children call
type ListResponse struct {
OdataCount int `json:"odata.count"`
Value []Item `json:"value"`
}

// Item Types
const (
ItemTypeFolder = "ShareFile.Api.Models.Folder"
ItemTypeFile = "ShareFile.Api.Models.File"
)

// Item refers to a file or folder
type Item struct {
FileCount int32 `json:"FileCount,omitempty"`
Name string `json:"Name,omitempty"`
FileName string `json:"FileName,omitempty"`
CreatedAt time.Time `json:"CreationDate,omitempty"`
ModifiedAt time.Time `json:"ClientModifiedDate,omitempty"`
IsHidden bool `json:"IsHidden,omitempty"`
Size int64 `json:"FileSizeBytes,omitempty"`
Type string `json:"odata.type,omitempty"`
ID string `json:"Id,omitempty"`
Hash string `json:"Hash,omitempty"`
}

// Error is an odata error return
type Error struct {
Code string `json:"code"`
Message struct {
Lang string `json:"lang"`
Value string `json:"value"`
} `json:"message"`
Reason string `json:"reason"`
}

// Satisfy error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s: %s: %s", e.Message.Value, e.Code, e.Reason)
}

// Check Error satisfies error interface
var _ error = &Error{}

// DownloadSpecification is the response to /Items/Download
type DownloadSpecification struct {
Token string `json:"DownloadToken"`
URL string `json:"DownloadUrl"`
Metadata string `json:"odata.metadata"`
Type string `json:"odata.type"`
}

// UploadRequest is set to /Items/Upload2 to receive an UploadSpecification
type UploadRequest struct {
Method string `json:"method"` // Upload method: one of: standard, streamed or threaded
Raw bool `json:"raw"` // Raw post if true or MIME upload if false
Filename string `json:"fileName"` // Uploaded item file name.
Filesize *int64 `json:"fileSize,omitempty"` // Uploaded item file size.
Overwrite bool `json:"overwrite"` // Indicates whether items with the same name will be overwritten or not.
CreatedDate time.Time `json:"ClientCreatedDate"` // Created Date of this Item.
ModifiedDate time.Time `json:"ClientModifiedDate"` // Modified Date of this Item.
BatchID string `json:"batchId,omitempty"` // Indicates part of a batch. Batched uploads do not send notification until the whole batch is completed.
BatchLast *bool `json:"batchLast,omitempty"` // Indicates this is the last in a batch. Upload notifications for the whole batch are sent after this upload.
CanResume *bool `json:"canResume,omitempty"` // Indicates uploader supports resume.
StartOver *bool `json:"startOver,omitempty"` // Indicates uploader wants to restart the file - i.e., ignore previous failed upload attempts.
Tool string `json:"tool,omitempty"` // Identifies the uploader tool.
Title string `json:"title,omitempty"` // Item Title
Details string `json:"details,omitempty"` // Item description
IsSend *bool `json:"isSend,omitempty"` // Indicates that this upload is part of a Send operation
SendGUID string `json:"sendGuid,omitempty"` // Used if IsSend is true. Specifies which Send operation this upload is part of.
OpID string `json:"opid,omitempty"` // Used for Asynchronous copy/move operations - called by Zones to push files to other Zones
ThreadCount *int `json:"threadCount,omitempty"` // Specifies the number of threads the threaded uploader will use. Only used if method is threaded, ignored otherwise
Notify *bool `json:"notify,omitempty"` // Indicates whether users will be notified of this upload - based on folder preferences
ExpirationDays *int `json:"expirationDays,omitempty"` // File expiration days
BaseFileID string `json:"baseFileId,omitempty"` // Used to check conflict in file during File Upload.
}

// UploadSpecification is returned from /Items/Upload
type UploadSpecification struct {
Method string `json:"Method"` // The Upload method that must be used for this upload
PrepareURI string `json:"PrepareUri"` // If provided, clients must issue a request to this Uri before uploading any data.
ChunkURI string `json:"ChunkUri"` // Specifies the URI the client must send the file data to
FinishURI string `json:"FinishUri"` // If provided, specifies the final call the client must perform to finish the upload process
ProgressData string `json:"ProgressData"` // Allows the client to check progress of standard uploads
IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supported.
ResumeIndex int64 `json:"ResumeIndex"` // Specifies the initial index for resuming, if IsResume is true.
ResumeOffset int64 `json:"ResumeOffset"` // Specifies the initial file offset by bytes, if IsResume is true
ResumeFileHash string `json:"ResumeFileHash"` // Specifies the MD5 hash of the first ResumeOffset bytes of the partial file found at the server
MaxNumberOfThreads int `json:"MaxNumberOfThreads"` // Specifies the max number of chunks that can be sent simultaneously for threaded uploads
}

// UploadFinishResponse is returned from calling UploadSpecification.FinishURI
type UploadFinishResponse struct {
Error bool `json:"error"`
ErrorMessage string `json:"errorMessage"`
ErrorCode int `json:"errorCode"`
Value []struct {
UploadID string `json:"uploadid"`
ParentID string `json:"parentid"`
ID string `json:"id"`
StreamID string `json:"streamid"`
FileName string `json:"filename"`
DisplayName string `json:"displayname"`
Size int `json:"size"`
Md5 string `json:"md5"`
} `json:"value"`
}

// ID returns the ID of the first response if available
func (finish *UploadFinishResponse) ID() (string, error) {
if finish.Error {
return "", errors.Errorf("upload failed: %s (%d)", finish.ErrorMessage, finish.ErrorCode)
}
if len(finish.Value) == 0 {
return "", errors.New("upload failed: no results returned")
}
return finish.Value[0].ID, nil
}

// Parent is the ID of the parent folder
type Parent struct {
ID string `json:"Id,omitempty"`
}

// Zone is where the data is stored
type Zone struct {
ID string `json:"Id,omitempty"`
}

// UpdateItemRequest is sent to PATCH /v3/Items(id)
type UpdateItemRequest struct {
Name string `json:"Name,omitempty"`
FileName string `json:"FileName,omitempty"`
Description string `json:"Description,omitempty"`
ExpirationDate *time.Time `json:"ExpirationDate,omitempty"`
Parent *Parent `json:"Parent,omitempty"`
Zone *Zone `json:"Zone,omitempty"`
ModifiedAt *time.Time `json:"ClientModifiedDate,omitempty"`
}
@@ -1,22 +0,0 @@
// +build ignore

package main

import (
"log"
"net/http"

"github.com/shurcooL/vfsgen"
)

func main() {
var AssetDir http.FileSystem = http.Dir("./tzdata")
err := vfsgen.Generate(AssetDir, vfsgen.Options{
PackageName: "sharefile",
BuildTags: "!dev",
VariableName: "tzdata",
})
if err != nil {
log.Fatalln(err)
}
}
File diff suppressed because it is too large
@@ -1,34 +0,0 @@
// Test filesystem interface
package sharefile

import (
"testing"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestSharefile:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
CeilChunkSize: fstests.NextPowerOfTwo,
},
})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}

var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)
File diff suppressed because one or more lines are too long
@@ -1,18 +0,0 @@
#!/bin/bash

set -e

# Extract just the America/New_York timezone from Go's zoneinfo.zip
tzinfo=$(go env GOROOT)/lib/time/zoneinfo.zip

rm -rf tzdata
mkdir tzdata
cd tzdata
unzip ${tzinfo} America/New_York

cd ..
# Make the embedded assets
go run generate_tzdata.go

# tidy up
rm -rf tzdata
@@ -1,261 +0,0 @@
// Upload large files for sharefile
//
// Docs - https://api.sharefile.com/rest/docs/resource.aspx?name=Items#Upload_File

package sharefile

import (
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"strings"
"sync"

"github.com/pkg/errors"
"github.com/rclone/rclone/backend/sharefile/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
)

// largeUpload is used to control the upload of large files which need chunking
type largeUpload struct {
ctx context.Context
f *Fs // parent Fs
o *Object // object being uploaded
in io.Reader // read the data from here
wrap accounting.WrapFn // account parts being transferred
size int64 // total size
parts int64 // calculated number of parts, if known
info *api.UploadSpecification // where to post chunks etc
threads int // number of threads to use in upload
streamed bool // set if using streamed upload
}

// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, info *api.UploadSpecification) (up *largeUpload, err error) {
size := src.Size()
parts := int64(-1)
if size >= 0 {
parts = size / int64(o.fs.opt.ChunkSize)
if size%int64(o.fs.opt.ChunkSize) != 0 {
parts++
}
}

var streamed bool
switch strings.ToLower(info.Method) {
case "streamed":
streamed = true
case "threaded":
streamed = false
default:
return nil, errors.Errorf("can't use method %q with newLargeUpload", info.Method)
}

threads := fs.Config.Transfers
if threads > info.MaxNumberOfThreads {
threads = info.MaxNumberOfThreads
}

// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
in, wrap := accounting.UnWrap(in)
up = &largeUpload{
ctx: ctx,
f: f,
o: o,
in: in,
wrap: wrap,
size: size,
threads: threads,
info: info,
parts: parts,
streamed: streamed,
}
return up, nil
}

// parse the api.UploadFinishResponse in respBody
func (up *largeUpload) parseUploadFinishResponse(respBody []byte) (err error) {
var finish api.UploadFinishResponse
err = json.Unmarshal(respBody, &finish)
if err != nil {
// Sometimes the unmarshal fails in which case return the body
return errors.Errorf("upload: bad response: %q", bytes.TrimSpace(respBody))
}
return up.o.checkUploadResponse(up.ctx, &finish)
}

// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, offset int64, body []byte, fileHash string) error {
md5sumRaw := md5.Sum(body)
md5sum := hex.EncodeToString(md5sumRaw[:])
size := int64(len(body))

// Add some more parameters to the ChunkURI
u := up.info.ChunkURI
u += fmt.Sprintf("&index=%d&byteOffset=%d&hash=%s&fmt=json",
part, offset, md5sum,
)
if fileHash != "" {
u += fmt.Sprintf("&finish=true&fileSize=%d&fileHash=%s",
offset+int64(len(body)),
fileHash,
)
}
opts := rest.Opts{
Method: "POST",
RootURL: u,
ContentLength: &size,
}
var respBody []byte
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
opts.Body = up.wrap(bytes.NewReader(body))
resp, err := up.f.srv.Call(ctx, &opts)
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
} else {
respBody, err = rest.ReadBody(resp)
}
// retry all errors now that the multipart upload has started
return err != nil, err
})
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
return err
}
// If last chunk and using "streamed" transfer, get the response back now
if up.streamed && fileHash != "" {
return up.parseUploadFinishResponse(respBody)
}
fs.Debugf(up.o, "Done sending chunk %d", part)
return nil
}

// finish closes off the large upload and reads the metadata
func (up *largeUpload) finish(ctx context.Context) error {
fs.Debugf(up.o, "Finishing large file upload")
// For a streamed transfer we will already have read the info
if up.streamed {
return nil
}

opts := rest.Opts{
Method: "POST",
RootURL: up.info.FinishURI,
}
var respBody []byte
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.Call(ctx, &opts)
if err != nil {
return shouldRetry(resp, err)
}
respBody, err = rest.ReadBody(resp)
// retry all errors now that the multipart upload has started
return err != nil, err
})
if err != nil {
return err
}
return up.parseUploadFinishResponse(respBody)
}

// Upload uploads the chunks from the input
func (up *largeUpload) Upload(ctx context.Context) error {
if up.parts >= 0 {
fs.Debugf(up.o, "Starting upload of large file in %d chunks", up.parts)
} else {
fs.Debugf(up.o, "Starting streaming upload of large file")
}
var (
offset int64
errs = make(chan error, 1)
wg sync.WaitGroup
err error
wholeFileHash = md5.New()
eof = false
)
outer:
for part := int64(0); !eof; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}

// Get a block of memory
buf := up.f.getUploadBlock()

// Read the chunk
var n int
n, err = readers.ReadFill(up.in, buf)
if err == io.EOF {
eof = true
buf = buf[:n]
err = nil
} else if err != nil {
up.f.putUploadBlock(buf)
break outer
}

// Hash it
_, _ = io.Copy(wholeFileHash, bytes.NewBuffer(buf))

// Get file hash if was last chunk
fileHash := ""
if eof {
fileHash = hex.EncodeToString(wholeFileHash.Sum(nil))
}

// Transfer the chunk
wg.Add(1)
transferChunk := func(part, offset int64, buf []byte, fileHash string) {
defer wg.Done()
defer up.f.putUploadBlock(buf)
err := up.transferChunk(ctx, part, offset, buf, fileHash)
if err != nil {
select {
case errs <- err:
default:
}
}
}
if up.streamed {
transferChunk(part, offset, buf, fileHash) // streamed
} else {
go transferChunk(part, offset, buf, fileHash) // multithreaded
}

offset += int64(n)
}
wg.Wait()

// check size read is correct
if eof && err == nil && up.size >= 0 && up.size != offset {
err = errors.Errorf("upload: short read: read %d bytes expected %d", offset, up.size)
}

// read any errors
if err == nil {
select {
case err = <-errs:
default:
}
}

// finish regardless of errors
finishErr := up.finish(ctx)
if err == nil {
err = finishErr
}

return err
}
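A sketch of the chunk URI construction used by transferChunk in the deleted upload.go above: every chunk POST carries its index, byte offset and MD5, and the final chunk additionally carries finish=true with the whole-file size and hash. chunkURI is an illustrative helper, not rclone's API.

package sketch

import "fmt"

// chunkURI appends the per-chunk query parameters to the server-supplied
// ChunkUri base; fileMD5 is non-empty only on the last chunk.
func chunkURI(base string, part, offset int64, chunkMD5 string, finalSize int64, fileMD5 string) string {
	u := base + fmt.Sprintf("&index=%d&byteOffset=%d&hash=%s&fmt=json", part, offset, chunkMD5)
	if fileMD5 != "" { // last chunk: ask the server to finalise the file
		u += fmt.Sprintf("&finish=true&fileSize=%d&fileHash=%s", finalSize, fileMD5)
	}
	return u
}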
@@ -7,7 +7,6 @@ import (
"context"
"fmt"
"io"
"net/url"
"path"
"strconv"
"strings"
@@ -18,7 +17,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
@@ -62,8 +60,6 @@ copy operations.`,
Advanced: true,
}}

const enc = encodings.Swift

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -324,8 +320,7 @@ func parsePath(path string) (root string) {
// split returns container and containerPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (container, containerPath string) {
container, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
return enc.FromStandardName(container), enc.FromStandardPath(containerPath)
return bucket.Split(path.Join(f.root, rootRelativePath))
}

// split returns container and containerPath from the object
@@ -446,10 +441,9 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
// Check to see if the object exists - ignoring directory markers
var info swift.Object
var err error
encodedDirectory := enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
info, rxHeaders, err = f.c.Object(f.rootContainer, encodedDirectory)
info, rxHeaders, err = f.c.Object(f.rootContainer, f.rootDirectory)
return shouldRetryHeaders(rxHeaders, err)
})
if err == nil && info.ContentType != directoryMarkerContentType {
@@ -531,10 +525,10 @@ type listFn func(remote string, object *swift.Object, isDirectory bool) error
//
// Set recurse to read sub directories
func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, fn listFn) error {
if prefix != "" && !strings.HasSuffix(prefix, "/") {
if prefix != "" {
prefix += "/"
}
if directory != "" && !strings.HasSuffix(directory, "/") {
if directory != "" {
directory += "/"
}
// Options for ObjectsWalk
@@ -559,18 +553,17 @@ func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer
if !recurse {
isDirectory = strings.HasSuffix(object.Name, "/")
}
remote := enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
if !strings.HasPrefix(object.Name, prefix) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
if remote == prefix {
if object.Name == prefix {
// If we have zero length directory markers ending in / then swift
// will return them in the listing for the directory which causes
// duplicate directories. Ignore them here.
continue
}
remote = remote[len(prefix):]
remote := object.Name[len(prefix):]
if addContainer {
remote = path.Join(container, remote)
}
@@ -642,7 +635,7 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
}
for _, container := range containers {
f.cache.MarkOK(container.Name)
d := fs.NewDir(enc.ToStandardName(container.Name), time.Time{}).SetSize(container.Bytes).SetItems(container.Count)
d := fs.NewDir(container.Name, time.Time{}).SetSize(container.Bytes).SetItems(container.Count)
entries = append(entries, d)
}
return entries, nil
@@ -953,18 +946,6 @@ func (o *Object) isStaticLargeObject() (bool, error) {
return o.hasHeader("X-Static-Large-Object")
}

func (o *Object) isInContainerVersioning(container string) (bool, error) {
_, headers, err := o.fs.c.Container(container)
if err != nil {
return false, err
}
xHistoryLocation := headers["X-History-Location"]
if len(xHistoryLocation) > 0 {
return true, nil
}
return false, nil
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
@@ -1096,8 +1077,9 @@ func min(x, y int64) int64 {
//
// if except is passed in then segments with that prefix won't be deleted
func (o *Object) removeSegments(except string) error {
segmentsContainer, prefix, err := o.getSegmentsDlo()
err = o.fs.listContainerRoot(segmentsContainer, prefix, "", false, true, func(remote string, object *swift.Object, isDirectory bool) error {
container, containerPath := o.split()
segmentsContainer := container + "_segments"
err := o.fs.listContainerRoot(segmentsContainer, containerPath, "", false, true, func(remote string, object *swift.Object, isDirectory bool) error {
if isDirectory {
return nil
}
@@ -1126,23 +1108,6 @@ func (o *Object) removeSegments(except string) error {
return nil
}

func (o *Object) getSegmentsDlo() (segmentsContainer string, prefix string, err error) {
if err = o.readMetaData(); err != nil {
return
}
dirManifest := o.headers["X-Object-Manifest"]
dirManifest, err = url.PathUnescape(dirManifest)
if err != nil {
return
}
delimiter := strings.Index(dirManifest, "/")
if len(dirManifest) == 0 || delimiter < 0 {
err = errors.New("Missing or wrong structure of manifest of Dynamic large object")
return
}
return dirManifest[:delimiter], dirManifest[delimiter+1:], nil
}
|
||||
// urlEncode encodes a string so that it is a valid URL
|
||||
//
|
||||
// We don't use any of Go's standard methods as we need `/` not
|
||||
@@ -1329,9 +1294,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
container, containerPath := o.split()
|
||||
|
||||
isDynamicLargeObject, err := o.isDynamicLargeObject()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Remove file/manifest first
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err = o.fs.c.ObjectDelete(container, containerPath)
|
||||
@@ -1340,22 +1308,12 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
isDynamicLargeObject, err := o.isDynamicLargeObject()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// ...then segments if required
|
||||
if isDynamicLargeObject {
|
||||
isInContainerVersioning, err := o.isInContainerVersioning(container)
|
||||
err = o.removeSegments("")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !isInContainerVersioning {
|
||||
err = o.removeSegments("")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
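For context on the two removeSegments strategies above: master locates a dynamic large object's segments by parsing the URL-escaped X-Object-Manifest header ("<container>/<prefix>"), while the v1.49 branch assumes the "<container>_segments" naming convention. A rough standalone sketch of the manifest parsing, under that assumption (not rclone's getSegmentsDlo; the error text is illustrative):

package main

import (
	"errors"
	"fmt"
	"net/url"
	"strings"
)

// splitManifest splits a swift dynamic large object manifest value
// ("<container>/<prefix>", URL-escaped) into its two parts.
func splitManifest(manifest string) (container, prefix string, err error) {
	manifest, err = url.PathUnescape(manifest)
	if err != nil {
		return "", "", err
	}
	i := strings.Index(manifest, "/")
	if len(manifest) == 0 || i < 0 {
		return "", "", errors.New("missing or malformed X-Object-Manifest")
	}
	return manifest[:i], manifest[i+1:], nil
}

func main() {
	c, p, err := splitManifest("bucket_segments/path/to/large-file/")
	fmt.Println(c, p, err) // bucket_segments path/to/large-file/ <nil>
}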

@@ -3,9 +3,7 @@ package odrvcookie

import (
"bytes"
"context"
"encoding/xml"
"fmt"
"html/template"
"net/http"
"net/http/cookiejar"
@@ -33,13 +31,13 @@ type CookieResponse struct {
FedAuth http.Cookie
}

// SharepointSuccessResponse holds a response from a successful microsoft login
type SharepointSuccessResponse struct {
// SuccessResponse hold a response from the sharepoint webdav
type SuccessResponse struct {
XMLName xml.Name `xml:"Envelope"`
Body SuccessResponseBody `xml:"Body"`
Succ SuccessResponseBody `xml:"Body"`
}

// SuccessResponseBody is the body of a successful response, it holds the token
// SuccessResponseBody is the body of a success response, it holds the token
type SuccessResponseBody struct {
XMLName xml.Name
Type string `xml:"RequestSecurityTokenResponse>TokenType"`
@@ -48,24 +46,6 @@ type SuccessResponseBody struct {
Token string `xml:"RequestSecurityTokenResponse>RequestedSecurityToken>BinarySecurityToken"`
}

// SharepointError holds a error response microsoft login
type SharepointError struct {
XMLName xml.Name `xml:"Envelope"`
Body ErrorResponseBody `xml:"Body"`
}

func (e *SharepointError) Error() string {
return fmt.Sprintf("%s: %s (%s)", e.Body.FaultCode, e.Body.Reason, e.Body.Detail)
}

// ErrorResponseBody contains the body of a erroneous repsonse
type ErrorResponseBody struct {
XMLName xml.Name
FaultCode string `xml:"Fault>Code>Subcode>Value"`
Reason string `xml:"Fault>Reason>Text"`
Detail string `xml:"Fault>Detail>error>internalerror>text"`
}

// reqString is a template that gets populated with the user data in order to retrieve a "BinarySecurityToken"
const reqString = `<s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope"
xmlns:a="http://www.w3.org/2005/08/addressing"
@@ -111,15 +91,15 @@ func New(pUser, pPass, pEndpoint string) CookieAuth {

// Cookies creates a CookieResponse. It fetches the auth token and then
// retrieves the Cookies
func (ca *CookieAuth) Cookies(ctx context.Context) (*CookieResponse, error) {
tokenResp, err := ca.getSPToken(ctx)
func (ca *CookieAuth) Cookies() (*CookieResponse, error) {
tokenResp, err := ca.getSPToken()
if err != nil {
return nil, err
}
return ca.getSPCookie(tokenResp)
}

func (ca *CookieAuth) getSPCookie(conf *SharepointSuccessResponse) (*CookieResponse, error) {
func (ca *CookieAuth) getSPCookie(conf *SuccessResponse) (*CookieResponse, error) {
spRoot, err := url.Parse(ca.endpoint)
if err != nil {
return nil, errors.Wrap(err, "Error while constructing endpoint URL")
@@ -142,7 +122,7 @@ func (ca *CookieAuth) getSPCookie(conf *SharepointSuccessResponse) (*CookieRespo
}

// Send the previously acquired Token as a Post parameter
if _, err = client.Post(u.String(), "text/xml", strings.NewReader(conf.Body.Token)); err != nil {
if _, err = client.Post(u.String(), "text/xml", strings.NewReader(conf.Succ.Token)); err != nil {
return nil, errors.Wrap(err, "Error while grabbing cookies from endpoint: %v")
}

@@ -160,7 +140,7 @@ func (ca *CookieAuth) getSPCookie(conf *SharepointSuccessResponse) (*CookieRespo
return &cookieResponse, nil
}

func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SharepointSuccessResponse, err error) {
func (ca *CookieAuth) getSPToken() (conf *SuccessResponse, err error) {
reqData := map[string]interface{}{
"Username": ca.user,
"Password": ca.pass,
@@ -180,7 +160,6 @@ func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SharepointSuccessRe
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext

client := fshttp.NewClient(fs.Config)
resp, err := client.Do(req)
@@ -196,21 +175,12 @@ func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SharepointSuccessRe
}
s := respBuf.Bytes()

conf = &SharepointSuccessResponse{}
conf = &SuccessResponse{}
err = xml.Unmarshal(s, conf)
if conf.Body.Token == "" {
// xml Unmarshal won't fail if the response doesn't contain a token
// However, the token will be empty
sErr := &SharepointError{}

errSErr := xml.Unmarshal(s, sErr)
if errSErr == nil {
return nil, sErr
}
}

if err != nil {
// FIXME: Try to parse with FailedResponse struct (check for server error code)
return nil, errors.Wrap(err, "Error while reading endpoint response")
}

return
}
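The odrvcookie diff above covers a two-step SharePoint login: getSPToken posts the credentials to the Microsoft login endpoint for a BinarySecurityToken, then getSPCookie posts that token to the SharePoint site so the server can set the rtFa and FedAuth cookies. A compile-ready sketch of the second step using only the standard library (endpoint and token values are placeholders, and real use needs the SOAP token request first):

package main

import (
	"fmt"
	"net/http"
	"net/http/cookiejar"
	"net/url"
	"strings"
)

// postTokenForCookies posts a previously acquired security token and lets a
// cookie jar capture whatever cookies the server sets in response.
func postTokenForCookies(endpoint, token string) ([]*http.Cookie, error) {
	jar, err := cookiejar.New(nil)
	if err != nil {
		return nil, err
	}
	client := &http.Client{Jar: jar}
	if _, err := client.Post(endpoint, "text/xml", strings.NewReader(token)); err != nil {
		return nil, err
	}
	u, err := url.Parse(endpoint)
	if err != nil {
		return nil, err
	}
	return jar.Cookies(u), nil
}

func main() {
	cookies, err := postTokenForCookies("https://contoso.sharepoint.com/_forms/default.aspx?wa=wsignin1.0", "<token>")
	fmt.Println(len(cookies), err)
}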

@@ -113,8 +113,7 @@ type Fs struct {
canStream bool // set if can stream
useOCMtime bool // set if can use X-OC-Mtime
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
hasMD5 bool // set if can use owncloud style checksums for MD5
hasSHA1 bool // set if can use owncloud style checksums for SHA1
hasChecksums bool // set if can use owncloud style checksums
}

// Object describes a webdav object
@@ -206,7 +205,7 @@ func itemIsDir(item *api.Response) bool {
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string) (info *api.Prop, err error) {
func (f *Fs) readMetaDataForPath(path string, depth string) (info *api.Prop, err error) {
// FIXME how do we read back additional properties?
opts := rest.Opts{
Method: "PROPFIND",
@@ -216,13 +215,13 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
},
NoRedirect: true,
}
if f.hasMD5 || f.hasSHA1 {
if f.hasChecksums {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.srv.CallXML(&opts, nil, &result)
return f.shouldRetry(resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
@@ -230,7 +229,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
switch apiErr.StatusCode {
case http.StatusNotFound:
if f.retryWithZeroDepth && depth != "0" {
return f.readMetaDataForPath(ctx, path, "0")
return f.readMetaDataForPath(path, "0")
}
return nil, fs.ErrorObjectNotFound
case http.StatusMovedPermanently, http.StatusFound, http.StatusSeeOther:
@@ -354,7 +353,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
}
f.srv.SetErrorHandler(errorHandler)
err = f.setQuirks(ctx, opt.Vendor)
err = f.setQuirks(opt.Vendor)
if err != nil {
return nil, err
}
@@ -384,7 +383,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// sets the BearerToken up
func (f *Fs) setBearerToken(token string) {
f.opt.BearerToken = token
f.srv.SetHeader("Authorization", "Bearer "+token)
f.srv.SetHeader("Authorization", "BEARER "+token)
}

// fetch the bearer token using the command
@@ -425,30 +424,29 @@ func (f *Fs) fetchAndSetBearerToken() error {
}

// setQuirks adjusts the Fs for the vendor passed in
func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
func (f *Fs) setQuirks(vendor string) error {
switch vendor {
case "owncloud":
f.canStream = true
f.precision = time.Second
f.useOCMtime = true
f.hasMD5 = true
f.hasSHA1 = true
f.hasChecksums = true
case "nextcloud":
f.precision = time.Second
f.useOCMtime = true
f.hasSHA1 = true
f.hasChecksums = true
case "sharepoint":
// To mount sharepoint, two Cookies are required
// They have to be set instead of BasicAuth
f.srv.RemoveHeader("Authorization") // We don't need this Header if using cookies
spCk := odrvcookie.New(f.opt.User, f.opt.Pass, f.endpointURL)
spCookies, err := spCk.Cookies(ctx)
spCookies, err := spCk.Cookies()
if err != nil {
return err
}

odrvcookie.NewRenew(12*time.Hour, func() {
spCookies, err := spCk.Cookies(ctx)
spCookies, err := spCk.Cookies()
if err != nil {
fs.Errorf("could not renew cookies: %s", err.Error())
return
@@ -479,7 +477,7 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Prop) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *api.Prop) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -489,7 +487,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Pro
// Set info
err = o.setMetaData(info)
} else {
err = o.readMetaData(ctx) // reads info and meta, returning an error
err = o.readMetaData() // reads info and meta, returning an error
}
if err != nil {
return nil, err
@@ -500,7 +498,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Pro
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
return f.newObjectWithInfo(remote, nil)
}

// Read the normal props, plus the checksums
@@ -530,7 +528,7 @@ type listAllFn func(string, bool, *api.Prop) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, filesOnly bool, depth string, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, depth string, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "PROPFIND",
Path: f.dirPath(dir), // FIXME Should not start with /
@@ -538,13 +536,13 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
"Depth": depth,
},
}
if f.hasMD5 || f.hasSHA1 {
if f.hasChecksums {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.srv.CallXML(&opts, nil, &result)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -552,7 +550,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
// does not exist
if apiErr.StatusCode == http.StatusNotFound {
if f.retryWithZeroDepth && depth != "0" {
return f.listAll(ctx, dir, directoriesOnly, filesOnly, "0", fn)
return f.listAll(dir, directoriesOnly, filesOnly, "0", fn)
}
return found, fs.ErrorDirNotFound
}
@@ -627,14 +625,14 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
var iErr error
_, err = f.listAll(ctx, dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
_, err = f.listAll(dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
if isDir {
d := fs.NewDir(remote, time.Time(info.Modified))
// .SetID(info.ID)
// FIXME more info from dir? can set size, items?
entries = append(entries, d)
} else {
o, err := f.newObjectWithInfo(ctx, remote, info)
o, err := f.newObjectWithInfo(remote, info)
if err != nil {
iErr = err
return true
@@ -698,7 +696,7 @@ func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
}

// low level mkdir, only makes the directory, doesn't attempt to create parents
func (f *Fs) _mkdir(ctx context.Context, dirPath string) error {
func (f *Fs) _mkdir(dirPath string) error {
// We assume the root is already created
if dirPath == "" {
return nil
@@ -713,7 +711,7 @@ func (f *Fs) _mkdir(ctx context.Context, dirPath string) error {
NoResponse: true,
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
resp, err := f.srv.Call(&opts)
return f.shouldRetry(resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
@@ -729,13 +727,13 @@ func (f *Fs) _mkdir(ctx context.Context, dirPath string) error {
// mkdir makes the directory and parents using native paths
func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
// defer log.Trace(dirPath, "")("")
err := f._mkdir(ctx, dirPath)
err := f._mkdir(dirPath)
if apiErr, ok := err.(*api.Error); ok {
// parent does not exist so create it first then try again
if apiErr.StatusCode == http.StatusConflict {
err = f.mkParentDir(ctx, dirPath)
if err == nil {
err = f._mkdir(ctx, dirPath)
err = f._mkdir(dirPath)
}
}
}
@@ -751,17 +749,17 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// dirNotEmpty returns true if the directory exists and is not Empty
//
// if the directory does not exist then err will be ErrorDirNotFound
func (f *Fs) dirNotEmpty(ctx context.Context, dir string) (found bool, err error) {
return f.listAll(ctx, dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
func (f *Fs) dirNotEmpty(dir string) (found bool, err error) {
return f.listAll(dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
return true
})
}

// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
func (f *Fs) purgeCheck(dir string, check bool) error {
if check {
notEmpty, err := f.dirNotEmpty(ctx, dir)
notEmpty, err := f.dirNotEmpty(dir)
if err != nil {
return err
}
@@ -777,7 +775,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, nil)
resp, err = f.srv.CallXML(&opts, nil, nil)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -791,7 +789,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
return f.purgeCheck(dir, true)
}

// Precision return the precision of this Fs
@@ -837,10 +835,10 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
},
}
if f.useOCMtime {
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1e9)
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1E9)
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -872,7 +870,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
return f.purgeCheck("", false)
}

// Move src to this remote using server side move operations.
@@ -906,7 +904,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
dstPath := f.filePath(dstRemote)

// Check if destination exists
_, err := f.dirNotEmpty(ctx, dstRemote)
_, err := f.dirNotEmpty(dstRemote)
if err == nil {
return fs.ErrorDirExists
}
@@ -936,7 +934,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
},
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -947,14 +945,10 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
hashes := hash.Set(hash.None)
if f.hasMD5 {
hashes.Add(hash.MD5)
if f.hasChecksums {
return hash.NewHashSet(hash.MD5, hash.SHA1)
}
if f.hasSHA1 {
hashes.Add(hash.SHA1)
}
return hashes
return hash.Set(hash.None)
}

// About gets quota information
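The Hashes hunk above trades the single hasChecksums flag for separate hasMD5/hasSHA1 flags, so a vendor can advertise one hash without the other (per the setQuirks hunk, nextcloud gets SHA1 only). A toy model of that difference (not rclone's hash package; names are illustrative):

package main

import "fmt"

type hashSet map[string]bool

// supported builds the advertised hash set from per-hash flags; the
// all-or-nothing hasChecksums variant cannot express the SHA1-only case.
func supported(hasMD5, hasSHA1 bool) hashSet {
	h := hashSet{}
	if hasMD5 {
		h["md5"] = true
	}
	if hasSHA1 {
		h["sha1"] = true
	}
	return h
}

func main() {
	fmt.Println(supported(false, true)) // nextcloud-like: map[sha1:true]
	fmt.Println(supported(true, true))  // owncloud-like: map[md5:true sha1:true]
}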
@@ -981,7 +975,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &q)
resp, err = f.srv.CallXML(&opts, nil, &q)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -1021,19 +1015,20 @@ func (o *Object) Remote() string {

// Hash returns the SHA1 or MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t == hash.MD5 && o.fs.hasMD5 {
return o.md5, nil
}
if t == hash.SHA1 && o.fs.hasSHA1 {
return o.sha1, nil
if o.fs.hasChecksums {
switch t {
case hash.SHA1:
return o.sha1, nil
case hash.MD5:
return o.md5, nil
}
}
return "", hash.ErrUnsupported
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
ctx := context.TODO()
err := o.readMetaData(ctx)
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return 0
@@ -1046,14 +1041,10 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
o.hasMetaData = true
o.size = info.Size
o.modTime = time.Time(info.Modified)
if o.fs.hasMD5 || o.fs.hasSHA1 {
if o.fs.hasChecksums {
hashes := info.Hashes()
if o.fs.hasSHA1 {
o.sha1 = hashes[hash.SHA1]
}
if o.fs.hasMD5 {
o.md5 = hashes[hash.MD5]
}
o.sha1 = hashes[hash.SHA1]
o.md5 = hashes[hash.MD5]
}
return nil
}
@@ -1061,11 +1052,11 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) readMetaData() (err error) {
if o.hasMetaData {
return nil
}
info, err := o.fs.readMetaDataForPath(ctx, o.remote, defaultDepth)
info, err := o.fs.readMetaDataForPath(o.remote, defaultDepth)
if err != nil {
return err
}
@@ -1077,7 +1068,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -1104,7 +1095,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -1134,27 +1125,25 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
ContentType: fs.MimeType(ctx, src),
}
if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
if o.fs.useOCMtime || o.fs.hasChecksums {
opts.ExtraHeaders = map[string]string{}
if o.fs.useOCMtime {
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1e9)
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1E9)
}
// Set one upload checksum
// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
// Nextcloud stores the checksum you supply (SHA1 or MD5) but only stores one
if o.fs.hasSHA1 {
if o.fs.hasChecksums {
// Set an upload checksum - prefer SHA1
//
// This is used as an upload integrity test. If we set
// only SHA1 here, owncloud will calculate the MD5 too.
if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" {
opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
}
}
if o.fs.hasMD5 && opts.ExtraHeaders["OC-Checksum"] == "" {
if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
} else if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
}
}
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -1170,7 +1159,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// read metadata from remote
o.hasMetaData = false
return o.readMetaData(ctx)
return o.readMetaData()
}

// Remove an object
@@ -1181,7 +1170,7 @@ func (o *Object) Remove(ctx context.Context) error {
NoResponse: true,
}
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.Call(ctx, &opts)
resp, err := o.fs.srv.Call(&opts)
return o.fs.shouldRetry(resp, err)
})
}
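The Update hunk above sets a single OC-Checksum upload header, preferring SHA1 and falling back to MD5 when no SHA1 is available. A standalone sketch of that selection (toy function; in rclone the values come from src.Hash and the per-hash flags):

package main

import "fmt"

// ocChecksum picks one upload checksum header value: SHA1 first, MD5 as a
// fallback, empty when neither hash is known.
func ocChecksum(sha1, md5 string) string {
	if sha1 != "" {
		return "SHA1:" + sha1
	}
	if md5 != "" {
		return "MD5:" + md5
	}
	return ""
}

func main() {
	fmt.Println(ocChecksum("da39a3ee5e6b4b0d3255bfef95601890afd80709", "")) // SHA1:...
	fmt.Println(ocChecksum("", "d41d8cd98f00b204e9800998ecf8427e"))         // MD5:...
}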

@@ -20,7 +20,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/oauthutil"
@@ -30,8 +29,6 @@ import (
"golang.org/x/oauth2"
)

const enc = encodings.Yandex

//oAuth
const (
rcloneClientID = "ac39b43b9eba4cae8ffb788c06d816a8"
@@ -203,14 +200,14 @@ func (f *Fs) dirPath(file string) string {
return path.Join(f.diskRoot, file) + "/"
}

func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.ResourceInfoRequestOptions) (*api.ResourceInfoResponse, error) {
func (f *Fs) readMetaDataForPath(path string, options *api.ResourceInfoRequestOptions) (*api.ResourceInfoResponse, error) {
opts := rest.Opts{
Method: "GET",
Path: "/resources",
Parameters: url.Values{},
}

opts.Parameters.Set("path", enc.FromStandardPath(path))
opts.Parameters.Set("path", path)

if options.SortMode != nil {
opts.Parameters.Set("sort", options.SortMode.String())
@@ -229,7 +226,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.
var info api.ResourceInfoResponse
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
})

@@ -237,13 +234,11 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.
return nil, err
}

info.Name = enc.ToStandardName(info.Name)
return &info, nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -289,7 +284,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
//request object meta info
// Check to see if the object exists and is a file
//request object meta info
if info, err := f.readMetaDataForPath(ctx, f.diskRoot, &api.ResourceInfoRequestOptions{}); err != nil {
if info, err := f.readMetaDataForPath(f.diskRoot, &api.ResourceInfoRequestOptions{}); err != nil {

} else {
if info.ResourceType == "file" {
@@ -306,7 +301,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.ResourceInfoResponse) (fs.DirEntry, error) {
func (f *Fs) itemToDirEntry(remote string, object *api.ResourceInfoResponse) (fs.DirEntry, error) {
switch object.ResourceType {
case "dir":
t, err := time.Parse(time.RFC3339Nano, object.Modified)
@@ -316,7 +311,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.Reso
d := fs.NewDir(remote, t).SetSize(object.Size)
return d, nil
case "file":
o, err := f.newObjectWithInfo(ctx, remote, object)
o, err := f.newObjectWithInfo(remote, object)
if err != nil {
return nil, err
}
@@ -348,7 +343,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
Limit: limit,
Offset: offset,
}
info, err := f.readMetaDataForPath(ctx, root, opts)
info, err := f.readMetaDataForPath(root, opts)

if err != nil {
if apiErr, ok := err.(*api.ErrorResponse); ok {
@@ -364,9 +359,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if info.ResourceType == "dir" {
//list all subdirs
for _, element := range info.Embedded.Items {
element.Name = enc.ToStandardName(element.Name)
remote := path.Join(dir, element.Name)
entry, err := f.itemToDirEntry(ctx, remote, &element)
entry, err := f.itemToDirEntry(remote, &element)
if err != nil {
return nil, err
}
@@ -392,7 +386,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.ResourceInfoResponse) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *api.ResourceInfoResponse) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -401,7 +395,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Res
if info != nil {
err = o.setMetaData(info)
} else {
err = o.readMetaData(ctx)
err = o.readMetaData()
if apiErr, ok := err.(*api.ErrorResponse); ok {
// does not exist
if apiErr.ErrorName == "DiskNotFoundError" {
@@ -418,7 +412,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Res
// NewObject finds the Object at remote. If it can't be found it
// returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
return f.newObjectWithInfo(remote, nil)
}

// Creates from the parameters passed in a half finished Object which
@@ -452,7 +446,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
}

// CreateDir makes a directory
func (f *Fs) CreateDir(ctx context.Context, path string) (err error) {
func (f *Fs) CreateDir(path string) (err error) {
//fmt.Printf("CreateDir: %s\n", path)

var resp *http.Response
@@ -463,18 +457,14 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (err error) {
NoResponse: true,
}

// If creating a directory with a : use (undocumented) disk: prefix
if strings.IndexRune(path, ':') >= 0 {
path = "disk:" + path
}
opts.Parameters.Set("path", enc.FromStandardPath(path))
opts.Parameters.Set("path", path)

err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
// fmt.Printf("CreateDir %q Error: %s\n", path, err.Error())
//fmt.Printf("CreateDir Error: %s\n", err.Error())
return err
}
// fmt.Printf("...Id %q\n", *info.Id)
@@ -484,7 +474,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (err error) {
// This really needs improvement and especially proper error checking
// but Yandex does not publish a List of possible errors and when they're
// expected to occur.
func (f *Fs) mkDirs(ctx context.Context, path string) (err error) {
func (f *Fs) mkDirs(path string) (err error) {
//trim filename from path
//dirString := strings.TrimSuffix(path, filepath.Base(path))
//trim "disk:" from path
@@ -493,7 +483,7 @@ func (f *Fs) mkDirs(ctx context.Context, path string) (err error) {
return nil
}

if err = f.CreateDir(ctx, dirString); err != nil {
if err = f.CreateDir(dirString); err != nil {
if apiErr, ok := err.(*api.ErrorResponse); ok {
// allready exists
if apiErr.ErrorName != "DiskPathPointsToExistentDirectoryError" {
@@ -503,7 +493,7 @@ func (f *Fs) mkDirs(ctx context.Context, path string) (err error) {
for _, element := range dirs {
if element != "" {
mkdirpath += element + "/" //path separator /
if err = f.CreateDir(ctx, mkdirpath); err != nil {
if err = f.CreateDir(mkdirpath); err != nil {
// ignore errors while creating dirs
}
}
@@ -515,7 +505,7 @@ func (f *Fs) mkDirs(ctx context.Context, path string) (err error) {
return err
}

func (f *Fs) mkParentDirs(ctx context.Context, resPath string) error {
func (f *Fs) mkParentDirs(resPath string) error {
// defer log.Trace(dirPath, "")("")
// chop off trailing / if it exists
if strings.HasSuffix(resPath, "/") {
@@ -525,17 +515,17 @@ func (f *Fs) mkParentDirs(ctx context.Context, resPath string) error {
if parent == "." {
parent = ""
}
return f.mkDirs(ctx, parent)
return f.mkDirs(parent)
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
path := f.filePath(dir)
return f.mkDirs(ctx, path)
return f.mkDirs(path)
}

// waitForJob waits for the job with status in url to complete
func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
func (f *Fs) waitForJob(location string) (err error) {
opts := rest.Opts{
RootURL: location,
Method: "GET",
@@ -545,7 +535,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
var resp *http.Response
var body []byte
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
if err != nil {
return fserrors.ShouldRetry(err), err
}
@@ -574,20 +564,20 @@ func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout)
}

func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err error) {
func (f *Fs) delete(path string, hardDelete bool) (err error) {
opts := rest.Opts{
Method: "DELETE",
Path: "/resources",
Parameters: url.Values{},
}

opts.Parameters.Set("path", enc.FromStandardPath(path))
opts.Parameters.Set("path", path)
opts.Parameters.Set("permanently", strconv.FormatBool(hardDelete))

var resp *http.Response
var body []byte
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
if err != nil {
return fserrors.ShouldRetry(err), err
}
@@ -605,19 +595,19 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err erro
if err != nil {
return errors.Wrapf(err, "async info result not JSON: %q", body)
}
return f.waitForJob(ctx, info.HRef)
return f.waitForJob(info.HRef)
}
return nil
}

// purgeCheck remotes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
func (f *Fs) purgeCheck(dir string, check bool) error {
root := f.filePath(dir)
if check {
//to comply with rclone logic we check if the directory is empty before delete.
//send request to get list of objects in this directory.
info, err := f.readMetaDataForPath(ctx, root, &api.ResourceInfoRequestOptions{})
info, err := f.readMetaDataForPath(root, &api.ResourceInfoRequestOptions{})
if err != nil {
return errors.Wrap(err, "rmdir failed")
}
@@ -626,14 +616,14 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}
}
//delete directory
return f.delete(ctx, root, false)
return f.delete(root, false)
}

// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
return f.purgeCheck(dir, true)
}

// Purge deletes all the files and the container
@@ -642,25 +632,25 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
return f.purgeCheck("", false)
}

// copyOrMoves copies or moves directories or files depending on the method parameter
func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite bool) (err error) {
func (f *Fs) copyOrMove(method, src, dst string, overwrite bool) (err error) {
opts := rest.Opts{
Method: "POST",
Path: "/resources/" + method,
Parameters: url.Values{},
}

opts.Parameters.Set("from", enc.FromStandardPath(src))
opts.Parameters.Set("path", enc.FromStandardPath(dst))
opts.Parameters.Set("from", src)
opts.Parameters.Set("path", dst)
opts.Parameters.Set("overwrite", strconv.FormatBool(overwrite))

var resp *http.Response
var body []byte
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
if err != nil {
return fserrors.ShouldRetry(err), err
}
@@ -678,7 +668,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite
if err != nil {
return errors.Wrapf(err, "async info result not JSON: %q", body)
}
return f.waitForJob(ctx, info.HRef)
return f.waitForJob(info.HRef)
}
return nil
}
@@ -700,11 +690,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

dstPath := f.filePath(remote)
err := f.mkParentDirs(ctx, dstPath)
err := f.mkParentDirs(dstPath)
if err != nil {
return nil, err
}
err = f.copyOrMove(ctx, "copy", srcObj.filePath(), dstPath, false)
err = f.copyOrMove("copy", srcObj.filePath(), dstPath, false)

if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
@@ -730,11 +720,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

dstPath := f.filePath(remote)
err := f.mkParentDirs(ctx, dstPath)
err := f.mkParentDirs(dstPath)
if err != nil {
return nil, err
}
err = f.copyOrMove(ctx, "move", srcObj.filePath(), dstPath, false)
err = f.copyOrMove("move", srcObj.filePath(), dstPath, false)

if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
@@ -768,12 +758,12 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return errors.New("can't move root directory")
}

err := f.mkParentDirs(ctx, dstPath)
err := f.mkParentDirs(dstPath)
if err != nil {
return err
}

_, err = f.readMetaDataForPath(ctx, dstPath, &api.ResourceInfoRequestOptions{})
_, err = f.readMetaDataForPath(dstPath, &api.ResourceInfoRequestOptions{})
if apiErr, ok := err.(*api.ErrorResponse); ok {
// does not exist
if apiErr.ErrorName == "DiskNotFoundError" {
@@ -785,7 +775,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return fs.ErrorDirExists
}

err = f.copyOrMove(ctx, "move", srcPath, dstPath, false)
err = f.copyOrMove("move", srcPath, dstPath, false)

if err != nil {
return errors.Wrap(err, "couldn't move directory")
@@ -803,16 +793,16 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
}
opts := rest.Opts{
Method: "PUT",
Path: enc.FromStandardPath(path),
Path: path,
Parameters: url.Values{},
NoResponse: true,
}

opts.Parameters.Set("path", enc.FromStandardPath(f.filePath(remote)))
opts.Parameters.Set("path", f.filePath(remote))

var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
return shouldRetry(resp, err)
})

@@ -829,7 +819,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
return "", errors.Wrap(err, "couldn't create public link")
}

info, err := f.readMetaDataForPath(ctx, f.filePath(remote), &api.ResourceInfoRequestOptions{})
info, err := f.readMetaDataForPath(f.filePath(remote), &api.ResourceInfoRequestOptions{})
if err != nil {
return "", err
}
@@ -850,7 +840,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
}

err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
return shouldRetry(resp, err)
})
return err
@@ -867,7 +857,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var info api.DiskInfo
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
})

@@ -934,11 +924,11 @@ func (o *Object) setMetaData(info *api.ResourceInfoResponse) (err error) {
}

// readMetaData reads ands sets the new metadata for a storage.Object
func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) readMetaData() (err error) {
if o.hasMetaData {
return nil
}
info, err := o.fs.readMetaDataForPath(ctx, o.filePath(), &api.ResourceInfoRequestOptions{})
info, err := o.fs.readMetaDataForPath(o.filePath(), &api.ResourceInfoRequestOptions{})
if err != nil {
return err
}
@@ -953,7 +943,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -963,8 +953,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
ctx := context.TODO()
err := o.readMetaData(ctx)
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return 0
@@ -985,7 +974,7 @@ func (o *Object) Storable() bool {
return true
}

func (o *Object) setCustomProperty(ctx context.Context, property string, value string) (err error) {
func (o *Object) setCustomProperty(property string, value string) (err error) {
var resp *http.Response
opts := rest.Opts{
Method: "PATCH",
@@ -994,14 +983,14 @@ func (o *Object) setCustomProperty(ctx context.Context, property string, value s
NoResponse: true,
}

opts.Parameters.Set("path", enc.FromStandardPath(o.filePath()))
opts.Parameters.Set("path", o.filePath())
rcm := map[string]interface{}{
property: value,
}
cpr := api.CustomPropertyResponse{CustomProperties: rcm}

err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &cpr, nil)
resp, err = o.fs.srv.CallJSON(&opts, &cpr, nil)
return shouldRetry(resp, err)
})
return err
@@ -1012,7 +1001,7 @@ func (o *Object) setCustomProperty(ctx context.Context, property string, value s
// Commits the datastore
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
// set custom_property 'rclone_modified' of object to modTime
err := o.setCustomProperty(ctx, "rclone_modified", modTime.Format(time.RFC3339Nano))
err := o.setCustomProperty("rclone_modified", modTime.Format(time.RFC3339Nano))
if err != nil {
return err
}
@@ -1031,10 +1020,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Parameters: url.Values{},
}

opts.Parameters.Set("path", enc.FromStandardPath(o.filePath()))
opts.Parameters.Set("path", o.filePath())

err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &dl)
resp, err = o.fs.srv.CallJSON(&opts, nil, &dl)
return shouldRetry(resp, err)
})

@@ -1049,7 +1038,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1058,7 +1047,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return resp.Body, err
}

func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeType string) (err error) {
func (o *Object) upload(in io.Reader, overwrite bool, mimeType string) (err error) {
// prepare upload
var resp *http.Response
var ur api.AsyncInfo
@@ -1068,11 +1057,11 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeT
Parameters: url.Values{},
}

opts.Parameters.Set("path", enc.FromStandardPath(o.filePath()))
opts.Parameters.Set("path", o.filePath())
opts.Parameters.Set("overwrite", strconv.FormatBool(overwrite))

err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &ur)
resp, err = o.fs.srv.CallJSON(&opts, nil, &ur)
return shouldRetry(resp, err)
})

@@ -1090,7 +1079,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeT
}

err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})

@@ -1108,13 +1097,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
remote := o.filePath()

//create full path to file before upload.
err := o.fs.mkParentDirs(ctx, remote)
err := o.fs.mkParentDirs(remote)
if err != nil {
return err
}

//upload file
err = o.upload(ctx, in1, true, fs.MimeType(ctx, src))
err = o.upload(in1, true, fs.MimeType(ctx, src))
if err != nil {
return err
}
@@ -1131,7 +1120,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.delete(ctx, o.filePath(), false)
return o.fs.delete(o.filePath(), false)
}

// MimeType of an Object if known, "" otherwise
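The yandex hunks above all revert the same pattern: every path sent to the disk API is first mapped through the enc (encodings.Yandex) encoder, and names read back go through the inverse. A minimal sketch of the shape of that call site (the encoder here is a stand-in identity function, not rclone's):

package main

import (
	"fmt"
	"net/url"
)

// setEncodedPath runs the rclone-side name through an encoder before it
// goes into the API query string, mirroring the pattern in the diff.
func setEncodedPath(v url.Values, encode func(string) string, path string) {
	v.Set("path", encode(path))
}

func main() {
	v := url.Values{}
	identity := func(s string) string { return s } // stand-in encoder
	setEncodedPath(v, identity, "disk:/photos/2019")
	fmt.Println(v.Encode()) // path=disk%3A%2Fphotos%2F2019
}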
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python3
|
||||
#!/usr/bin/env python
|
||||
"""
|
||||
This is a tool to decrypt file names in rclone logs.
|
||||
|
||||
@@ -43,13 +43,13 @@ def map_log_file(crypt_map, log_file):
|
||||
"""
|
||||
with open(log_file) as fd:
|
||||
for line in fd:
|
||||
for cipher, plain in crypt_map.items():
|
||||
for cipher, plain in crypt_map.iteritems():
|
||||
line = line.replace(cipher, plain)
|
||||
sys.stdout.write(line)
|
||||
|
||||
def main():
|
||||
if len(sys.argv) < 3:
|
||||
print("Syntax: %s <crypt-mapping-file> <log-file>" % sys.argv[0])
|
||||
print "Syntax: %s <crypt-mapping-file> <log-file>" % sys.argv[0]
|
||||
raise SystemExit(1)
|
||||
mapping_file, log_file = sys.argv[1:]
|
||||
crypt_map = read_crypt_map(mapping_file)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python3
|
||||
#!/usr/bin/env python2
|
||||
"""
|
||||
Make backend documentation
|
||||
"""
|
||||
@@ -52,9 +52,9 @@ if __name__ == "__main__":
|
||||
for backend in find_backends():
|
||||
try:
|
||||
alter_doc(backend)
|
||||
except Exception as e:
|
||||
print("Failed adding docs for %s backend: %s" % (backend, e))
|
||||
except Exception, e:
|
||||
print "Failed adding docs for %s backend: %s" % (backend, e)
|
||||
failed += 1
|
||||
else:
|
||||
success += 1
|
||||
print("Added docs for %d backends with %d failures" % (success, failed))
|
||||
print "Added docs for %d backends with %d failures" % (success, failed)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/python3
|
||||
#!/usr/bin/python
|
||||
"""
|
||||
Generate a markdown changelog for the rclone project
|
||||
"""
|
||||
@@ -99,11 +99,10 @@ def process_log(log):
|
||||
|
||||
def main():
|
||||
if len(sys.argv) != 3:
|
||||
print("Syntax: %s vX.XX vX.XY" % sys.argv[0], file=sys.stderr)
|
||||
print >>sys.stderr, "Syntax: %s vX.XX vX.XY" % sys.argv[0]
|
||||
sys.exit(1)
|
||||
version, next_version = sys.argv[1], sys.argv[2]
|
||||
log = subprocess.check_output(["git", "log", '''--pretty=format:%H|%an|%aI|%s'''] + [version+".."+next_version])
|
||||
log = log.decode("utf-8")
|
||||
by_category = process_log(log)
|
||||
|
||||
# Output backends first so remaining in by_category are core items
|
||||
@@ -113,7 +112,7 @@ def main():
|
||||
out("local", title="Local")
|
||||
out("cache", title="Cache")
|
||||
out("crypt", title="Crypt")
|
||||
backend_names = sorted(x for x in list(by_category.keys()) if x in backends)
|
||||
backend_names = sorted(x for x in by_category.keys() if x in backends)
|
||||
for backend_name in backend_names:
|
||||
if backend_name in backend_titles:
|
||||
backend_title = backend_titles[backend_name]
|
||||
@@ -124,7 +123,7 @@ def main():
|
||||
# Split remaining in by_category into new features and fixes
|
||||
new_features = defaultdict(list)
|
||||
bugfixes = defaultdict(list)
|
||||
for name, messages in by_category.items():
|
||||
for name, messages in by_category.iteritems():
|
||||
for message in messages:
|
||||
if IS_FIX_RE.search(message):
|
||||
bugfixes[name].append(message)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python3
|
||||
#!/usr/bin/env python2
|
||||
"""
|
||||
Make single page versions of the documentation for release and
|
||||
conversion into man pages etc.
|
||||
@@ -31,8 +31,6 @@ docs = [
|
||||
"b2.md",
|
||||
"box.md",
|
||||
"cache.md",
|
||||
"chunker.md",
|
||||
"sharefile.md",
|
||||
"crypt.md",
|
||||
"dropbox.md",
|
||||
"ftp.md",
|
||||
@@ -43,7 +41,6 @@ docs = [
|
||||
"hubic.md",
|
||||
"jottacloud.md",
|
||||
"koofr.md",
|
||||
"mailru.md",
|
||||
"mega.md",
|
||||
"azureblob.md",
|
||||
"onedrive.md",
|
||||
@@ -121,8 +118,8 @@ def check_docs(docpath):
|
||||
docs_set = set(docs)
|
||||
if files == docs_set:
|
||||
return
|
||||
print("Files on disk but not in docs variable: %s" % ", ".join(files - docs_set))
|
||||
print("Files in docs variable but not on disk: %s" % ", ".join(docs_set - files))
|
||||
print "Files on disk but not in docs variable: %s" % ", ".join(files - docs_set)
|
||||
print "Files in docs variable but not on disk: %s" % ", ".join(docs_set - files)
|
||||
raise ValueError("Missing files")
|
||||
|
||||
def read_command(command):
|
||||
@@ -145,7 +142,7 @@ def read_commands(docpath):
|
||||
|
||||
def main():
|
||||
check_docs(docpath)
|
||||
command_docs = read_commands(docpath).replace("\\", "\\\\") # escape \ so we can use command_docs in re.sub
|
||||
command_docs = read_commands(docpath)
|
||||
with open(outfile, "w") as out:
|
||||
out.write("""\
|
||||
%% rclone(1) User Manual
|
||||
@@ -159,7 +156,7 @@ def main():
|
||||
if doc == "docs.md":
|
||||
contents = re.sub(r"The main rclone commands.*?for the full list.", command_docs, contents, 0, re.S)
|
||||
out.write(contents)
|
||||
print("Written '%s'" % outfile)
|
||||
print "Written '%s'" % outfile
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
bin/update-authors.py

@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 """
 Update the authors.md file with the authors from the git log
 """
@@ -23,14 +23,13 @@ def add_email(name, email):
     """
     adds the email passed in to the end of authors.md
     """
-    print("Adding %s <%s>" % (name, email))
+    print "Adding %s <%s>" % (name, email)
     with open(AUTHORS, "a+") as fd:
-        print(" * %s <%s>" % (name, email), file=fd)
+        print >>fd, " * %s <%s>" % (name, email)
     subprocess.check_call(["git", "commit", "-m", "Add %s to contributors" % name, AUTHORS])

 def main():
     out = subprocess.check_output(["git", "log", '--reverse', '--format=%an|%ae', "master"])
-    out = out.decode("utf-8")

     previous = load()
     for line in out.split("\n"):

cmd/about/about.go

@@ -9,7 +9,6 @@ import (
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/cmd"
 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config/flags"
 	"github.com/spf13/cobra"
 )

@@ -20,9 +19,8 @@ var (

 func init() {
 	cmd.Root.AddCommand(commandDefinition)
-	cmdFlags := commandDefinition.Flags()
-	flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON")
-	flags.BoolVarP(cmdFlags, &fullOutput, "full", "", false, "Full numbers instead of SI units")
+	commandDefinition.Flags().BoolVar(&jsonOutput, "json", false, "Format output as JSON")
+	commandDefinition.Flags().BoolVar(&fullOutput, "full", false, "Full numbers instead of SI units")
 }

 // printValue formats uv to be output

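Both sides of the init() hunk register the same --json and --full flags; the removed (-) lines go through rclone's fs/config/flags helpers, which take the target flag set as an explicit argument, while the added (+) lines call the cobra command's flag set inline. A standalone sketch of the two styles against spf13/cobra and pflag directly (the boolVarP wrapper is illustrative, not rclone's helper):

// flags_sketch.go - illustrative only, not part of rclone.
package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

var jsonOutput bool

// boolVarP mimics the helper style: the flag set is passed in explicitly,
// which makes it easy to add shared behaviour in one place later.
func boolVarP(flags *pflag.FlagSet, p *bool, name, shorthand string, value bool, usage string) {
	flags.BoolVarP(p, name, shorthand, value, usage)
}

func main() {
	root := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("json:", jsonOutput)
		},
	}

	// Inline style, as on the added (+) side of the hunk:
	// root.Flags().BoolVar(&jsonOutput, "json", false, "Format output as JSON")

	// Helper style, as on the removed (-) side:
	boolVarP(root.Flags(), &jsonOutput, "json", "", false, "Format output as JSON")

	_ = root.Execute()
}
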
cmd/authorize/authorize.go

@@ -3,32 +3,22 @@ package authorize

 import (
 	"github.com/rclone/rclone/cmd"
 	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/flags"
 	"github.com/spf13/cobra"
 )

-var (
-	noAutoBrowser bool
-)
-
 func init() {
-	cmd.Root.AddCommand(commandDefinition)
-	cmdFlags := commandDefinition.Flags()
-	flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser")
+	cmd.Root.AddCommand(commandDefintion)
 }

-var commandDefinition = &cobra.Command{
+var commandDefintion = &cobra.Command{
 	Use:   "authorize",
 	Short: `Remote authorization.`,
 	Long: `
 Remote authorization. Used to authorize a remote or headless
 rclone from a machine with a browser - use as instructed by
-rclone config.
-
-Use the --auth-no-open-browser to prevent rclone to open auth
-link in default browser automatically.`,
+rclone config.`,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(1, 3, command, args)
-		config.Authorize(args, noAutoBrowser)
+		config.Authorize(args)
 	},
 }

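The removed (-) side of this hunk threads a package-level noAutoBrowser flag through to config.Authorize. A minimal sketch of that wiring with a stand-in authorize function (the URL and function body are illustrative; rclone's real implementation differs):

// authorize_sketch.go - illustrative only, not part of rclone.
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

var noAutoBrowser bool

// authorize stands in for config.Authorize: it either opens the auth
// link automatically or just prints it for the user to open by hand.
func authorize(url string, noAutoOpen bool) {
	if noAutoOpen {
		fmt.Println("Open this link in your browser:", url)
		return
	}
	fmt.Println("Opening", url, "in the default browser...")
	// a real implementation would shell out to xdg-open / open / start here
}

func main() {
	cmd := &cobra.Command{
		Use: "authorize",
		Run: func(cmd *cobra.Command, args []string) {
			authorize("http://127.0.0.1:53682/auth", noAutoBrowser)
		},
	}
	cmd.Flags().BoolVar(&noAutoBrowser, "auth-no-open-browser", false, "Do not automatically open auth link in default browser")
	_ = cmd.Execute()
}
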
cmd/cat/cat.go

@@ -8,7 +8,6 @@ import (
 	"os"

 	"github.com/rclone/rclone/cmd"
-	"github.com/rclone/rclone/fs/config/flags"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
@@ -23,16 +22,15 @@ var (
 )

 func init() {
-	cmd.Root.AddCommand(commandDefinition)
-	cmdFlags := commandDefinition.Flags()
-	flags.Int64VarP(cmdFlags, &head, "head", "", head, "Only print the first N characters.")
-	flags.Int64VarP(cmdFlags, &tail, "tail", "", tail, "Only print the last N characters.")
-	flags.Int64VarP(cmdFlags, &offset, "offset", "", offset, "Start printing at offset N (or from end if -ve).")
-	flags.Int64VarP(cmdFlags, &count, "count", "", count, "Only print N characters.")
-	flags.BoolVarP(cmdFlags, &discard, "discard", "", discard, "Discard the output instead of printing.")
+	cmd.Root.AddCommand(commandDefintion)
+	commandDefintion.Flags().Int64VarP(&head, "head", "", head, "Only print the first N characters.")
+	commandDefintion.Flags().Int64VarP(&tail, "tail", "", tail, "Only print the last N characters.")
+	commandDefintion.Flags().Int64VarP(&offset, "offset", "", offset, "Start printing at offset N (or from end if -ve).")
+	commandDefintion.Flags().Int64VarP(&count, "count", "", count, "Only print N characters.")
+	commandDefintion.Flags().BoolVarP(&discard, "discard", "", discard, "Discard the output instead of printing.")
 }

-var commandDefinition = &cobra.Command{
+var commandDefintion = &cobra.Command{
 	Use:   "cat remote:path",
 	Short: `Concatenates any files and sends them to stdout.`,
 	Long: `

cmd/check/check.go

@@ -4,7 +4,6 @@ import (
 	"context"

 	"github.com/rclone/rclone/cmd"
-	"github.com/rclone/rclone/fs/config/flags"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
@@ -16,13 +15,12 @@ var (
 )

 func init() {
-	cmd.Root.AddCommand(commandDefinition)
-	cmdFlags := commandDefinition.Flags()
-	flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.")
-	flags.BoolVarP(cmdFlags, &oneway, "one-way", "", oneway, "Check one way only, source files must exist on remote")
+	cmd.Root.AddCommand(commandDefintion)
+	commandDefintion.Flags().BoolVarP(&download, "download", "", download, "Check by downloading rather than with hash.")
+	commandDefintion.Flags().BoolVarP(&oneway, "one-way", "", oneway, "Check one way only, source files must exist on remote")
 }

-var commandDefinition = &cobra.Command{
+var commandDefintion = &cobra.Command{
 	Use:   "check source:path dest:path",
 	Short: `Checks the files in the source and destination match.`,
 	Long: `

cmd/cleanup/cleanup.go

@@ -9,10 +9,10 @@ import (
 )

 func init() {
-	cmd.Root.AddCommand(commandDefinition)
+	cmd.Root.AddCommand(commandDefintion)
 }

-var commandDefinition = &cobra.Command{
+var commandDefintion = &cobra.Command{
 	Use:   "cleanup remote:path",
 	Short: `Clean up the remote if possible`,
 	Long: `