Mirror of https://github.com/rclone/rclone.git (synced 2026-01-06 18:43:50 +00:00)

Compare commits: darthShado...pr-4698-jo (87 commits)
| SHA1 |
|---|
| d3c026fb7d |
| 4e2b5389d7 |
| dc4e63631f |
| 275bf456d3 |
| 7dfa871095 |
| 70cc88de22 |
| 4bc0f46955 |
| 5b09599a23 |
| f4dd8e3fe8 |
| d0888edc0a |
| 51a230d7fd |
| fc5b14b620 |
| bbddadbd04 |
| 7428e47ebc |
| 72083c65ad |
| 70f92fd6b3 |
| a86cedbc24 |
| 0906f8dd3b |
| 664213cedb |
| 75a7226174 |
| 9e925becb6 |
| e3a5bb9b48 |
| b7eeb0e260 |
| 84d64ddabc |
| 6c9f92aee6 |
| 893297760b |
| c5c56cda02 |
| 2295123cad |
| ff0280c0cb |
| 64d736a57b |
| 5f1d5a1897 |
| aac2406e19 |
| 6dc28ef50a |
| 66def93373 |
| c58023a9ba |
| 3edc9ff0b0 |
| 8e8ae1edc7 |
| 20b00db390 |
| db4bbf9521 |
| 2b7994e739 |
| e7fbdac8e0 |
| 41ec712aa9 |
| 17acae2b00 |
| 57261c7e97 |
| d8239e0194 |
| 004c3796de |
| 18c7549770 |
| e5190f14ce |
| 433b73a5a8 |
| ab88a3341f |
| 181da3ce9b |
| b14a58c9b8 |
| 60cc2cba1f |
| c797494d88 |
| e2a57182be |
| 8928441466 |
| 0e8965060f |
| f3cf6fcdd7 |
| 18ccf0f871 |
| 313647bcf3 |
| 61fe068c90 |
| 5c49096e11 |
| a73c78545d |
| e0fd560711 |
| 6a56ac1032 |
| 96299629b4 |
| 75de30cfa8 |
| 233bed6a73 |
| b3964efe4d |
| 575f061629 |
| 640d7d3b4e |
| e92294b482 |
| 22937e8982 |
| c3d1474eb9 |
| e2426ea87b |
| e58a61175f |
| 05bea46c3e |
| c8a719ae0d |
| c3884aafd9 |
| 0a9785a4ff |
| 8140f67092 |
| 4a001b8a02 |
| 525433e6dd |
| f71f6c57d7 |
| e35623c72e |
| 344bce7e2a |
| 3a4322a7ba |
@@ -86,7 +86,7 @@ git reset --soft HEAD~2 # This squashes the 2 latest commits together.
 git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
 git commit # Add a new commit message.
 git push --force # Push the squashed commit to your GitHub repo.
-# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also reccommends wizardzines.com
+# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
 ```
 
 ## CI for your fork ##

Makefile (11 changed lines)

@@ -8,7 +8,8 @@ VERSION := $(shell cat VERSION)
 # Last tag on this branch
 LAST_TAG := $(shell git describe --tags --abbrev=0)
 # Next version
-NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
+NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}')
+NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2,$$3+1}')
 # If we are working on a release, override branch to master
 ifdef RELEASE_TAG
 BRANCH := master
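The old rule used perl to add 0.01 to the numeric part of the version; the replacement awk expressions work on the dotted components directly, bumping the minor component (and resetting the patch to 0) for NEXT_VERSION and the patch component for the new NEXT_PATCH_VERSION. A rough Go sketch of what the two expressions compute for a `vMAJOR.MINOR.PATCH` string (the function name and example values are illustrative only):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// bump splits a version like "v1.53.0" into its three numeric parts and
// increments either the minor (resetting patch to 0) or the patch component,
// mirroring what the two awk one-liners in the Makefile compute.
func bump(version string, patch bool) string {
	parts := strings.Split(strings.TrimPrefix(version, "v"), ".")
	major, _ := strconv.Atoi(parts[0])
	minor, _ := strconv.Atoi(parts[1])
	pat, _ := strconv.Atoi(parts[2])
	if patch {
		pat++ // NEXT_PATCH_VERSION: v1.53.0 -> v1.53.1
	} else {
		minor, pat = minor+1, 0 // NEXT_VERSION: v1.53.0 -> v1.54.0
	}
	return fmt.Sprintf("v%d.%d.%d", major, minor, pat)
}

func main() {
	fmt.Println(bump("v1.53.0", false)) // v1.54.0
	fmt.Println(bump("v1.53.0", true))  // v1.53.1
}
```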
@@ -246,5 +247,13 @@ startdev:
     echo "$(NEXT_VERSION)" > VERSION
     git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
 
+startstable:
+    @echo "Version is $(VERSION)"
+    @echo "Next stable version is $(NEXT_PATCH_VERSION)"
+    echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
+    echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
+    echo "$(NEXT_PATCH_VERSION)" > VERSION
+    git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
+
 winzip:
     zip -9 rclone-$(TAG).zip rclone.exe

@@ -64,6 +64,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
 * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
 * Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
+* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
 * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
 * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
 * Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)

RELEASE.md (57 changed lines)

@@ -9,7 +9,7 @@ This file describes how to make the various kinds of releases
 
 ## Making a release
 
-* git checkout master
+* git checkout master # see below for stable branch
 * git pull
 * git status - make sure everything is checked in
 * Check GitHub actions build for master is Green
@@ -21,7 +21,7 @@ This file describes how to make the various kinds of releases
 * git status - to check for new man pages - git add them
 * git commit -a -v -m "Version v1.XX.0"
 * make retag
-* git push --tags origin master
+* git push --follow-tags origin
 * # Wait for the GitHub builds to complete then...
 * make fetch_binaries
 * make tarball
@@ -31,7 +31,7 @@ This file describes how to make the various kinds of releases
 * make upload
 * make upload_website
 * make upload_github
-* make startdev
+* make startdev # make startstable for stable branch
 * # announce with forum post, twitter post, patreon post
 
 Early in the next release cycle update the dependencies
@@ -42,62 +42,35 @@ Early in the next release cycle update the dependencies
 * git add new files
 * git commit -a -v
 
-If `make update` fails with errors like this:
-
-```
-# github.com/cpuguy83/go-md2man/md2man
-../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
-../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
-```
-
-Can be fixed with
-
-* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
-* GO111MODULE=on go mod tidy
-
 
 ## Making a point release
 
 If rclone needs a point release due to some horrendous bug:
 
-First make the release branch. If this is a second point release then
-this will be done already.
+Set vars
 
 * BASE_TAG=v1.XX # eg v1.52
 * NEW_TAG=${BASE_TAG}.Y # eg v1.52.1
 * echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
 
+First make the release branch. If this is a second point release then
+this will be done already.
+
 * git branch ${BASE_TAG} ${BASE_TAG}-stable
+* git co ${BASE_TAG}-stable
+* make startstable
 
 Now
 
-* FIXME this is now broken with new semver layout - needs fixing
-* FIXME the TAG=${NEW_TAG} shouldn't be necessary any more
 * git co ${BASE_TAG}-stable
 * git cherry-pick any fixes
-* Test (see above)
-* make NEXT_VERSION=${NEW_TAG} tag
-* edit docs/content/changelog.md
-* make TAG=${NEW_TAG} doc
-* git commit -a -v -m "Version ${NEW_TAG}"
-* git tag -d ${NEW_TAG}
-* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
-* git push --tags -u origin ${BASE_TAG}-stable
-* Wait for builds to complete
-* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
-* make TAG=${NEW_TAG} tarball
-* make TAG=${NEW_TAG} sign_upload
-* make TAG=${NEW_TAG} check_sign
-* make TAG=${NEW_TAG} upload
-* make TAG=${NEW_TAG} upload_website
-* make TAG=${NEW_TAG} upload_github
+* Do the steps as above
+* make startstable
 * NB this overwrites the current beta so we need to do this
 * git co master
-* make VERSION=${NEW_TAG} startdev
-* # cherry pick the changes to the changelog and VERSION
-* git checkout ${BASE_TAG}-stable VERSION docs/content/changelog.md
-* git commit --amend
+* # cherry pick the changes to the changelog
+* git checkout ${BASE_TAG}-stable docs/content/changelog.md
+* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
 * git push
-* Announce!
 
 ## Making a manual build of docker
 

@@ -1245,15 +1245,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
     }
     blob := o.getBlobReference()
     ac := azblob.BlobAccessConditions{}
-    var dowloadResponse *azblob.DownloadResponse
+    var downloadResponse *azblob.DownloadResponse
     err = o.fs.pacer.Call(func() (bool, error) {
-        dowloadResponse, err = blob.Download(ctx, offset, count, ac, false)
+        downloadResponse, err = blob.Download(ctx, offset, count, ac, false)
         return o.fs.shouldRetry(err)
     })
     if err != nil {
         return nil, errors.Wrap(err, "failed to open for download")
     }
-    in = dowloadResponse.Body(azblob.RetryReaderOptions{})
+    in = downloadResponse.Body(azblob.RetryReaderOptions{})
     return in, nil
 }
 
@@ -1475,7 +1475,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
     }
     // FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
     // is merged the SDK can't upload a single blob of exactly the chunk
-    // size, so upload with a multpart upload to work around.
+    // size, so upload with a multipart upload to work around.
     // See: https://github.com/rclone/rclone/issues/2653
     multipartUpload := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
     if size == int64(o.fs.opt.ChunkSize) {

@@ -1013,7 +1013,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
     return info.SharedLink.URL, err
 }
 
-// deletePermanently permenently deletes a trashed file
+// deletePermanently permanently deletes a trashed file
 func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
     opts := rest.Opts{
         Method: "DELETE",
@@ -1,4 +1,4 @@
-// multpart upload for box
+// multipart upload for box
 
 package box
 

@@ -296,6 +296,8 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
         ServerSideAcrossConfigs: true,
     }).Fill(f).Mask(baseFs).WrapsFs(f, baseFs)
 
+    f.features.Disable("ListR") // Recursive listing may cause chunker skip files
+
     return f, err
 }
 
@@ -958,6 +960,8 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
         }
         info := f.wrapInfo(src, chunkRemote, size)
 
+        // Refill chunkLimit and let basePut repeatedly call chunkingReader.Read()
+        c.chunkLimit = c.chunkSize
         // TODO: handle range/limit options
         chunk, errChunk := basePut(ctx, wrapIn, info, options...)
         if errChunk != nil {
@@ -1166,10 +1170,14 @@ func (c *chunkingReader) updateHashes() {
 func (c *chunkingReader) Read(buf []byte) (bytesRead int, err error) {
     if c.chunkLimit <= 0 {
         // Chunk complete - switch to next one.
+        // Note #1:
         // We might not get here because some remotes (eg. box multi-uploader)
         // read the specified size exactly and skip the concluding EOF Read.
        // Then a check in the put loop will kick in.
-        c.chunkLimit = c.chunkSize
+        // Note #2:
+        // The crypt backend after receiving EOF here will call Read again
+        // and we must insist on returning EOF, so we postpone refilling
+        // chunkLimit to the main loop.
         return 0, io.EOF
     }
     if int64(len(buf)) > c.chunkLimit {

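The "Note #2" comment is the heart of this change: once a chunk fills up, Read must keep answering io.EOF until the put loop explicitly re-arms chunkLimit, because the crypt wrapper probes Read again after seeing EOF. A self-contained sketch of that pattern, using made-up names rather than the real chunker types:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// limitedChunkReader reads at most limit bytes per chunk from r. Once the
// limit reaches zero it keeps returning io.EOF until the caller explicitly
// refills the limit, mirroring the "postpone refilling chunkLimit" change.
type limitedChunkReader struct {
	r     io.Reader
	limit int
}

func (l *limitedChunkReader) Read(p []byte) (int, error) {
	if l.limit <= 0 {
		return 0, io.EOF // insist on EOF until refill() is called
	}
	if len(p) > l.limit {
		p = p[:l.limit]
	}
	n, err := l.r.Read(p)
	l.limit -= n
	return n, err
}

func (l *limitedChunkReader) refill(chunkSize int) { l.limit = chunkSize }

func main() {
	src := strings.NewReader("0123456789")
	cr := &limitedChunkReader{r: src, limit: 4}
	for chunk := 0; chunk < 3; chunk++ {
		data, _ := io.ReadAll(cr) // stops at the per-chunk limit
		fmt.Printf("chunk %d: %q\n", chunk, data)
		cr.refill(4) // the put loop refills before the next chunk
	}
}
```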
@@ -147,7 +147,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
 // If salt is "" we use a fixed salt just to make attackers lives
 // slighty harder than using no salt.
 //
-// Note that empty passsword makes all 0x00 keys which is used in the
+// Note that empty password makes all 0x00 keys which is used in the
 // tests.
 func (c *Cipher) Key(password, salt string) (err error) {
     const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
@@ -633,11 +633,8 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
     }
     // possibly err != nil here, but we will process the
     // data and the next call to ReadFull will return 0, err
-    // Write nonce to start of block
-    copy(fh.buf, fh.nonce[:])
     // Encrypt the block using the nonce
-    block := fh.buf
-    secretbox.Seal(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+    secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
     fh.bufIndex = 0
     fh.bufSize = blockHeaderSize + n
     fh.nonce.increment()
@@ -782,8 +779,7 @@ func (fh *decrypter) fillBuffer() (err error) {
         return ErrorEncryptedFileBadHeader
     }
     // Decrypt the block using the nonce
-    block := fh.buf
-    _, ok := secretbox.Open(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+    _, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
     if !ok {
         if err != nil {
             return err // return pending error as it is likely more accurate

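For reference, secretbox.Seal appends its output to the slice it is given, so sealing into fh.buf[:0] writes the block straight over the buffer's backing array in one call, which is why the diff can drop the intermediate block variable and the separate nonce copy. A standalone seal/open example with golang.org/x/crypto/nacl/secretbox (the key and nonce values here are illustrative, not rclone's scrypt-derived keys):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte   // rclone derives this from the password and salt
	var nonce [24]byte // rclone stores a random nonce in the file header and increments it per block
	copy(key[:], "0123456789abcdef0123456789abcdef")
	copy(nonce[:], "this is a 24 byte nonce!")

	plaintext := []byte("hello, world")
	buf := make([]byte, 0, len(plaintext)+secretbox.Overhead)

	// Seal appends to buf[:0], reusing its backing array, the same trick
	// as sealing into fh.buf[:0] in the diff above.
	sealed := secretbox.Seal(buf[:0], plaintext, &nonce, &key)

	opened, ok := secretbox.Open(nil, sealed, &nonce, &key)
	fmt.Println(ok, string(opened)) // true hello, world
}
```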
@@ -159,7 +159,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
     if strings.HasPrefix(remote, name+":") {
         return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
     }
-    // Make sure to remove trailing . reffering to the current dir
+    // Make sure to remove trailing . referring to the current dir
     if path.Base(rpath) == "." {
         rpath = strings.TrimSuffix(rpath, ".")
     }

@@ -87,7 +87,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
     }
 
     // wrap the object in a crypt for upload using the nonce we
-    // saved from the encryptor
+    // saved from the encrypter
     src := f.newObjectInfo(oi, nonce)
 
     // Test ObjectInfo methods

@@ -35,6 +35,7 @@ import (
     "github.com/rclone/rclone/fs/config/obscure"
     "github.com/rclone/rclone/fs/fserrors"
     "github.com/rclone/rclone/fs/fshttp"
+    "github.com/rclone/rclone/fs/fspath"
     "github.com/rclone/rclone/fs/hash"
     "github.com/rclone/rclone/fs/operations"
     "github.com/rclone/rclone/fs/walk"
@@ -157,6 +158,17 @@ func driveScopesContainsAppFolder(scopes []string) bool {
     return false
 }
 
+func driveOAuthOptions() []fs.Option {
+    opts := []fs.Option{}
+    for _, opt := range oauthutil.SharedOptions {
+        if opt.Name == config.ConfigClientID {
+            opt.Help = "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance."
+        }
+        opts = append(opts, opt)
+    }
+    return opts
+}
+
 // Register with Fs
 func init() {
     fs.Register(&fs.RegInfo{
@@ -192,7 +204,7 @@ func init() {
             log.Fatalf("Failed to configure team drive: %v", err)
         }
     },
-    Options: append(oauthutil.SharedOptions, []fs.Option{{
+    Options: append(driveOAuthOptions(), []fs.Option{{
         Name: "scope",
         Help: "Scope that rclone should use when requesting access from drive.",
         Examples: []fs.OptionExample{{
@@ -459,6 +471,21 @@ Note that this detection is relying on error message strings which
 Google don't document so it may break in the future.
 
 See: https://github.com/rclone/rclone/issues/3857
+`,
+        Advanced: true,
+    }, {
+        Name:    "stop_on_download_limit",
+        Default: false,
+        Help: `Make download limit errors be fatal
+
+At the time of writing it is only possible to download 10TB of data from
+Google Drive a day (this is an undocumented limit). When this limit is
+reached Google Drive produces a slightly different error message. When
+this flag is set it causes these errors to be fatal. These will stop
+the in-progress sync.
+
+Note that this detection is relying on error message strings which
+Google don't document so it may break in the future.
 `,
         Advanced: true,
     }, {
@@ -528,6 +555,7 @@ type Options struct {
     ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
     DisableHTTP2 bool `config:"disable_http2"`
     StopOnUploadLimit bool `config:"stop_on_upload_limit"`
+    StopOnDownloadLimit bool `config:"stop_on_download_limit"`
     SkipShortcuts bool `config:"skip_shortcuts"`
     Enc encoder.MultiEncoder `config:"encoding"`
 }
@@ -627,6 +655,9 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
             return false, fserrors.FatalError(err)
         }
         return true, err
+    } else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
+        fs.Errorf(f, "Received download limit error: %v", err)
+        return false, fserrors.FatalError(err)
     } else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
         fs.Errorf(f, "Received team drive file limit error: %v", err)
         return false, fserrors.FatalError(err)
@@ -2014,10 +2045,10 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-    exisitingObj, err := f.NewObject(ctx, src.Remote())
+    existingObj, err := f.NewObject(ctx, src.Remote())
     switch err {
     case nil:
-        return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
+        return existingObj, existingObj.Update(ctx, in, src, options...)
     case fs.ErrorObjectNotFound:
         // Not found so create it
         return f.PutUnchecked(ctx, in, src, options...)
@@ -2948,6 +2979,38 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
     return f.unTrash(ctx, dir, directoryID, true)
 }
 
+// copy file with id to dest
+func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
+    info, err := f.getFile(id, f.fileFields)
+    if err != nil {
+        return errors.Wrap(err, "couldn't find id")
+    }
+    if info.MimeType == driveFolderType {
+        return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
+    }
+    info.Name = f.opt.Enc.ToStandardName(info.Name)
+    o, err := f.newObjectWithInfo(info.Name, info)
+    if err != nil {
+        return err
+    }
+    destDir, destLeaf, err := fspath.Split(dest)
+    if err != nil {
+        return err
+    }
+    if destLeaf == "" {
+        destLeaf = info.Name
+    }
+    dstFs, err := cache.Get(destDir)
+    if err != nil {
+        return err
+    }
+    _, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
+    if err != nil {
+        return errors.Wrap(err, "copy failed")
+    }
+    return nil
+}
+
 var commandHelp = []fs.CommandHelp{{
     Name:  "get",
     Short: "Get command for fetching the drive config parameters",
@@ -3048,6 +3111,29 @@ Result:
         "Errors": 0
     }
 `,
+}, {
+    Name:  "copyid",
+    Short: "Copy files by ID",
+    Long: `This command copies files by ID
+
+Usage:
+
+    rclone backend copyid drive: ID path
+    rclone backend copyid drive: ID1 path1 ID2 path2
+
+It copies the drive file with ID given to the path (an rclone path which
+will be passed internally to rclone copyto). The ID and path pairs can be
+repeated.
+
+The path should end with a / to indicate copy the file as named to
+this directory. If it doesn't end with a / then the last path
+component will be used as the file name.
+
+If the destination is a drive backend then server side copying will be
+attempted if possible.
+
+Use the -i flag to see what would be copied before copying.
+`,
 }}
 
 // Command the backend to run a named command
@@ -3119,6 +3205,19 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
             dir = arg[0]
         }
         return f.unTrashDir(ctx, dir, true)
+    case "copyid":
+        if len(arg)%2 != 0 {
+            return nil, errors.New("need an even number of arguments")
+        }
+        for len(arg) > 0 {
+            id, dest := arg[0], arg[1]
+            arg = arg[2:]
+            err = f.copyID(ctx, id, dest)
+            if err != nil {
+                return nil, errors.Wrapf(err, "failed copying %q to %q", id, dest)
+            }
+        }
+        return nil, nil
     default:
         return nil, fs.ErrorCommandNotFound
     }

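The trailing-slash rule in the copyid help falls out of splitting the destination into a directory and a leaf: an empty leaf means the source file name is kept. The sketch below illustrates the rule with the standard library's path.Split; rclone's fspath.Split plays the equivalent role in copyID but also understands remote:path syntax (that detail is an assumption for illustration, not taken from the diff):

```go
package main

import (
	"fmt"
	"path"
)

// destName mirrors the copyid naming rule: if the destination ends with "/"
// the leaf is empty and the source name is kept, otherwise the last path
// component becomes the new file name.
func destName(dest, srcName string) (string, string) {
	dir, leaf := path.Split(dest)
	if leaf == "" {
		leaf = srcName
	}
	return dir, leaf
}

func main() {
	fmt.Println(destName("backup/drive-files/", "report.docx")) // backup/drive-files/ report.docx
	fmt.Println(destName("backup/renamed.docx", "report.docx")) // backup/ renamed.docx
}
```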
@@ -7,6 +7,8 @@ import (
     "io"
     "io/ioutil"
     "mime"
+    "os"
+    "path"
     "path/filepath"
     "strings"
     "testing"
@@ -272,14 +274,15 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
     }
 }
 
+const (
+    // from fstest/fstests/fstests.go
+    existingDir    = "hello? sausage"
+    existingFile   = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
+    existingSubDir = "êé"
+)
+
 // TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
 func (f *Fs) InternalTestShortcuts(t *testing.T) {
-    const (
-        // from fstest/fstests/fstests.go
-        existingDir    = "hello? sausage"
-        existingFile   = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
-        existingSubDir = "êé"
-    )
     ctx := context.Background()
     srcObj, err := f.NewObject(ctx, existingFile)
     require.NoError(t, err)
@@ -408,6 +411,55 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
     require.NoError(t, f.Purge(ctx, "trashDir"))
 }
 
+// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
+func (f *Fs) InternalTestCopyID(t *testing.T) {
+    ctx := context.Background()
+    obj, err := f.NewObject(ctx, existingFile)
+    require.NoError(t, err)
+    o := obj.(*Object)
+
+    dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
+    require.NoError(t, err)
+    defer func() {
+        _ = os.RemoveAll(dir)
+    }()
+
+    checkFile := func(name string) {
+        filePath := filepath.Join(dir, name)
+        fi, err := os.Stat(filePath)
+        require.NoError(t, err)
+        assert.Equal(t, int64(100), fi.Size())
+        err = os.Remove(filePath)
+        require.NoError(t, err)
+    }
+
+    t.Run("BadID", func(t *testing.T) {
+        err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
+        require.Error(t, err)
+        assert.Contains(t, err.Error(), "couldn't find id")
+    })
+
+    t.Run("Directory", func(t *testing.T) {
+        rootID, err := f.dirCache.RootID(ctx, false)
+        require.NoError(t, err)
+        err = f.copyID(ctx, rootID, dir+"/")
+        require.Error(t, err)
+        assert.Contains(t, err.Error(), "can't copy directory")
+    })
+
+    t.Run("WithoutDestName", func(t *testing.T) {
+        err = f.copyID(ctx, o.id, dir+"/")
+        require.NoError(t, err)
+        checkFile(path.Base(existingFile))
+    })
+
+    t.Run("WithDestName", func(t *testing.T) {
+        err = f.copyID(ctx, o.id, dir+"/potato.txt")
+        require.NoError(t, err)
+        checkFile("potato.txt")
+    })
+}
+
 func (f *Fs) InternalTest(t *testing.T) {
     // These tests all depend on each other so run them as nested tests
     t.Run("DocumentImport", func(t *testing.T) {
@@ -424,6 +476,7 @@ func (f *Fs) InternalTest(t *testing.T) {
     })
     t.Run("Shortcuts", f.InternalTestShortcuts)
     t.Run("UnTrash", f.InternalTestUnTrash)
+    t.Run("CopyID", f.InternalTestCopyID)
 }
 
 var _ fstests.InternalTester = (*Fs)(nil)

@@ -142,6 +142,31 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
     Help:     "Impersonate this user when using a business account.",
     Default:  "",
     Advanced: true,
+}, {
+    Name: "shared_files",
+    Help: `Instructs rclone to work on individual shared files.
+
+In this mode rclone's features are extremely limited - only list (ls, lsl, etc.)
+operations and read operations (e.g. downloading) are supported in this mode.
+All other operations will be disabled.`,
+    Default:  false,
+    Advanced: true,
+}, {
+    Name: "shared_folders",
+    Help: `Instructs rclone to work on shared folders.
+
+When this flag is used with no path only the List operation is supported and
+all available shared folders will be listed. If you specify a path the first part
+will be interpreted as the name of shared folder. Rclone will then try to mount this
+shared to the root namespace. On success shared folder rclone proceeds normally.
+The shared folder is now pretty much a normal folder and all normal operations
+are supported.
+
+Note that we don't unmount the shared folder afterwards so the
+--dropbox-shared-folders can be omitted after the first use of a particular
+shared folder.`,
+    Default:  false,
+    Advanced: true,
 }, {
     Name: config.ConfigEncoding,
     Help: config.ConfigEncodingHelp,
@@ -161,9 +186,11 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
 
 // Options defines the configuration for this backend
 type Options struct {
     ChunkSize fs.SizeSuffix `config:"chunk_size"`
     Impersonate string `config:"impersonate"`
-    Enc encoder.MultiEncoder `config:"encoding"`
+    SharedFiles bool `config:"shared_files"`
+    SharedFolders bool `config:"shared_folders"`
+    Enc encoder.MultiEncoder `config:"encoding"`
 }
 
 // Fs represents a remote dropbox server
@@ -186,7 +213,9 @@ type Fs struct {
 //
 // Dropbox Objects always have full metadata
 type Object struct {
     fs *Fs // what this object is part of
+    id string
+    url string
     remote string // The remote path
     bytes int64 // size of the object
     modTime time.Time // time it was last modified

@@ -332,8 +361,60 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     CaseInsensitive:         true,
     ReadMimeType:            true,
     CanHaveEmptyDirectories: true,
-}).Fill(f)
-f.setRoot(root)
+})
+
+// do not fill features yet
+if f.opt.SharedFiles {
+    f.setRoot(root)
+    if f.root == "" {
+        return f, nil
+    }
+    _, err := f.findSharedFile(f.root)
+    f.root = ""
+    if err == nil {
+        return f, fs.ErrorIsFile
+    }
+    return f, nil
+}
+
+if f.opt.SharedFolders {
+    f.setRoot(root)
+    if f.root == "" {
+        return f, nil // our root it empty so we probably want to list shared folders
+    }
+
+    dir := path.Dir(f.root)
+    if dir == "." {
+        dir = f.root
+    }
+
+    // root is not empty so we have find the right shared folder if it exists
+    id, err := f.findSharedFolder(dir)
+    if err != nil {
+        // if we didn't find the specified shared folder we have to bail out here
+        return nil, err
+    }
+    // we found the specified shared folder so let's mount it
+    // this will add it to the users normal root namespace and allows us
+    // to actually perform operations on it using the normal api endpoints.
+    err = f.mountSharedFolder(id)
+    if err != nil {
+        switch e := err.(type) {
+        case sharing.MountFolderAPIError:
+            if e.EndpointError == nil || (e.EndpointError != nil && e.EndpointError.Tag != sharing.MountFolderErrorAlreadyMounted) {
+                return nil, err
+            }
+        default:
+            return nil, err
+        }
+        // if the moint failed we have to abort here
+    }
+    // if the mount succeeded it's now a normal folder in the users root namespace
+    // we disable shared folder mode and proceed normally
+    f.opt.SharedFolders = false
+}
+
+f.features.Fill(f)
 
 // If root starts with / then use the actual root
 if strings.HasPrefix(root, "/") {
@@ -355,6 +436,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     }
     fs.Debugf(f, "Using root namespace %q", f.ns)
 }
+f.setRoot(root)
 
 // See if the root is actually an object
 _, err = f.getFileMetadata(f.slashRoot)

@@ -465,9 +547,150 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+    if f.opt.SharedFiles {
+        return f.findSharedFile(remote)
+    }
     return f.newObjectWithInfo(remote, nil)
 }
 
+// listSharedFoldersApi lists all available shared folders mounted and not mounted
+// we'll need the id later so we have to return them in original format
+func (f *Fs) listSharedFolders() (entries fs.DirEntries, err error) {
+    started := false
+    var res *sharing.ListFoldersResult
+    for {
+        if !started {
+            arg := sharing.ListFoldersArgs{
+                Limit: 100,
+            }
+            err := f.pacer.Call(func() (bool, error) {
+                res, err = f.sharing.ListFolders(&arg)
+                return shouldRetry(err)
+            })
+            if err != nil {
+                return nil, err
+            }
+            started = true
+        } else {
+            arg := sharing.ListFoldersContinueArg{
+                Cursor: res.Cursor,
+            }
+            err := f.pacer.Call(func() (bool, error) {
+                res, err = f.sharing.ListFoldersContinue(&arg)
+                return shouldRetry(err)
+            })
+            if err != nil {
+                return nil, errors.Wrap(err, "list continue")
+            }
+        }
+        for _, entry := range res.Entries {
+            leaf := f.opt.Enc.ToStandardName(entry.Name)
+            d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
+            entries = append(entries, d)
+            if err != nil {
+                return nil, err
+            }
+        }
+        if res.Cursor == "" {
+            break
+        }
+    }
+
+    return entries, nil
+}
+
+// findSharedFolder find the id for a given shared folder name
+// somewhat annoyingly there is no endpoint to query a shared folder by it's name
+// so our only option is to iterate over all shared folders
+func (f *Fs) findSharedFolder(name string) (id string, err error) {
+    entries, err := f.listSharedFolders()
+    if err != nil {
+        return "", err
+    }
+    for _, entry := range entries {
+        if entry.(*fs.Dir).Remote() == name {
+            return entry.(*fs.Dir).ID(), nil
+        }
+    }
+    return "", fs.ErrorDirNotFound
+}
+
+// mountSharedFolders mount a shared folder to the root namespace
+func (f *Fs) mountSharedFolder(id string) error {
+    arg := sharing.MountFolderArg{
+        SharedFolderId: id,
+    }
+    err := f.pacer.Call(func() (bool, error) {
+        _, err := f.sharing.MountFolder(&arg)
+        return shouldRetry(err)
+    })
+    return err
+}
+
+// listSharedFolders lists shared the user as access to (note this means individual
+// files not files contained in shared folders)
+func (f *Fs) listReceivedFiles() (entries fs.DirEntries, err error) {
+    started := false
+    var res *sharing.ListFilesResult
+    for {
+        if !started {
+            arg := sharing.ListFilesArg{
+                Limit: 100,
+            }
+            err := f.pacer.Call(func() (bool, error) {
+                res, err = f.sharing.ListReceivedFiles(&arg)
+                return shouldRetry(err)
+            })
+            if err != nil {
+                return nil, err
+            }
+            started = true
+        } else {
+            arg := sharing.ListFilesContinueArg{
+                Cursor: res.Cursor,
+            }
+            err := f.pacer.Call(func() (bool, error) {
+                res, err = f.sharing.ListReceivedFilesContinue(&arg)
+                return shouldRetry(err)
+            })
+            if err != nil {
+                return nil, errors.Wrap(err, "list continue")
+            }
+        }
+        for _, entry := range res.Entries {
+            fmt.Printf("%+v\n", entry)
+            entryPath := entry.Name
+            o := &Object{
+                fs:      f,
+                url:     entry.PreviewUrl,
+                remote:  entryPath,
+                modTime: entry.TimeInvited,
+            }
+            if err != nil {
+                return nil, err
+            }
+            entries = append(entries, o)
+        }
+        if res.Cursor == "" {
+            break
+        }
+    }
+    return entries, nil
+}
+
+func (f *Fs) findSharedFile(name string) (o *Object, err error) {
+    files, err := f.listReceivedFiles()
+    if err != nil {
+        return nil, err
+    }
+    for _, entry := range files {
+        if entry.(*Object).remote == name {
+            return entry.(*Object), nil
+        }
+    }
+    return nil, fs.ErrorObjectNotFound
+}
+
 // List the objects and directories in dir into entries. The
 // entries can be returned in any order but should be for a
 // complete directory.

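Both listing helpers above follow the same cursor-continuation pattern: issue an initial request, then keep calling the "...Continue" endpoint with the returned cursor until the cursor comes back empty. A generic sketch of that loop, independent of the Dropbox SDK (the page and fetch types here are made up for illustration):

```go
package main

import "fmt"

// page is a stand-in for an API response carrying items plus a continuation cursor.
type page struct {
	Items  []string
	Cursor string
}

// listAll drains a paginated listing: fetch stands in for the initial call and
// fetchContinue for the "...Continue" endpoint; a real client would make HTTP
// requests here.
func listAll(fetch func() (page, error), fetchContinue func(cursor string) (page, error)) ([]string, error) {
	var all []string
	res, err := fetch()
	for {
		if err != nil {
			return nil, err
		}
		all = append(all, res.Items...)
		if res.Cursor == "" { // an empty cursor means there are no more pages
			return all, nil
		}
		res, err = fetchContinue(res.Cursor)
	}
}

func main() {
	pages := []page{
		{Items: []string{"shared-a", "shared-b"}, Cursor: "c1"},
		{Items: []string{"shared-c"}, Cursor: ""},
	}
	i := 0
	next := func() (page, error) { p := pages[i]; i++; return p, nil }
	got, _ := listAll(next, func(string) (page, error) { return next() })
	fmt.Println(got) // [shared-a shared-b shared-c]
}
```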
@@ -478,6 +701,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+    if f.opt.SharedFiles {
+        return f.listReceivedFiles()
+    }
+    if f.opt.SharedFolders {
+        return f.listSharedFolders()
+    }
+
     root := f.slashRoot
     if dir != "" {
         root += "/" + dir
@@ -541,7 +771,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
     leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
     remote := path.Join(dir, leaf)
     if folderInfo != nil {
-        d := fs.NewDir(remote, time.Now())
+        d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
         entries = append(entries, d)
     } else if fileInfo != nil {
         o, err := f.newObjectWithInfo(remote, fileInfo)
@@ -564,6 +794,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+    if f.opt.SharedFiles || f.opt.SharedFolders {
+        return nil, fserrors.NoRetryError(errors.New("not support in shared files mode"))
+    }
     // Temporary Object under construction
     o := &Object{
         fs: f,
@@ -579,6 +812,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 
 // Mkdir creates the container if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+    if f.opt.SharedFiles || f.opt.SharedFolders {
+        return fserrors.NoRetryError(errors.New("not support in shared files mode"))
+    }
     root := path.Join(f.slashRoot, dir)
 
     // can't create or run metadata on root
@@ -656,6 +892,9 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 //
 // Returns an error if it isn't empty
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+    if f.opt.SharedFiles || f.opt.SharedFolders {
+        return fserrors.NoRetryError(errors.New("not support in shared files mode"))
+    }
     return f.purgeCheck(ctx, dir, true)
 }
 
@@ -927,8 +1166,16 @@ func (o *Object) Remote() string {
     return o.remote
 }
 
+// ID returns the object id
+func (o *Object) ID() string {
+    return o.id
+}
+
 // Hash returns the dropbox special hash
 func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+    if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
+        return "", fserrors.NoRetryError(errors.New("not support in shared files mode"))
+    }
     if t != DbHashType {
         return "", hash.ErrUnsupported
     }
@@ -946,8 +1193,9 @@ func (o *Object) Size() int64 {
 
 // setMetadataFromEntry sets the fs data from a files.FileMetadata
 //
-// This isn't a complete set of metadata and has an inacurate date
+// This isn't a complete set of metadata and has an inaccurate date
 func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
+    o.id = info.Id
     o.bytes = int64(info.Size)
     o.modTime = info.ClientModified
     o.hash = info.ContentHash
@@ -1016,10 +1264,27 @@ func (o *Object) Storable() bool {
 
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+    if o.fs.opt.SharedFiles {
+        if len(options) != 0 {
+            return nil, errors.New("OpenOptions not supported for shared files")
+        }
+        arg := sharing.GetSharedLinkMetadataArg{
+            Url: o.url,
+        }
+        err = o.fs.pacer.Call(func() (bool, error) {
+            _, in, err = o.fs.sharing.GetSharedLinkFile(&arg)
+            return shouldRetry(err)
+        })
+        if err != nil {
+            return nil, err
+        }
+        return
+    }
+
     fs.FixRangeOption(options, o.bytes)
     headers := fs.OpenOptionHeaders(options)
     arg := files.DownloadArg{
-        Path:         o.fs.opt.Enc.FromStandardPath(o.remotePath()),
+        Path:         o.id,
         ExtraHeaders: headers,
     }
     err = o.fs.pacer.Call(func() (bool, error) {
@@ -1153,6 +1418,9 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+    if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
+        return fserrors.NoRetryError(errors.New("not support in shared files mode"))
+    }
     remote := o.remotePath()
     if ignoredFiles.MatchString(remote) {
         return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
@@ -1181,6 +1449,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 // Remove an object
 func (o *Object) Remove(ctx context.Context) (err error) {
+    if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
+        return fserrors.NoRetryError(errors.New("not support in shared files mode"))
+    }
     err = o.fs.pacer.Call(func() (bool, error) {
         _, err = o.fs.srv.DeleteV2(&files.DeleteArg{
             Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
@@ -1201,4 +1472,5 @@ var (
     _ fs.DirMover = (*Fs)(nil)
     _ fs.Abouter  = (*Fs)(nil)
     _ fs.Object   = (*Object)(nil)
+    _ fs.IDer     = (*Object)(nil)
 )

@@ -306,10 +306,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // will return the object and the error, otherwise will return
 // nil and the error
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-    exisitingObj, err := f.NewObject(ctx, src.Remote())
+    existingObj, err := f.NewObject(ctx, src.Remote())
     switch err {
     case nil:
-        return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
+        return existingObj, existingObj.Update(ctx, in, src, options...)
     case fs.ErrorObjectNotFound:
         // Not found so create it
         return f.PutUnchecked(ctx, in, src, options...)
@@ -323,7 +323,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 // This will create a duplicate if we upload a new file without
 // checking to see if there is one already - use Put() for that.
 func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
-    if size > int64(100e9) {
+    if size > int64(300e9) {
         return nil, errors.New("File too big, cant upload")
     } else if size == 0 {
         return nil, fs.ErrorCantUploadEmptyFiles

@@ -6,7 +6,6 @@ import (
 	"crypto/tls"
 	"io"
 	"net/textproto"
-	"os"
 	"path"
 	"runtime"
 	"strings"
@@ -22,10 +21,15 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/encoder"
+	"github.com/rclone/rclone/lib/env"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/readers"
 )
 
+var (
+	currentUser = env.CurrentUser()
+)
+
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -42,7 +46,7 @@ func init() {
 		}},
 	}, {
 		Name: "user",
-		Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
+		Help: "FTP username, leave blank for current username, " + currentUser,
 	}, {
 		Name: "port",
 		Help: "FTP port, leave blank to use default (21)",
@@ -311,7 +315,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 	}
 	user := opt.User
 	if user == "" {
-		user = os.Getenv("USER")
+		user = currentUser
 	}
 	port := opt.Port
 	if port == "" {
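The hunks above replace `os.Getenv("USER")` with a `currentUser` helper from `lib/env` because the `USER` environment variable is normally unset on Windows. A minimal sketch of what such a helper might do; the exact fallback order here is an assumption for illustration, not rclone's actual implementation:

```go
package env

import (
	"os"
	"os/user"
)

// CurrentUser returns the name of the logged-in user, falling back to
// environment variables when os/user is unavailable (for example in some
// cross-compiled or static builds). The fallback order is an assumption.
func CurrentUser() string {
	if u, err := user.Current(); err == nil && u.Username != "" {
		return u.Username
	}
	// $USER is the usual variable on Unix, USERNAME on Windows.
	if name := os.Getenv("USER"); name != "" {
		return name
	}
	return os.Getenv("USERNAME")
}
```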
@@ -115,7 +115,7 @@ func TestIntegration(t *testing.T) {
 		assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
 	})
 
-	// Check it is there in the date/month/year heirachy
+	// Check it is there in the date/month/year hierarchy
 	// 2013-07-13 is the creation date of the folder
 	checkPresent := func(t *testing.T, objPath string) {
 		entries, err := f.List(ctx, objPath)
@@ -4,7 +4,7 @@ package hubic
 
 // This uses the normal swift mechanism to update the credentials and
 // ignores the expires field returned by the Hubic API. This may need
-// to be revisted after some actual experience.
+// to be revisited after some actual experience.
 
 import (
 	"context"
@@ -153,9 +153,9 @@ type CustomerInfo struct {
 	AccountType      string      `json:"account_type"`
 	SubscriptionType string      `json:"subscription_type"`
 	Usage            int64       `json:"usage"`
-	Qouta            int64       `json:"quota"`
+	Quota            int64       `json:"quota"`
 	BusinessUsage    int64       `json:"business_usage"`
-	BusinessQouta    int64       `json:"business_quota"`
+	BusinessQuota    int64       `json:"business_quota"`
 	WriteLocked      bool        `json:"write_locked"`
 	ReadLocked       bool        `json:"read_locked"`
 	LockedCause      interface{} `json:"locked_cause"`
@@ -386,7 +386,7 @@ type Error struct {
 	Cause string `xml:"cause"`
 }
 
-// Error returns a string for the error and statistifes the error interface
+// Error returns a string for the error and satisfies the error interface
 func (e *Error) Error() string {
 	out := fmt.Sprintf("error %d", e.StatusCode)
 	if e.Message != "" {
@@ -107,7 +107,7 @@ func init() {
 		}
 	}
 
-	fmt.Printf("Use legacy authentification?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n")
+	fmt.Printf("Use legacy authentication?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n")
 	if config.Confirm(false) {
 		v1config(ctx, name, m)
 	} else {
@@ -230,7 +230,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
 
-// v1config configure a jottacloud backend using legacy authentification
+// v1config configure a jottacloud backend using legacy authentication
 func v1config(ctx context.Context, name string, m configmap.Mapper) {
 	srv := rest.NewClient(fshttp.NewClient(fs.Config))
 
@@ -323,7 +323,7 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
 	return deviceRegistration, err
 }
 
-// doAuthV1 runs the actual token request for V1 authentification
+// doAuthV1 runs the actual token request for V1 authentication
 func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
 	// prepare out token request with username and password
 	values := url.Values{}
@@ -365,7 +365,7 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
 	return token, err
 }
 
-// v2config configure a jottacloud backend using the modern JottaCli token based authentification
+// v2config configure a jottacloud backend using the modern JottaCli token based authentication
 func v2config(ctx context.Context, name string, m configmap.Mapper) {
 	srv := rest.NewClient(fshttp.NewClient(fs.Config))
 
@@ -373,6 +373,9 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
 	fmt.Printf("Login Token> ")
 	loginToken := config.ReadLine()
 
+	m.Set(configClientID, "jottacli")
+	m.Set(configClientSecret, "")
+
 	token, err := doAuthV2(ctx, srv, loginToken, m)
 	if err != nil {
 		log.Fatalf("Failed to get oauth token: %s", err)
@@ -384,7 +387,6 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
 
 	fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
 	if config.Confirm(false) {
-	oauthConfig.ClientID = "jottacli"
 	oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
 	if err != nil {
 		log.Fatalf("Failed to load oAuthClient: %s", err)
@@ -403,7 +405,7 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
 	m.Set("configVersion", strconv.Itoa(configVersion))
 }
 
-// doAuthV2 runs the actual token request for V2 authentification
+// doAuthV2 runs the actual token request for V2 authentication
 func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
 	loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
 	if err != nil {
@@ -551,7 +553,7 @@ func (f *Fs) setEndpointURL() {
 	if f.opt.Mountpoint == "" {
 		f.opt.Mountpoint = defaultMountpoint
 	}
-	f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
+	f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
 }
 
 // readMetaDataForPath reads the metadata from the path
@@ -1087,8 +1089,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
-		retry, _ := shouldRetry(resp, err)
-		return (retry && resp.StatusCode != 500), err
+		return shouldRetry(resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -1192,18 +1193,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 
 	_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
 
-	// surprise! jottacloud fucked up dirmove - the api spits out an error but
-	// dir gets moved regardless
-	if apiErr, ok := err.(*api.Error); ok {
-		if apiErr.StatusCode == 500 {
-			_, err := f.NewObject(ctx, dstRemote)
-			if err == fs.ErrorNotAFile {
-				log.Printf("FIXME: ignoring DirMove error - move succeeded anyway\n")
-				return nil
-			}
-			return err
-		}
-	}
 	if err != nil {
 		return errors.Wrap(err, "couldn't move directory")
 	}
@@ -1523,7 +1512,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return err
 	}
 
-	// If the file state is INCOMPLETE and CORRPUT, try to upload a then
+	// If the file state is INCOMPLETE and CORRUPT, try to upload a then
 	if response.State != "COMPLETED" {
 		// how much do we still have to upload?
 		remainingBytes := size - response.ResumePos
@@ -1213,7 +1213,7 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
 	// Set the file to be a sparse file (important on Windows)
 	err = file.SetSparse(out)
 	if err != nil {
-		fs.Debugf(o, "Failed to set sparse: %v", err)
+		fs.Errorf(o, "Failed to set sparse: %v", err)
 	}
 }
 
@@ -1231,6 +1231,15 @@ func (o *Object) setMetadata(info os.FileInfo) {
 	o.modTime = info.ModTime()
 	o.mode = info.Mode()
 	o.fs.objectMetaMu.Unlock()
+	// On Windows links read as 0 size so set the correct size here
+	if runtime.GOOS == "windows" && o.translatedLink {
+		linkdst, err := os.Readlink(o.path)
+		if err != nil {
+			fs.Errorf(o, "Failed to read link size: %v", err)
+		} else {
+			o.size = int64(len(linkdst))
+		}
+	}
 }
 
 // Stat an Object into info
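The hunk above works because POSIX defines a symlink's reported size as the length of the target path, while Lstat on Windows typically reports 0 for links. A small standalone check of that behaviour, not part of rclone (the file names are only illustrative):

```go
package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	const target = "some/target/path.txt" // 20 bytes
	const link = "example-symlink"

	if err := os.Symlink(target, link); err != nil {
		log.Fatal(err) // needs symlink support/privileges
	}
	defer os.Remove(link)

	fi, err := os.Lstat(link)
	if err != nil {
		log.Fatal(err)
	}
	dst, err := os.Readlink(link)
	if err != nil {
		log.Fatal(err)
	}
	// On Unix fi.Size() equals len(dst); on Windows Lstat typically reports 0,
	// which is why the patch falls back to len(os.Readlink(...)).
	fmt.Println(fi.Size(), len(dst))
}
```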
@@ -6,7 +6,6 @@ import (
 	"os"
 	"path"
 	"path/filepath"
-	"runtime"
 	"testing"
 	"time"
 
@@ -89,9 +88,6 @@ func TestSymlink(t *testing.T) {
 
 	// Object viewed as symlink
 	file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
-	if runtime.GOOS == "windows" {
-		file2.Size = 0 // symlinks are 0 length under Windows
-	}
 
 	// Object viewed as destination
 	file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -121,9 +117,6 @@ func TestSymlink(t *testing.T) {
 	// Create a symlink
 	modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
 	file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
-	if runtime.GOOS == "windows" {
-		file3.Size = 0 // symlinks are 0 length under Windows
-	}
 	fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
 	if haveLChtimes {
 		fstest.CheckItems(t, r.Flocal, file1, file2, file3)
@@ -142,9 +135,7 @@ func TestSymlink(t *testing.T) {
 	o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
 	require.NoError(t, err)
 	assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
-	if runtime.GOOS != "windows" {
-		assert.Equal(t, int64(8), o.Size())
-	}
+	assert.Equal(t, int64(8), o.Size())
 
 	// Check that NewObject doesn't see the non suffixed version
 	_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
@@ -117,7 +117,7 @@ type ListItem struct {
 	Name      string `json:"name"`
 	Home      string `json:"home"`
 	Size      int64  `json:"size"`
-	Mtime     int64  `json:"mtime,omitempty"`
+	Mtime     uint64 `json:"mtime,omitempty"`
 	Hash      string `json:"hash,omitempty"`
 	VirusScan string `json:"virus_scan,omitempty"`
 	Tree      string `json:"tree,omitempty"`
@@ -159,71 +159,6 @@ type FolderInfoResponse struct {
 	Email string `json:"email"`
 }
 
-// ShardInfoResponse ...
-type ShardInfoResponse struct {
-	Email string `json:"email"`
-	Body  struct {
-		Video []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"video"`
-		ViewDirect []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"view_direct"`
-		WeblinkView []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"weblink_view"`
-		WeblinkVideo []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"weblink_video"`
-		WeblinkGet []struct {
-			Count int    `json:"count"`
-			URL   string `json:"url"`
-		} `json:"weblink_get"`
-		Stock []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"stock"`
-		WeblinkThumbnails []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"weblink_thumbnails"`
-		PublicUpload []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"public_upload"`
-		Auth []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"auth"`
-		Web []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"web"`
-		View []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"view"`
-		Upload []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"upload"`
-		Get []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"get"`
-		Thumbnails []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"thumbnails"`
-	} `json:"body"`
-	Time   int64 `json:"time"`
-	Status int   `json:"status"`
-}
-
 // CleanupResponse ...
 type CleanupResponse struct {
 	Email string `json:"email"`
@@ -37,6 +37,7 @@ import (
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
+	"github.com/rclone/rclone/lib/readers"
 	"github.com/rclone/rclone/lib/rest"
 
 	"github.com/pkg/errors"
@@ -191,7 +192,7 @@ This option must not be used by an ordinary user. It is intended only to
 facilitate remote troubleshooting of backend issues. Strict meaning of
 flags is not documented and not guaranteed to persist between releases.
 Quirks will be removed when the backend grows stable.
-Supported quirks: atomicmkdir binlist gzip insecure retry400`,
+Supported quirks: atomicmkdir binlist`,
 		}, {
 			Name: config.ConfigEncoding,
 			Help: config.ConfigEncodingHelp,
@@ -237,9 +238,6 @@ func shouldRetry(res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, e
 		reAuthErr := f.reAuthorize(opts, err)
 		return reAuthErr == nil, err // return an original error
 	}
-	if res != nil && res.StatusCode == 400 && f.quirks.retry400 {
-		return true, err
-	}
 	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
 }
 
@@ -275,7 +273,7 @@ type Fs struct {
 	root         string       // root path
 	opt          Options      // parsed options
 	speedupGlobs []string     // list of file name patterns eligible for speedup
-	speedupAny   bool         // true if all file names are aligible for speedup
+	speedupAny   bool         // true if all file names are eligible for speedup
 	features     *fs.Features // optional features
 	srv          *rest.Client // REST API client
 	cli          *http.Client // underlying HTTP client (for authorize)
@@ -341,7 +339,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if opt.UserAgent != "" {
 		clientConfig.UserAgent = opt.UserAgent
 	}
-	clientConfig.NoGzip = !f.quirks.gzip // Send not "Accept-Encoding: gzip" like official client
+	clientConfig.NoGzip = true // Mimic official client, skip sending "Accept-Encoding: gzip"
 	f.cli = fshttp.NewClient(&clientConfig)
 
 	f.srv = rest.NewClient(f.cli)
@@ -349,12 +347,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	f.srv.SetHeader("Accept", "*/*") // Send "Accept: */*" with every request like official client
 	f.srv.SetErrorHandler(errorHandler)
 
-	if f.quirks.insecure {
-		transport := f.cli.Transport.(*fshttp.Transport).Transport
-		transport.TLSClientConfig.InsecureSkipVerify = true
-		transport.ProxyConnectHeader = http.Header{"User-Agent": {clientConfig.UserAgent}}
-	}
-
 	if err = f.authorize(ctx, false); err != nil {
 		return nil, err
 	}
@@ -387,30 +379,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 // Internal maintenance flags (to be removed when the backend matures).
 // Primarily intended to facilitate remote support and troubleshooting.
 type quirks struct {
-	gzip        bool
-	insecure    bool
 	binlist     bool
 	atomicmkdir bool
-	retry400    bool
 }
 
 func (q *quirks) parseQuirks(option string) {
 	for _, flag := range strings.Split(option, ",") {
 		switch strings.ToLower(strings.TrimSpace(flag)) {
-		case "gzip":
-			// This backend mimics the official client which never sends the
-			// "Accept-Encoding: gzip" header. However, enabling compression
-			// might be good for performance.
-			// Use this quirk to investigate the performance impact.
-			// Remove this quirk if performance does not improve.
-			q.gzip = true
-		case "insecure":
-			// The mailru disk-o protocol is not documented. To compare HTTP
-			// stream against the official client one can use Telerik Fiddler,
-			// which introduces a self-signed certificate. This quirk forces
-			// the Go http layer to accept it.
-			// Remove this quirk when the backend reaches maturity.
-			q.insecure = true
 		case "binlist":
 			// The official client sometimes uses a so called "bin" protocol,
 			// implemented in the listBin file system method below. This method
@@ -423,18 +398,11 @@ func (q *quirks) parseQuirks(option string) {
 		case "atomicmkdir":
 			// At the moment rclone requires Mkdir to return success if the
 			// directory already exists. However, such programs as borgbackup
-			// or restic use mkdir as a locking primitive and depend on its
-			// atomicity. This quirk is a workaround. It can be removed
-			// when the above issue is investigated.
+			// use mkdir as a locking primitive and depend on its atomicity.
+			// Remove this quirk when the above issue is investigated.
 			q.atomicmkdir = true
-		case "retry400":
-			// This quirk will help in troubleshooting a very rare "Error 400"
-			// issue. It can be removed if the problem does not show up
-			// for a year or so. See the below issue:
-			// https://github.com/ivandeex/rclone/issues/14
-			q.retry400 = true
 		default:
-			// Just ignore all unknown flags
+			// Ignore unknown flags
 		}
 	}
 }
@@ -655,9 +623,14 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
 	if err != nil {
 		return nil, -1, err
 	}
+	mTime := int64(item.Mtime)
+	if mTime < 0 {
+		fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
+		mTime = 0
+	}
 	switch item.Kind {
 	case "folder":
-		dir := fs.NewDir(remote, time.Unix(item.Mtime, 0)).SetSize(item.Size)
+		dir := fs.NewDir(remote, time.Unix(mTime, 0)).SetSize(item.Size)
 		dirSize := item.Count.Files + item.Count.Folders
 		return dir, dirSize, nil
 	case "file":
@@ -671,7 +644,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
 			hasMetaData: true,
 			size:        item.Size,
 			mrHash:      binHash,
-			modTime:     time.Unix(item.Mtime, 0),
+			modTime:     time.Unix(mTime, 0),
 		}
 		return file, -1, nil
 	default:
@@ -1861,30 +1834,30 @@ func (f *Fs) uploadShard(ctx context.Context) (string, error) {
 		return f.shardURL, nil
 	}
 
-	token, err := f.accessToken()
-	if err != nil {
-		return "", err
-	}
-
 	opts := rest.Opts{
-		Method: "GET",
-		Path:   "/api/m1/dispatcher",
-		Parameters: url.Values{
-			"client_id":    {api.OAuthClientID},
-			"access_token": {token},
-		},
+		RootURL: api.DispatchServerURL,
+		Method:  "GET",
+		Path:    "/u",
 	}
 
-	var info api.ShardInfoResponse
+	var (
+		res *http.Response
+		url string
+		err error
+	)
 	err = f.pacer.Call(func() (bool, error) {
-		res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
-		return shouldRetry(res, err, f, &opts)
+		res, err = f.srv.Call(ctx, &opts)
+		if err == nil {
+			url, err = readBodyWord(res)
+		}
+		return fserrors.ShouldRetry(err), err
 	})
 	if err != nil {
+		closeBody(res)
 		return "", err
 	}
 
-	f.shardURL = info.Body.Upload[0].URL
+	f.shardURL = url
 	f.shardExpiry = time.Now().Add(shardExpirySec * time.Second)
 	fs.Debugf(f, "new upload shard: %s", f.shardURL)
 
@@ -2116,7 +2089,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		return nil, err
 	}
 
-	start, end, partial := getTransferRange(o.size, options...)
+	start, end, partialRequest := getTransferRange(o.size, options...)
 
+	headers := map[string]string{
+		"Accept":       "*/*",
+		"Content-Type": "application/octet-stream",
+	}
+	if partialRequest {
+		rangeStr := fmt.Sprintf("bytes=%d-%d", start, end-1)
+		headers["Range"] = rangeStr
+		// headers["Content-Range"] = rangeStr
+		headers["Accept-Ranges"] = "bytes"
+	}
+
 	// TODO: set custom timeouts
 	opts := rest.Opts{
@@ -2127,10 +2111,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 			"client_id": {api.OAuthClientID},
 			"token":     {token},
 		},
-		ExtraHeaders: map[string]string{
-			"Accept": "*/*",
-			"Range":  fmt.Sprintf("bytes=%d-%d", start, end-1),
-		},
+		ExtraHeaders: headers,
 	}
 
 	var res *http.Response
@@ -2151,18 +2132,36 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		return nil, err
 	}
 
-	var hasher gohash.Hash
-	if !partial {
+	// Server should respond with Status 206 and Content-Range header to a range
+	// request. Status 200 (and no Content-Range) means a full-content response.
+	partialResponse := res.StatusCode == 206
+
+	var (
+		hasher     gohash.Hash
+		wrapStream io.ReadCloser
+	)
+	if !partialResponse {
 		// Cannot check hash of partial download
 		hasher = mrhash.New()
 	}
-	wrapStream := &endHandler{
+	wrapStream = &endHandler{
 		ctx:    ctx,
 		stream: res.Body,
 		hasher: hasher,
 		o:      o,
 		server: server,
 	}
+	if partialRequest && !partialResponse {
+		fs.Debugf(o, "Server returned full content instead of range")
+		if start > 0 {
+			// Discard the beginning of the data
+			_, err = io.CopyN(ioutil.Discard, wrapStream, start)
+			if err != nil {
+				return nil, err
+			}
+		}
+		wrapStream = readers.NewLimitedReadCloser(wrapStream, end-start)
+	}
 	return wrapStream, nil
 }
 
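The hunk above copes with servers that ignore a `Range` header and reply 200 with the whole body: it discards the first `start` bytes and then caps the stream at `end-start`. A generic, hedged sketch of the same pattern using only the standard library (the URL is purely illustrative, and this is not rclone's code):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// readRange fetches bytes [start, end) of url, tolerating servers that
// ignore the Range header and return the full body with status 200.
func readRange(url string, start, end int64) ([]byte, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end-1))

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	body := io.Reader(res.Body)
	if res.StatusCode != http.StatusPartialContent {
		// Full-content response: skip everything before the requested range
		// and limit what is read to the range length.
		if _, err := io.CopyN(ioutil.Discard, body, start); err != nil {
			return nil, err
		}
		body = io.LimitReader(body, end-start)
	}
	return ioutil.ReadAll(body)
}

func main() {
	data, err := readRange("https://example.com/file.bin", 100, 200)
	fmt.Println(len(data), err)
}
```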
@@ -2215,7 +2214,7 @@ func (e *endHandler) handle(err error) error {
 	return io.EOF
 }
 
-// serverPool backs server dispacher
+// serverPool backs server dispatcher
 type serverPool struct {
 	pool pendingServerMap
 	mu   sync.Mutex
@@ -221,7 +221,7 @@ func (f *Fs) setRoot(root string) {
 	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
 }
 
-// NewFs contstructs an Fs from the path, bucket:path
+// NewFs constructs an Fs from the path, bucket:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
@@ -254,7 +254,7 @@ type MoveItemRequest struct {
 //Always Type:view and Scope:anonymous for public sharing
 type CreateShareLinkRequest struct {
 	Type  string `json:"type"`            //Link type in View, Edit or Embed
-	Scope string `json:"scope,omitempty"` //Optional. Scope in anonymousi, organization
+	Scope string `json:"scope,omitempty"` //Optional. Scope in anonymous, organization
 }
 
 //CreateShareLinkResponse is the response from CreateShareLinkRequest
@@ -1247,6 +1247,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 		return nil, errors.Wrap(err, "about failed")
 	}
 	q := drive.Quota
+	// On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
+	if q.Total == 0 && q.Used == 0 && q.Deleted == 0 && q.Remaining == 0 {
+		return &fs.Usage{}, nil
+	}
 	usage = &fs.Usage{
 		Total: fs.NewUsageValue(q.Total), // quota of bytes that can be used
 		Used:  fs.NewUsageValue(q.Used),  // bytes in use
@@ -646,7 +646,6 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 
 // retryErrorCodes is a slice of error codes that we will retry
 var retryErrorCodes = []int{
-	400, // Bad request (seen in "Next token is expired")
 	401, // Unauthorized (seen in "Token has expired")
 	408, // Request Timeout
 	423, // Locked - get this on folders sometimes
@@ -1125,7 +1125,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 	// Special treatment for a 0 length upload. This doesn't work
 	// with PUT even with Content-Length set (by setting
-	// opts.Body=0), so upload it as a multpart form POST with
+	// opts.Body=0), so upload it as a multipart form POST with
 	// Content-Length set.
 	if size == 0 {
 		formReader, contentType, overhead, err := rest.MultipartUpload(in, opts.Parameters, "content", leaf)
@@ -236,10 +236,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
 	// defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
-	exisitingObj, err := f.NewObject(ctx, src.Remote())
+	existingObj, err := f.NewObject(ctx, src.Remote())
 	switch err {
 	case nil:
-		return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
+		return existingObj, existingObj.Update(ctx, in, src, options...)
 	case fs.ErrorObjectNotFound:
 		// Not found so create it
 		return f.PutUnchecked(ctx, in, src, options...)
@@ -115,7 +115,7 @@ func (o *Object) MimeType(ctx context.Context) string {
 
 // setMetadataFromEntry sets the fs data from a putio.File
 //
-// This isn't a complete set of metadata and has an inacurate date
+// This isn't a complete set of metadata and has an inaccurate date
 func (o *Object) setMetadataFromEntry(info putio.File) error {
 	o.file = &info
 	o.modtime = info.UpdatedAt.Time
@@ -104,7 +104,7 @@ enough memory, then increasing this will speed up the transfers.`,
 This is the number of chunks of the same file that are uploaded
 concurrently.
 
-NB if you set this to > 1 then the checksums of multpart uploads
+NB if you set this to > 1 then the checksums of multipart uploads
 become corrupted (the uploads themselves are not corrupted though).
 
 If you are uploading small numbers of large file over high speed link
backend/s3/s3.go
@@ -5,6 +5,7 @@ import (
 	"bytes"
 	"context"
 	"crypto/md5"
+	"crypto/tls"
 	"encoding/base64"
 	"encoding/hex"
 	"encoding/xml"
@@ -58,7 +59,7 @@ import (
 func init() {
 	fs.Register(&fs.RegInfo{
 		Name:        "s3",
-		Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
+		Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)",
 		NewFs:       NewFs,
 		CommandHelp: commandHelp,
 		Options: []fs.Option{{
@@ -94,6 +95,9 @@ func init() {
 		}, {
 			Value: "StackPath",
 			Help:  "StackPath Object Storage",
+		}, {
+			Value: "TencentCOS",
+			Help:  "Tencent Cloud Object Storage (COS)",
 		}, {
 			Value: "Wasabi",
 			Help:  "Wasabi Object Storage",
@@ -119,6 +123,9 @@ func init() {
 			Name: "secret_access_key",
 			Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
 		}, {
+			// References:
+			// 1. https://docs.aws.amazon.com/general/latest/gr/rande.html
+			// 2. https://docs.aws.amazon.com/general/latest/gr/s3.html
 			Name:     "region",
 			Help:     "Region to connect to.",
 			Provider: "AWS",
@@ -128,12 +135,12 @@ func init() {
 		}, {
 			Value: "us-east-2",
 			Help:  "US East (Ohio) Region\nNeeds location constraint us-east-2.",
-		}, {
-			Value: "us-west-2",
-			Help:  "US West (Oregon) Region\nNeeds location constraint us-west-2.",
 		}, {
 			Value: "us-west-1",
 			Help:  "US West (Northern California) Region\nNeeds location constraint us-west-1.",
+		}, {
+			Value: "us-west-2",
+			Help:  "US West (Oregon) Region\nNeeds location constraint us-west-2.",
 		}, {
 			Value: "ca-central-1",
 			Help:  "Canada (Central) Region\nNeeds location constraint ca-central-1.",
@@ -143,9 +150,15 @@ func init() {
 		}, {
 			Value: "eu-west-2",
 			Help:  "EU (London) Region\nNeeds location constraint eu-west-2.",
+		}, {
+			Value: "eu-west-3",
+			Help:  "EU (Paris) Region\nNeeds location constraint eu-west-3.",
 		}, {
 			Value: "eu-north-1",
 			Help:  "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
+		}, {
+			Value: "eu-south-1",
+			Help:  "EU (Milan) Region\nNeeds location constraint eu-south-1.",
 		}, {
 			Value: "eu-central-1",
 			Help:  "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
@@ -161,15 +174,36 @@ func init() {
 		}, {
 			Value: "ap-northeast-2",
 			Help:  "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
+		}, {
+			Value: "ap-northeast-3",
+			Help:  "Asia Pacific (Osaka-Local)\nNeeds location constraint ap-northeast-3.",
 		}, {
 			Value: "ap-south-1",
 			Help:  "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
 		}, {
 			Value: "ap-east-1",
-			Help:  "Asia Patific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
+			Help:  "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
 		}, {
 			Value: "sa-east-1",
 			Help:  "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
+		}, {
+			Value: "me-south-1",
+			Help:  "Middle East (Bahrain) Region\nNeeds location constraint me-south-1.",
+		}, {
+			Value: "af-south-1",
+			Help:  "Africa (Cape Town) Region\nNeeds location constraint af-south-1.",
+		}, {
+			Value: "cn-north-1",
+			Help:  "China (Beijing) Region\nNeeds location constraint cn-north-1.",
+		}, {
+			Value: "cn-northwest-1",
+			Help:  "China (Ningxia) Region\nNeeds location constraint cn-northwest-1.",
+		}, {
+			Value: "us-gov-east-1",
+			Help:  "AWS GovCloud (US-East) Region\nNeeds location constraint us-gov-east-1.",
+		}, {
+			Value: "us-gov-west-1",
+			Help:  "AWS GovCloud (US) Region\nNeeds location constraint us-gov-west-1.",
 		}},
 	}, {
 		Name: "region",
@@ -185,7 +219,7 @@ func init() {
 	}, {
 		Name:     "region",
 		Help:     "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
-		Provider: "!AWS,Alibaba,Scaleway",
+		Provider: "!AWS,Alibaba,Scaleway,TencentCOS",
 		Examples: []fs.OptionExample{{
 			Value: "",
 			Help:  "Use this if unsure. Will use v4 signatures and an empty region.",
@@ -476,10 +510,73 @@ func init() {
 			Value: "s3.eu-central-1.stackpathstorage.com",
 			Help:  "EU Endpoint",
 		}},
+	}, {
+		// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
+		Name:     "endpoint",
+		Help:     "Endpoint for Tencent COS API.",
+		Provider: "TencentCOS",
+		Examples: []fs.OptionExample{{
+			Value: "cos.ap-beijing.myqcloud.com",
+			Help:  "Beijing Region.",
+		}, {
+			Value: "cos.ap-nanjing.myqcloud.com",
+			Help:  "Nanjing Region.",
+		}, {
+			Value: "cos.ap-shanghai.myqcloud.com",
+			Help:  "Shanghai Region.",
+		}, {
+			Value: "cos.ap-guangzhou.myqcloud.com",
+			Help:  "Guangzhou Region.",
+		}, {
+			Value: "cos.ap-nanjing.myqcloud.com",
+			Help:  "Nanjing Region.",
+		}, {
+			Value: "cos.ap-chengdu.myqcloud.com",
+			Help:  "Chengdu Region.",
+		}, {
+			Value: "cos.ap-chongqing.myqcloud.com",
+			Help:  "Chongqing Region.",
+		}, {
+			Value: "cos.ap-hongkong.myqcloud.com",
+			Help:  "Hong Kong (China) Region.",
+		}, {
+			Value: "cos.ap-singapore.myqcloud.com",
+			Help:  "Singapore Region.",
+		}, {
+			Value: "cos.ap-mumbai.myqcloud.com",
+			Help:  "Mumbai Region.",
+		}, {
+			Value: "cos.ap-seoul.myqcloud.com",
+			Help:  "Seoul Region.",
+		}, {
+			Value: "cos.ap-bangkok.myqcloud.com",
+			Help:  "Bangkok Region.",
+		}, {
+			Value: "cos.ap-tokyo.myqcloud.com",
+			Help:  "Tokyo Region.",
+		}, {
+			Value: "cos.na-siliconvalley.myqcloud.com",
+			Help:  "Silicon Valley Region.",
+		}, {
+			Value: "cos.na-ashburn.myqcloud.com",
+			Help:  "Virginia Region.",
+		}, {
+			Value: "cos.na-toronto.myqcloud.com",
+			Help:  "Toronto Region.",
+		}, {
+			Value: "cos.eu-frankfurt.myqcloud.com",
+			Help:  "Frankfurt Region.",
+		}, {
+			Value: "cos.eu-moscow.myqcloud.com",
+			Help:  "Moscow Region.",
+		}, {
+			Value: "cos.accelerate.myqcloud.com",
+			Help:  "Use Tencent COS Accelerate Endpoint.",
+		}},
 	}, {
 		Name:     "endpoint",
 		Help:     "Endpoint for S3 API.\nRequired when using an S3 clone.",
-		Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
+		Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath",
 		Examples: []fs.OptionExample{{
 			Value: "objects-us-east-1.dream.io",
 			Help:  "Dream Objects endpoint",
@@ -519,12 +616,12 @@ func init() {
 		}, {
 			Value: "us-east-2",
 			Help:  "US East (Ohio) Region.",
-		}, {
-			Value: "us-west-2",
-			Help:  "US West (Oregon) Region.",
 		}, {
 			Value: "us-west-1",
 			Help:  "US West (Northern California) Region.",
+		}, {
+			Value: "us-west-2",
+			Help:  "US West (Oregon) Region.",
 		}, {
 			Value: "ca-central-1",
 			Help:  "Canada (Central) Region.",
@@ -534,9 +631,15 @@ func init() {
 		}, {
 			Value: "eu-west-2",
 			Help:  "EU (London) Region.",
+		}, {
+			Value: "eu-west-3",
+			Help:  "EU (Paris) Region.",
 		}, {
 			Value: "eu-north-1",
 			Help:  "EU (Stockholm) Region.",
+		}, {
+			Value: "eu-south-1",
+			Help:  "EU (Milan) Region.",
 		}, {
 			Value: "EU",
 			Help:  "EU Region.",
@@ -551,16 +654,37 @@ func init() {
 			Help:  "Asia Pacific (Tokyo) Region.",
 		}, {
 			Value: "ap-northeast-2",
-			Help:  "Asia Pacific (Seoul)",
+			Help:  "Asia Pacific (Seoul) Region.",
+		}, {
+			Value: "ap-northeast-3",
+			Help:  "Asia Pacific (Osaka-Local) Region.",
 		}, {
 			Value: "ap-south-1",
-			Help:  "Asia Pacific (Mumbai)",
+			Help:  "Asia Pacific (Mumbai) Region.",
 		}, {
 			Value: "ap-east-1",
-			Help:  "Asia Pacific (Hong Kong)",
+			Help:  "Asia Pacific (Hong Kong) Region.",
 		}, {
 			Value: "sa-east-1",
 			Help:  "South America (Sao Paulo) Region.",
+		}, {
+			Value: "me-south-1",
+			Help:  "Middle East (Bahrain) Region.",
+		}, {
+			Value: "af-south-1",
+			Help:  "Africa (Cape Town) Region.",
+		}, {
+			Value: "cn-north-1",
+			Help:  "China (Beijing) Region",
+		}, {
+			Value: "cn-northwest-1",
+			Help:  "China (Ningxia) Region.",
+		}, {
+			Value: "us-gov-east-1",
+			Help:  "AWS GovCloud (US-East) Region.",
+		}, {
+			Value: "us-gov-west-1",
+			Help:  "AWS GovCloud (US) Region.",
 		}},
 	}, {
 		Name: "location_constraint",
@@ -666,7 +790,7 @@ func init() {
 	}, {
 		Name:     "location_constraint",
 		Help:     "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
-		Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
+		Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath,TencentCOS",
 	}, {
 		Name: "acl",
 		Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -678,9 +802,13 @@ For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview
 Note that this ACL is applied when server side copying objects as S3
 doesn't copy the ACL from the source but rather writes a fresh one.`,
 		Examples: []fs.OptionExample{{
+			Value:    "default",
+			Help:     "Owner gets Full_CONTROL. No one else has access rights (default).",
+			Provider: "TencentCOS",
+		}, {
 			Value:    "private",
 			Help:     "Owner gets FULL_CONTROL. No one else has access rights (default).",
-			Provider: "!IBMCOS",
+			Provider: "!IBMCOS,TencentCOS",
 		}, {
 			Value: "public-read",
 			Help:  "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
@@ -842,6 +970,24 @@ isn't set then "acl" is used instead.`,
 			Value: "STANDARD_IA",
 			Help:  "Infrequent access storage mode.",
 		}},
+	}, {
+		// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
+		Name:     "storage_class",
+		Help:     "The storage class to use when storing new objects in Tencent COS.",
+		Provider: "TencentCOS",
+		Examples: []fs.OptionExample{{
+			Value: "",
+			Help:  "Default",
+		}, {
+			Value: "STANDARD",
+			Help:  "Standard storage class",
+		}, {
+			Value: "ARCHIVE",
+			Help:  "Archive storage mode.",
+		}, {
+			Value: "STANDARD_IA",
+			Help:  "Infrequent access storage mode.",
+		}},
 	}, {
 		// Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes
 		Name: "storage_class",
@@ -975,7 +1121,7 @@ if false then rclone will use virtual path style. See [the AWS S3
 docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
 for more info.
 
-Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
+Some providers (eg AWS, Aliyun OSS, Netease COS or Tencent COS) require this set to
 false - rclone will do this automatically based on the provider
 setting.`,
 		Default: true,
@@ -1058,6 +1204,19 @@ This option controls how often unused buffers will be removed from the pool.`,
 		Default:  memoryPoolUseMmap,
 		Advanced: true,
 		Help:     `Whether to use mmap buffers in internal memory pool.`,
+	}, {
+		Name:     "disable_http2",
+		Default:  false,
+		Advanced: true,
+		Help: `Disable usage of http2 for S3 backends
+
+There is currently an unsolved issue with the s3 (specifically minio) backend
+and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be
+disabled here. When the issue is solved this flag will be removed.
+
+See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631
+
+`,
 	},
 	}})
 }
@@ -1115,6 +1274,7 @@ type Options struct {
 	Enc                 encoder.MultiEncoder `config:"encoding"`
 	MemoryPoolFlushTime fs.Duration          `config:"memory_pool_flush_time"`
 	MemoryPoolUseMmap   bool                 `config:"memory_pool_use_mmap"`
+	DisableHTTP2        bool                 `config:"disable_http2"`
 }
 
 // Fs represents a remote s3 server
@@ -1236,6 +1396,19 @@ func (o *Object) split() (bucket, bucketPath string) {
|
|||||||
return o.fs.split(o.remote)
|
return o.fs.split(o.remote)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getClient makes an http client according to the options
|
||||||
|
func getClient(opt *Options) *http.Client {
|
||||||
|
// TODO: Do we need cookies too?
|
||||||
|
t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
|
||||||
|
if opt.DisableHTTP2 {
|
||||||
|
t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
return &http.Client{
|
||||||
|
Transport: t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// s3Connection makes a connection to s3
|
// s3Connection makes a connection to s3
|
||||||
func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
|
func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
|
||||||
// Make the auth
|
// Make the auth
|
||||||
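The `disable_http2` change works by handing the transport an empty `TLSNextProto` map, which is the standard way to stop Go's `net/http` from negotiating HTTP/2. A minimal standalone sketch of the same idea, using only the standard library (the function name and URL here are illustrative, not rclone's):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

// newHTTP1Client returns an *http.Client that will never negotiate HTTP/2,
// mirroring what the s3 backend's getClient does when disable_http2 is set.
func newHTTP1Client() *http.Client {
	t := &http.Transport{
		// A non-nil but empty TLSNextProto map tells net/http not to
		// upgrade TLS connections to HTTP/2.
		TLSNextProto: map[string]func(string, *tls.Conn) http.RoundTripper{},
	}
	return &http.Client{Transport: t}
}

func main() {
	resp, err := newHTTP1Client().Get("https://example.com")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("negotiated protocol:", resp.Proto) // expected: HTTP/1.1
}
```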
@@ -1246,6 +1419,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
}

lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service

def := defaults.Get()
def.Config.HTTPClient = lowTimeoutClient

@@ -1305,7 +1479,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
if opt.Region == "" {
opt.Region = "us-east-1"
}
-if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.UseAccelerateEndpoint {
+if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.Provider == "TencentCOS" || opt.UseAccelerateEndpoint {
opt.ForcePathStyle = false
}
if opt.Provider == "Scaleway" && opt.MaxUploadParts > 1000 {

@@ -1314,7 +1488,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
awsConfig := aws.NewConfig().
WithMaxRetries(0). // Rely on rclone's retry logic
WithCredentials(cred).
-WithHTTPClient(fshttp.NewClient(fs.Config)).
+WithHTTPClient(getClient(opt)).
WithS3ForcePathStyle(opt.ForcePathStyle).
WithS3UseAccelerate(opt.UseAccelerateEndpoint).
WithS3UsEast1RegionalEndpoint(endpoints.RegionalS3UsEast1Endpoint)

@@ -1428,7 +1602,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ses: ses,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
-srv: fshttp.NewClient(fs.Config),
+srv: getClient(opt),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),

@@ -1587,7 +1761,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
//
// So we enable only on providers we know supports it properly, all others can retry when a
// XML Syntax error is detected.
-var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio")
+var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio" || f.opt.Provider == "TencentCOS")
for {
// FIXME need to implement ALL loop
req := s3.ListObjectsInput{

@@ -2190,7 +2364,7 @@ All the objects shown will be marked for restore, then
rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard

It returns a list of status dictionaries with Remote and Status
-keys. The Status will be OK if it was successfull or an error message
+keys. The Status will be OK if it was successful or an error message
if not.

[

@@ -2355,7 +2529,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
// listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
//
// Note that rather lazily we treat key as a prefix so it matches
-// directories and objects. This could suprise the user if they ask
+// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uploads []*s3.MultipartUpload, err error) {
var (

@@ -2888,7 +3062,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

// read the md5sum if available
-// - for non multpart
+// - for non multipart
// - so we can add a ContentMD5
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
@@ -46,7 +46,7 @@ type Library struct {
Encrypted bool `json:"encrypted"`
Owner string `json:"owner"`
ID string `json:"id"`
-Size int `json:"size"`
+Size int64 `json:"size"`
Name string `json:"name"`
Modified int64 `json:"mtime"`
}

@@ -1004,7 +1004,7 @@ func (f *Fs) listLibraries(ctx context.Context) (entries fs.DirEntries, err erro

for _, library := range libraries {
d := fs.NewDir(library.Name, time.Unix(library.Modified, 0))
-d.SetSize(int64(library.Size))
+d.SetSize(library.Size)
entries = append(entries, d)
}

@@ -11,7 +11,6 @@ import (
"io"
"io/ioutil"
"os"
-"os/user"
"path"
"regexp"
"strconv"

@@ -33,6 +32,7 @@ import (
"github.com/rclone/rclone/lib/readers"
sshagent "github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
+"golang.org/x/crypto/ssh/knownhosts"
)

const (

@@ -43,7 +43,7 @@ const (
)

var (
-currentUser = readCurrentUser()
+currentUser = env.CurrentUser()
)

func init() {

@@ -82,6 +82,21 @@ func init() {
Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
in the new OpenSSH format can't be used.`,
IsPassword: true,
+}, {
+Name: "pubkey_file",
+Help: `Optional path to public key file.
+
+Set this if you have a signed certificate you want to use for authentication.` + env.ShellExpandHelp,
+}, {
+Name: "known_hosts_file",
+Help: `Optional path to known_hosts file.
+
+Set this value to enable server host key validation.` + env.ShellExpandHelp,
+Advanced: true,
+Examples: []fs.OptionExample{{
+Value: "~/.ssh/known_hosts",
+Help: "Use OpenSSH's known_hosts file",
+}},
}, {
Name: "key_use_agent",
Help: `When set forces the usage of the ssh-agent.

@@ -190,6 +205,8 @@ type Options struct {
KeyPem string `config:"key_pem"`
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
+PubKeyFile string `config:"pubkey_file"`
+KnownHostsFile string `config:"known_hosts_file"`
KeyUseAgent bool `config:"key_use_agent"`
UseInsecureCipher bool `config:"use_insecure_cipher"`
DisableHashCheck bool `config:"disable_hashcheck"`

@@ -218,6 +235,7 @@ type Fs struct {
poolMu sync.Mutex
pool []*conn
pacer *fs.Pacer // pacer for operations
+savedpswd string
}

// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)

@@ -231,20 +249,6 @@ type Object struct {
sha1sum *string // Cached SHA1 checksum
}

-// readCurrentUser finds the current user name or "" if not found
-func readCurrentUser() (userName string) {
-usr, err := user.Current()
-if err == nil {
-return usr.Username
-}
-// Fall back to reading $USER then $LOGNAME
-userName = os.Getenv("USER")
-if userName != "" {
-return userName
-}
-return os.Getenv("LOGNAME")
-}
-
// dial starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client.

@@ -410,6 +414,10 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+// This will hold the Fs object. We need to create it here
+// so we can refer to it in the SSH callback, but it's populated
+// in NewFsWithConnection
+f := &Fs{}
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)

@@ -423,6 +431,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.Port == "" {
opt.Port = "22"
}

sshConfig := &ssh.ClientConfig{
User: opt.User,
Auth: []ssh.AuthMethod{},

@@ -431,6 +440,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ClientVersion: "SSH-2.0-" + fs.Config.UserAgent,
}

+if opt.KnownHostsFile != "" {
+hostcallback, err := knownhosts.New(opt.KnownHostsFile)
+if err != nil {
+return nil, errors.Wrap(err, "couldn't parse known_hosts_file")
+}
+sshConfig.HostKeyCallback = hostcallback
+}
+
if opt.UseInsecureCipher {
sshConfig.Config.SetDefaults()
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc", "aes192-cbc", "aes256-cbc", "3des-cbc")
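The new `known_hosts_file` option wires `golang.org/x/crypto/ssh/knownhosts` in as the connection's `HostKeyCallback`. As a rough idea of how that callback behaves on its own, here is a hedged standalone sketch (the host, user, password and path are placeholders, not rclone values):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/knownhosts"
)

func main() {
	// knownhosts.New parses an OpenSSH known_hosts file and returns an
	// ssh.HostKeyCallback that rejects unknown or mismatched host keys.
	hostKeyCallback, err := knownhosts.New("/home/user/.ssh/known_hosts") // placeholder path
	if err != nil {
		log.Fatalf("couldn't parse known_hosts file: %v", err)
	}

	config := &ssh.ClientConfig{
		User:            "demo", // placeholder user
		Auth:            []ssh.AuthMethod{ssh.Password("secret")},
		HostKeyCallback: hostKeyCallback, // instead of ssh.InsecureIgnoreHostKey()
	}

	client, err := ssh.Dial("tcp", "sftp.example.com:22", config) // placeholder host
	if err != nil {
		log.Fatalf("dial failed (host key or auth rejected): %v", err)
	}
	defer client.Close()
	fmt.Println("connected with verified host key")
}
```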
@@ -438,6 +455,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

keyFile := env.ShellExpand(opt.KeyFile)
+pubkeyFile := env.ShellExpand(opt.PubKeyFile)
//keyPem := env.ShellExpand(opt.KeyPem)
// Add ssh agent-auth if no password or file or key PEM specified
if (opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {

@@ -507,7 +525,38 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "failed to parse private key file")
}
-sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))
+
+// If a public key has been specified then use that
+if pubkeyFile != "" {
+certfile, err := ioutil.ReadFile(pubkeyFile)
+if err != nil {
+return nil, errors.Wrap(err, "unable to read cert file")
+}
+
+pk, _, _, _, err := ssh.ParseAuthorizedKey(certfile)
+if err != nil {
+return nil, errors.Wrap(err, "unable to parse cert file")
+}
+
+// And the signer for this, which includes the private key signer
+// This is what we'll pass to the ssh client.
+// Normally the ssh client will use the public key built
+// into the private key, but we need to tell it to use the user
+// specified public key cert. This signer is specific to the
+// cert and will include the private key signer. Now ssh
+// knows everything it needs.
+cert, ok := pk.(*ssh.Certificate)
+if !ok {
+return nil, errors.New("public key file is not a certificate file: " + pubkeyFile)
+}
+pubsigner, err := ssh.NewCertSigner(cert, signer)
+if err != nil {
+return nil, errors.Wrap(err, "error generating cert signer")
+}
+sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(pubsigner))
+} else {
+sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))
+}
}

// Auth from password if specified
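The certificate branch above builds an `ssh.CertSigner` so the client offers the signed user certificate rather than the bare public key from the private key. A self-contained sketch of the same flow, under the assumption of an unencrypted private key and illustrative file paths:

```go
package main

import (
	"errors"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

// certSignerFromFiles loads a private key plus a matching "-cert.pub" file and
// returns a signer that presents the certificate during the SSH handshake.
func certSignerFromFiles(keyPath, certPath string) (ssh.Signer, error) {
	keyBytes, err := ioutil.ReadFile(keyPath)
	if err != nil {
		return nil, err
	}
	signer, err := ssh.ParsePrivateKey(keyBytes) // plain private key signer
	if err != nil {
		return nil, err
	}

	certBytes, err := ioutil.ReadFile(certPath)
	if err != nil {
		return nil, err
	}
	pk, _, _, _, err := ssh.ParseAuthorizedKey(certBytes) // certs use authorized_keys format
	if err != nil {
		return nil, err
	}
	cert, ok := pk.(*ssh.Certificate)
	if !ok {
		return nil, errors.New("not an SSH certificate: " + certPath)
	}
	// Wrap the private key signer so the certificate is what gets offered.
	return ssh.NewCertSigner(cert, signer)
}

func main() {
	signer, err := certSignerFromFiles("/home/user/.ssh/id_rsa", "/home/user/.ssh/id_rsa-cert.pub") // placeholder paths
	if err != nil {
		log.Fatal(err)
	}
	_ = &ssh.ClientConfig{Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}}
}
```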
@@ -519,30 +568,45 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
}

-// Ask for password if none was defined and we're allowed to
+// Config for password if none was defined and we're allowed to
+// We don't ask now; we ask if the ssh connection succeeds
if opt.Pass == "" && opt.AskPassword {
-_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
+sshConfig.Auth = append(sshConfig.Auth, ssh.PasswordCallback(f.getPass))
-clearpass := config.ReadPassword()
-sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
}

-return NewFsWithConnection(ctx, name, root, m, opt, sshConfig)
+return NewFsWithConnection(ctx, f, name, root, m, opt, sshConfig)
+}
+
+// If we're in password mode and ssh connection succeeds then this
+// callback is called. First time around we ask the user, and then
+// save it so on reconnection we give back the previous string.
+// This removes the ability to let the user correct a mistaken entry,
+// but means that reconnects are transparent.
+// We'll re-use config.Pass for this, 'cos we know it's not been
+// specified.
+func (f *Fs) getPass() (string, error) {
+for f.savedpswd == "" {
+_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
+f.savedpswd = config.ReadPassword()
+}
+return f.savedpswd, nil
}

// NewFsWithConnection creates a new Fs object from the name and root and an ssh.ClientConfig. It connects to
// the host specified in the ssh.ClientConfig
-func NewFsWithConnection(ctx context.Context, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
+func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
-f := &Fs{
+// Populate the Filesystem Object
-name: name,
+f.name = name
-root: root,
+f.root = root
-absRoot: root,
+f.absRoot = root
-opt: *opt,
+f.opt = *opt
-m: m,
+f.m = m
-config: sshConfig,
+f.config = sshConfig
-url: "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
+f.url = "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root
-mkdirLock: newStringLock(),
+f.mkdirLock = newStringLock()
-pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+f.pacer = fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
-}
+f.savedpswd = ""

f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
SlowHash: true,
@@ -888,7 +952,7 @@ func (f *Fs) run(cmd string) ([]byte, error) {

session, err := c.sshClient.NewSession()
if err != nil {
-return nil, errors.Wrap(err, "run: get SFTP sessiion")
+return nil, errors.Wrap(err, "run: get SFTP session")
}
defer func() {
_ = session.Close()

@@ -1087,7 +1151,7 @@ func shellEscape(str string) string {
func parseHash(bytes []byte) string {
// For strings with backslash *sum writes a leading \
// https://unix.stackexchange.com/q/313733/94054
-return strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0] // Split at hash / filename separator
+return strings.ToLower(strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0]) // Split at hash / filename separator / all convert to lowercase
}

// Parses the byte array output from the SSH session
@@ -95,7 +95,7 @@ type UploadSpecification struct {
ChunkURI string `json:"ChunkUri"` // Specifies the URI the client must send the file data to
FinishURI string `json:"FinishUri"` // If provided, specifies the final call the client must perform to finish the upload process
ProgressData string `json:"ProgressData"` // Allows the client to check progress of standard uploads
-IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supproted.
+IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supported.
ResumeIndex int64 `json:"ResumeIndex"` // Specifies the initial index for resuming, if IsResume is true.
ResumeOffset int64 `json:"ResumeOffset"` // Specifies the initial file offset by bytes, if IsResume is true
ResumeFileHash string `json:"ResumeFileHash"` // Specifies the MD5 hash of the first ResumeOffset bytes of the partial file found at the server

@@ -1090,7 +1090,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
} else if err != nil {
return nil, errors.Wrap(err, "copy: failed to examine destination dir")
} else {
-// otherwise need to copy via a temporary directlry
+// otherwise need to copy via a temporary directory
}
}

@@ -350,7 +350,7 @@ func (f *Fs) getAuth(req *http.Request) (err error) {
// if have auth, check it is in date
if f.opt.Authorization == "" || f.opt.User == "" || f.authExpiry.IsZero() || time.Until(f.authExpiry) < expiryLeeway {
// Get the auth token
-f.srv.SetSigner(nil) // temporariliy remove the signer so we don't infinitely recurse
+f.srv.SetSigner(nil) // temporarily remove the signer so we don't infinitely recurse
err = f.getAuthToken(ctx)
f.srv.SetSigner(f.getAuth) // replace signer
if err != nil {
@@ -67,12 +67,12 @@ func init() {
log.Fatalf("Couldn't create access grant: %v", err)
}

-serialziedAccess, err := access.Serialize()
+serializedAccess, err := access.Serialize()
if err != nil {
log.Fatalf("Couldn't serialize access grant: %v", err)
}
configMapper.Set("satellite_address", satellite)
-configMapper.Set("access_grant", serialziedAccess)
+configMapper.Set("access_grant", serializedAccess)
} else if provider == existingProvider {
config.FileDeleteKey(name, "satellite_address")
config.FileDeleteKey(name, "api_key")
@@ -61,7 +61,7 @@ func (p *EpAll) Action(ctx context.Context, upstreams []*upstream.Fs, path strin
return p.epall(ctx, upstreams, path)
}

-// ActionEntries is ACTION category policy but receivng a set of candidate entries
+// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpAll) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound

@@ -106,7 +106,7 @@ func (p *EpMfs) Search(ctx context.Context, upstreams []*upstream.Fs, path strin
return p.mfs(upstreams)
}

-// SearchEntries is SEARCH category policy but receivng a set of candidate entries
+// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *EpMfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound

@@ -14,7 +14,7 @@ func init() {
// FF stands for first found
// Search category: same as epff.
// Action category: same as epff.
-// Create category: Given the order of the candiates, act on the first one found.
+// Create category: Given the order of the candidates, act on the first one found.
type FF struct {
EpFF
}

@@ -60,7 +60,7 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
Upstreams fs.SpaceSepList `config:"upstreams"`
-Remotes fs.SpaceSepList `config:"remotes"` // Depreated
+Remotes fs.SpaceSepList `config:"remotes"` // Deprecated
ActionPolicy string `config:"action_policy"`
CreatePolicy string `config:"create_policy"`
SearchPolicy string `config:"search_policy"`

@@ -567,7 +567,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-entriess := make([][]upstream.Entry, len(f.upstreams))
+entriesList := make([][]upstream.Entry, len(f.upstreams))
errs := Errors(make([]error, len(f.upstreams)))
multithread(len(f.upstreams), func(i int) {
u := f.upstreams[i]

@@ -580,7 +580,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for j, e := range entries {
uEntries[j], _ = u.WrapEntry(e)
}
-entriess[i] = uEntries
+entriesList[i] = uEntries
})
if len(errs) == len(errs.FilterNil()) {
errs = errs.Map(func(e error) error {

@@ -594,7 +594,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
return nil, errs.Err()
}
-return f.mergeDirEntries(entriess)
+return f.mergeDirEntries(entriesList)
}

// ListR lists the objects and directories of the Fs starting

@@ -614,7 +614,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
-var entriess [][]upstream.Entry
+var entriesList [][]upstream.Entry
errs := Errors(make([]error, len(f.upstreams)))
var mutex sync.Mutex
multithread(len(f.upstreams), func(i int) {

@@ -626,7 +626,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
uEntries[j], _ = u.WrapEntry(e)
}
mutex.Lock()
-entriess = append(entriess, uEntries)
+entriesList = append(entriesList, uEntries)
mutex.Unlock()
return nil
}

@@ -653,7 +653,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
return errs.Err()
}
-entries, err := f.mergeDirEntries(entriess)
+entries, err := f.mergeDirEntries(entriesList)
if err != nil {
return err
}

@@ -724,9 +724,9 @@ func (f *Fs) searchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
return f.searchPolicy.SearchEntries(entries...)
}

-func (f *Fs) mergeDirEntries(entriess [][]upstream.Entry) (fs.DirEntries, error) {
+func (f *Fs) mergeDirEntries(entriesList [][]upstream.Entry) (fs.DirEntries, error) {
entryMap := make(map[string]([]upstream.Entry))
-for _, en := range entriess {
+for _, en := range entriesList {
if en == nil {
continue
}

@@ -52,7 +52,7 @@ type Object struct {
f *Fs
}

-// Entry describe a warpped fs.DirEntry interface with the
+// Entry describe a wrapped fs.DirEntry interface with the
// information of upstream Fs
type Entry interface {
fs.DirEntry
@@ -12,7 +12,7 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
-RemoteName: "TestWebdavNexcloud:",
+RemoteName: "TestWebdavNextcloud:",
NilObject: (*webdav.Object)(nil),
})
}
@@ -20,7 +20,7 @@ type ResourceInfoRequestOptions struct {
Fields []string
}

-//ResourceInfoResponse struct is returned by the API for metedata requests.
+//ResourceInfoResponse struct is returned by the API for metadata requests.
type ResourceInfoResponse struct {
PublicKey string `json:"public_key"`
Name string `json:"name"`
@@ -280,7 +280,7 @@ func stripVersion(goarch string) string {

// build the binary in dir returning success or failure
func compileArch(version, goos, goarch, dir string) bool {
-log.Printf("Compiling %s/%s", goos, goarch)
+log.Printf("Compiling %s/%s into %s", goos, goarch, dir)
output := filepath.Join(dir, "rclone")
if goos == "windows" {
output += ".exe"

@@ -298,7 +298,6 @@ func compileArch(version, goos, goarch, dir string) bool {
"go", "build",
"--ldflags", "-s -X github.com/rclone/rclone/fs.Version=" + version,
"-trimpath",
-"-i",
"-o", output,
"-tags", *tags,
"..",

@@ -325,7 +324,7 @@ func compileArch(version, goos, goarch, dir string) bool {
artifacts := []string{buildZip(dir)}
// build a .deb and .rpm if appropriate
if goos == "linux" {
-artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
+artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
}
if *copyAs != "" {
for _, artifact := range artifacts {
@@ -141,7 +141,7 @@ def main():
for name in sorted(bugfixes.keys()):
out(name)

-# Read old changlog and split
+# Read old changelog and split
with open("docs/content/changelog.md") as fd:
old_changelog = fd.read()
heading = "# Changelog"
@@ -19,7 +19,7 @@ var (
// Flags
numberOfFiles = flag.Int("n", 1000, "Number of files to create")
averageFilesPerDirectory = flag.Int("files-per-directory", 10, "Average number of files per directory")
-maxDepth = flag.Int("max-depth", 10, "Maximum depth of directory heirachy")
+maxDepth = flag.Int("max-depth", 10, "Maximum depth of directory hierarchy")
minFileSize = flag.Int64("min-size", 0, "Minimum size of file to create")
maxFileSize = flag.Int64("max-size", 100, "Maximum size of files to create")
minFileNameLength = flag.Int("min-name-length", 4, "Minimum size of file to create")

@@ -61,7 +61,7 @@ func fileName() (name string) {
return name
}

-// dir is a directory in the directory heirachy being built up
+// dir is a directory in the directory hierarchy being built up
type dir struct {
name string
depth int

@@ -69,7 +69,7 @@ type dir struct {
parent *dir
}

-// Create a random directory heirachy under d
+// Create a random directory hierarchy under d
func (d *dir) createDirectories() {
for totalDirectories < directoriesToCreate {
newDir := &dir{

@@ -91,7 +91,7 @@ func (d *dir) createDirectories() {
return
}

-// list the directory heirachy
+// list the directory hierarchy
func (d *dir) list(path string, output []string) []string {
dirPath := filepath.Join(path, d.name)
output = append(output, dirPath)
@@ -10,6 +10,8 @@ import (
"os"
"os/exec"
"regexp"
+
+"github.com/coreos/go-semver/semver"
)

// version=$(sed <VERSION -e 's/\.[0-9]+*$//g')

@@ -28,7 +30,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
cmd := exec.Command("git", "log", "--oneline", from+".."+to)
out, err := cmd.Output()
if err != nil {
-log.Fatalf("failed to run git log: %v", err)
+log.Fatalf("failed to run git log %s: %v", from+".."+to, err)
}
logMap = map[string]string{}
logs = []string{}

@@ -53,15 +55,20 @@ func main() {
if len(args) != 0 {
log.Fatalf("Syntax: %s", os.Args[0])
}
+// v1.54.0
versionBytes, err := ioutil.ReadFile("VERSION")
if err != nil {
log.Fatalf("Failed to read version: %v", err)
}
-i := bytes.LastIndexByte(versionBytes, '.')
+if versionBytes[0] == 'v' {
-version := string(versionBytes[:i])
+versionBytes = versionBytes[1:]
-log.Printf("Finding commits not in stable %s", version)
+}
-masterMap, masterLogs := readCommits(version+".0", "master")
+versionBytes = bytes.TrimSpace(versionBytes)
-stableMap, _ := readCommits(version+".0", version+"-stable")
+semver := semver.New(string(versionBytes))
+stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
+log.Printf("Finding commits in %v not in stable %s", semver, stable)
+masterMap, masterLogs := readCommits(stable+".0", "master")
+stableMap, _ := readCommits(stable+".0", stable+"-stable")
for _, logMessage := range masterLogs {
// Commit found in stable already
if _, found := stableMap[logMessage]; found {
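The rewritten `main` above derives the previous stable branch name from the VERSION file using `github.com/coreos/go-semver/semver`. A small sketch of just that version arithmetic, with an assumed input string shown inline:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/coreos/go-semver/semver"
)

func main() {
	raw := "v1.54.0\n" // what a VERSION file might contain

	// Strip the leading "v" and surrounding whitespace before parsing,
	// since semver.New expects a bare "MAJOR.MINOR.PATCH" string.
	v := semver.New(strings.TrimSpace(strings.TrimPrefix(raw, "v")))

	// The previous stable release line is one minor version back.
	stable := fmt.Sprintf("v%d.%d", v.Major, v.Minor-1)
	fmt.Println(stable) // prints: v1.53
}
```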
@@ -29,6 +29,7 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
+flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.")
AddFlags(cmdFlags)
}

@@ -50,7 +51,7 @@ the source match the files in the destination, not the other way
around. This means that extra files in the destination that are not in
the source will not be detected.

-The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--src-only|
+The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--match|
and |--error| flags write paths, one per line, to the file name (or
stdout if it is |-|) supplied. What they write is described in the
help below. For example |--differ| will write all paths which are

@@ -14,7 +14,7 @@ func init() {

var commandDefinition = &cobra.Command{
Use: "cleanup remote:path",
-Short: `Clean up the remote if possible`,
+Short: `Clean up the remote if possible.`,
Long: `
Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.
cmd/cmd.go
@@ -1,4 +1,4 @@
-// Package cmd implemnts the rclone command
+// Package cmd implements the rclone command
//
// It is in a sub package so it's internals can be re-used elsewhere
package cmd

@@ -21,6 +21,7 @@ import (
"sync"
"time"

+systemd "github.com/iguanesolutions/go-systemd/v5"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"

@@ -35,6 +36,7 @@ import (
"github.com/rclone/rclone/fs/rc/rcflags"
"github.com/rclone/rclone/fs/rc/rcserver"
"github.com/rclone/rclone/lib/atexit"
+"github.com/rclone/rclone/lib/terminal"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)

@@ -288,6 +290,11 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
}
fs.Debugf(nil, "%d go routines active\n", runtime.NumGoroutine())

+if fs.Config.Progress && fs.Config.ProgressTerminalTitle {
+// Clear terminal title
+terminal.WriteTerminalTitle("")
+}
+
// dump all running go-routines
if fs.Config.Dump&fs.DumpGoRoutines != 0 {
err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)

@@ -364,6 +371,12 @@ func StartStats() func() {

// initConfig is run by cobra after initialising the flags
func initConfig() {
+// Activate logger systemd support if systemd invocation ID is detected
+_, sysdLaunch := systemd.GetInvocationID()
+if sysdLaunch {
+fs.Config.LogSystemdSupport = true // used during fslog.InitLogging()
+}
+
// Start the logger
fslog.InitLogging()

@@ -379,6 +392,13 @@ func initConfig() {
// Write the args for debug purposes
fs.Debugf("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)

+// Inform user about systemd log support now that we have a logger
+if sysdLaunch {
+fs.Debugf("rclone", "systemd logging support automatically activated")
+} else if fs.Config.LogSystemdSupport {
+fs.Debugf("rclone", "systemd logging support manually activated")
+}
+
// Start the remote control server if configured
_, err = rcserver.Start(&rcflags.Opt)
if err != nil {

@@ -493,7 +513,7 @@ func AddBackendFlags() {
if opt.IsPassword {
help += " (obscured)"
}
-flag := pflag.CommandLine.VarPF(opt, name, opt.ShortOpt, help)
+flag := flags.VarPF(pflag.CommandLine, opt, name, opt.ShortOpt, help)
if _, isBool := opt.Default.(bool); isBool {
flag.NoOptDefVal = "true"
}
@@ -270,7 +270,7 @@ func (fsys *FS) Releasedir(path string, fh uint64) (errc int) {
return fsys.closeHandle(fh)
}

-// Statfs reads overall stats on the filessystem
+// Statfs reads overall stats on the filesystem
func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
defer log.Trace(path, "")("stat=%+v, errc=%d", stat, &errc)
const blockSize = 4096
@@ -111,7 +111,7 @@ whether the password is already obscured or not and put unobscured
passwords into the config file. If you want to be 100% certain that
the passwords get obscured then use the "--obscure" flag, or if you
are 100% certain you are already passing obscured passwords then use
-"--no-obscure". You can also set osbscured passwords using the
+"--no-obscure". You can also set obscured passwords using the
"rclone config password" command.
`

@@ -22,7 +22,7 @@ func init() {

var commandDefinition = &cobra.Command{
Use: "copy source:path dest:path",
-Short: `Copy files from source to dest, skipping already copied`,
+Short: `Copy files from source to dest, skipping already copied.`,
Long: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or

@@ -15,7 +15,7 @@ func init() {

var commandDefinition = &cobra.Command{
Use: "copyto source:path dest:path",
-Short: `Copy files from source to dest, skipping already copied`,
+Short: `Copy files from source to dest, skipping already copied.`,
Long: `
If source:path is a file or directory then it copies it to a file or
directory named dest:path.
@@ -43,7 +43,7 @@ This means that for most duplicated files the ` + "`dedupe`" + `
command will not be interactive.

` + "`dedupe`" + ` considers files to be identical if they have the
-same hash. If the backend does not support hashes (eg crypt wrapping
+same file path and the same hash. If the backend does not support hashes (eg crypt wrapping
Google Drive) then they will never be found to be identical. If you
use the ` + "`--size-only`" + ` flag then files will be considered
identical if they have the same size (any hash will be ignored). This
@@ -49,7 +49,7 @@ func init() {
flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", true, "Check control characters.")
flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file.")
flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", true, "Check max filename length.")
-flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploadxs with indeterminate file size.")
+flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.")
}

var commandDefinition = &cobra.Command{
@@ -44,7 +44,7 @@ func init() {

var commandDefinition = &cobra.Command{
Use: "lsf remote:path",
-Short: `List directories and objects in remote:path formatted for parsing`,
+Short: `List directories and objects in remote:path formatted for parsing.`,
Long: `
List the contents of the source path (directories and objects) to
standard output in a form which is easy to parse by scripts. By
@@ -45,7 +45,7 @@ func newFileHandle(h vfs.Handle, fsys *FS) *FileHandle {
}
}

-// Check interface satistfied
+// Check interface satisfied
var _ fusefs.FileHandle = (*FileHandle)(nil)

// The String method is for debug printing.

@@ -67,8 +67,8 @@ func setAttr(node vfs.Node, attr *fuse.Attr) {
modTime := node.ModTime()
// set attributes
vfs := node.VFS()
-attr.Owner.Gid = vfs.Opt.UID
+attr.Owner.Gid = vfs.Opt.GID
-attr.Owner.Uid = vfs.Opt.GID
+attr.Owner.Uid = vfs.Opt.UID
attr.Mode = getMode(node)
attr.Size = Size
attr.Nlink = 1
@@ -258,7 +258,7 @@ var _ fusefs.DirStream = (*dirStream)(nil)

// Readdir opens a stream of directory entries.
//
-// Readdir essentiallly returns a list of strings, and it is allowed
+// Readdir essentially returns a list of strings, and it is allowed
// for Readdir to return different results from Lookup. For example,
// you can return nothing for Readdir ("ls my-fuse-mount" is empty),
// while still implementing Lookup ("ls my-fuse-mount/a-specific-file"
@@ -11,7 +11,7 @@ import (
"syscall"
"time"

-"github.com/okzk/sdnotify"
+sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"

@@ -162,7 +162,7 @@ FUSE.
First set up your remote using ` + "`rclone config`" + `. Check it works with ` + "`rclone ls`" + ` etc.

You can either run mount in foreground mode or background (daemon) mode. Mount runs in
-foreground mode by default, use the --daemon flag to specify background mode mode.
+foreground mode by default, use the --daemon flag to specify background mode.
Background mode is only supported on Linux and OSX, you can only run mount in
foreground mode on Windows.

@@ -192,6 +192,9 @@ Stopping the mount manually:
# OS X
umount /path/to/local/mount

+**Note**: As of ` + "`rclone` 1.52.2, `rclone mount`" + ` now requires Go version 1.13
+or newer on some platforms depending on the underlying FUSE library in use.
+
### Installing on Windows

To run rclone ` + commandName + ` on Windows, you will need to

@@ -333,9 +336,6 @@ With --vfs-read-chunk-size 100M and --vfs-read-chunk-size-limit 0 the following
parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When --vfs-read-chunk-size-limit 500M is specified, the result would be
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.

-Chunked reading will only work with --vfs-cache-mode < full, as the file will always
-be copied to the vfs cache before opening with --vfs-cache-mode full.
-
` + vfs.Help,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
@@ -448,13 +448,13 @@ func Mount(VFS *vfs.VFS, mountpoint string, mount MountFn, opt *Options) error {

// Unmount on exit
fnHandle := atexit.Register(func() {
+_ = sysdnotify.Stopping()
_ = unmount()
-_ = sdnotify.Stopping()
})
defer atexit.Unregister(fnHandle)

// Notify systemd
-if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
+if err := sysdnotify.Ready(); err != nil {
return errors.Wrap(err, "failed to notify systemd")
}

@@ -479,8 +479,8 @@ waitloop:
}
}

+_ = sysdnotify.Stopping()
_ = unmount()
-_ = sdnotify.Stopping()

if err != nil {
return errors.Wrap(err, "failed to umount FUSE fs")
@@ -75,7 +75,7 @@ func helpText() (tr []string) {
|
|||||||
" d delete file/directory",
|
" d delete file/directory",
|
||||||
}
|
}
|
||||||
if !clipboard.Unsupported {
|
if !clipboard.Unsupported {
|
||||||
tr = append(tr, " y copy current path to clipbard")
|
tr = append(tr, " y copy current path to clipboard")
|
||||||
}
|
}
|
||||||
tr = append(tr, []string{
|
tr = append(tr, []string{
|
||||||
" Y display current path",
|
" Y display current path",
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ func init() {
|
|||||||
|
|
||||||
var commandDefinition = &cobra.Command{
|
var commandDefinition = &cobra.Command{
|
||||||
Use: "obscure password",
|
Use: "obscure password",
|
||||||
Short: `Obscure password for use in the rclone config file`,
|
Short: `Obscure password for use in the rclone config file.`,
|
||||||
Long: `In the rclone config file, human readable passwords are
|
Long: `In the rclone config file, human readable passwords are
|
||||||
obscured. Obscuring them is done by encrypting them and writing them
|
obscured. Obscuring them is done by encrypting them and writing them
|
||||||
out in base64. This is **not** a secure way of encrypting these
|
out in base64. This is **not** a secure way of encrypting these
|
||||||
|
|||||||
@@ -208,7 +208,7 @@ func TestGET(t *testing.T) {
|
|||||||
body, err := ioutil.ReadAll(resp.Body)
|
body, err := ioutil.ReadAll(resp.Body)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Check we got a Last-Modifed header and that it is a valid date
|
// Check we got a Last-Modified header and that it is a valid date
|
||||||
if test.Status == http.StatusOK || test.Status == http.StatusPartialContent {
|
if test.Status == http.StatusOK || test.Status == http.StatusPartialContent {
|
||||||
lastModified := resp.Header.Get("Last-Modified")
|
lastModified := resp.Header.Get("Last-Modified")
|
||||||
assert.NotEqual(t, "", lastModified, test.Golden)
|
assert.NotEqual(t, "", lastModified, test.Golden)
|
||||||
|
|||||||
@@ -61,7 +61,7 @@ to be used within the template to server pages:
|
|||||||
| .Name | The full path of a file/directory. |
|
| .Name | The full path of a file/directory. |
|
||||||
| .Title | Directory listing of .Name |
|
| .Title | Directory listing of .Name |
|
||||||
| .Sort | The current sort used. This is changeable via ?sort= parameter |
|
| .Sort | The current sort used. This is changeable via ?sort= parameter |
|
||||||
| | Sort Options: namedirfist,name,size,time (default namedirfirst) |
|
| | Sort Options: namedirfirst,name,size,time (default namedirfirst) |
|
||||||
| .Order | The current ordering used. This is changeable via ?order= parameter |
|
| .Order | The current ordering used. This is changeable via ?order= parameter |
|
||||||
| | Order Options: asc,desc (default asc) |
|
| | Order Options: asc,desc (default asc) |
|
||||||
| .Query | Currently unused. |
|
| .Query | Currently unused. |
|
||||||
|
|||||||
@@ -132,7 +132,7 @@ func Error(what interface{}, w http.ResponseWriter, text string, err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ProcessQueryParams takes and sorts/orders based on the request sort/order parameters and default is namedirfist/asc
|
// ProcessQueryParams takes and sorts/orders based on the request sort/order parameters and default is namedirfirst/asc
|
||||||
func (d *Directory) ProcessQueryParams(sortParm string, orderParm string) *Directory {
|
func (d *Directory) ProcessQueryParams(sortParm string, orderParm string) *Directory {
|
||||||
d.Sort = sortParm
|
d.Sort = sortParm
|
||||||
d.Order = orderParm
|
d.Order = orderParm
|
||||||
|
|||||||
@@ -27,7 +27,7 @@ var Help = strings.Replace(`
|
|||||||
If you supply the parameter |--auth-proxy /path/to/program| then
|
If you supply the parameter |--auth-proxy /path/to/program| then
|
||||||
rclone will use that program to generate backends on the fly which
|
rclone will use that program to generate backends on the fly which
|
||||||
then are used to authenticate incoming requests. This uses a simple
|
then are used to authenticate incoming requests. This uses a simple
|
||||||
JSON based protocl with input on STDIN and output on STDOUT.
|
JSON based protocol with input on STDIN and output on STDOUT.
|
||||||
|
|
||||||
**PLEASE NOTE:** |--auth-proxy| and |--authorized-keys| cannot be used
|
**PLEASE NOTE:** |--auth-proxy| and |--authorized-keys| cannot be used
|
||||||
together, if |--auth-proxy| is set the authorized keys option will be
|
together, if |--auth-proxy| is set the authorized keys option will be
|
||||||
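For orientation, a minimal auth proxy program could look like the Go sketch below: it reads one JSON object describing the login from STDIN and writes a JSON backend definition to STDOUT. The field names used here (`user`, `pass`, `type`, `_root`) are assumptions taken from the serve documentation's examples; check them against the docs rather than treating this as a reference implementation.

```
package main

import (
	"encoding/json"
	"log"
	"os"
)

func main() {
	// rclone writes the incoming login as a JSON object on STDIN.
	var in map[string]string
	if err := json.NewDecoder(os.Stdin).Decode(&in); err != nil {
		log.Fatalf("auth proxy: bad input: %v", err)
	}
	// A real proxy would verify in["user"]/in["pass"] against a user
	// database here before answering.
	out := map[string]string{
		"type":  "sftp",                 // backend to create on the fly
		"_root": "/home/" + in["user"],  // illustrative root per user
		"user":  in["user"],
		"pass":  in["pass"],
	}
	// The backend definition goes back to rclone as JSON on STDOUT.
	if err := json.NewEncoder(os.Stdout).Encode(out); err != nil {
		log.Fatalf("auth proxy: bad output: %v", err)
	}
}
```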
|
|||||||
@@ -64,7 +64,7 @@ func TestTouchWithTimestamp(t *testing.T) {
|
|||||||
checkFile(t, r.Fremote, srcFileName, "")
|
checkFile(t, r.Fremote, srcFileName, "")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTouchWithLognerTimestamp(t *testing.T) {
|
func TestTouchWithLongerTimestamp(t *testing.T) {
|
||||||
r := fstest.NewRun(t)
|
r := fstest.NewRun(t)
|
||||||
defer r.Finalise()
|
defer r.Finalise()
|
||||||
|
|
||||||
|
|||||||
@@ -148,6 +148,7 @@ WebDAV or S3, that work out of the box.)
|
|||||||
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
|
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
|
||||||
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
|
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
|
||||||
{{< provider name="Tardigrade" home="https://tardigrade.io/" config="/tardigrade/" >}}
|
{{< provider name="Tardigrade" home="https://tardigrade.io/" config="/tardigrade/" >}}
|
||||||
|
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
|
||||||
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
|
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
|
||||||
{{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}}
|
{{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}}
|
||||||
{{< provider name="Yandex Disk" home="https://disk.yandex.com/" config="/yandex/" >}}
|
{{< provider name="Yandex Disk" home="https://disk.yandex.com/" config="/yandex/" >}}
|
||||||
|
|||||||
@@ -409,3 +409,15 @@ put them back in again.` >}}
|
|||||||
* Lucas Kanashiro <lucas.kanashiro@canonical.com>
|
* Lucas Kanashiro <lucas.kanashiro@canonical.com>
|
||||||
* WarpedPixel <WarpedPixel@users.noreply.github.com>
|
* WarpedPixel <WarpedPixel@users.noreply.github.com>
|
||||||
* Sam Edwards <sam@samedwards.ca>
|
* Sam Edwards <sam@samedwards.ca>
|
||||||
|
* wjielai <gouki0123@gmail.com>
|
||||||
|
* Muffin King <jinxz_k@live.com>
|
||||||
|
* Christopher Stewart <6573710+1f47a@users.noreply.github.com>
|
||||||
|
* Russell Cattelan <cattelan@digitalelves.com>
|
||||||
|
* gyutw <30371241+gyutw@users.noreply.github.com>
|
||||||
|
* Hekmon <edouardhur@gmail.com>
|
||||||
|
* LaSombra <lasombra@users.noreply.github.com>
|
||||||
|
* Dov Murik <dov.murik@gmail.com>
|
||||||
|
* Ameer Dawood <ameer1234567890@gmail.com>
|
||||||
|
* Dan Hipschman <dan.hipschman@opendoor.com>
|
||||||
|
* Josh Soref <jsoref@users.noreply.github.com>
|
||||||
|
* David <david@staron.nl>
|
||||||
|
|||||||
@@ -404,6 +404,7 @@ Note that Box is case insensitive so you can't have a file called
|
|||||||
"Hello.doc" and one called "hello.doc".
|
"Hello.doc" and one called "hello.doc".
|
||||||
|
|
||||||
Box file names can't have the `\` character in. rclone maps this to
|
Box file names can't have the `\` character in. rclone maps this to
|
||||||
and from an identical looking unicode equivalent `\`.
|
and from an identical looking unicode equivalent `\` (U+FF3C Fullwidth
|
||||||
|
Reverse Solidus).
|
||||||
|
|
||||||
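For illustration only (the real mapping lives in rclone's filename encoding layer), the substitution amounts to a single rune swap:

```
package main

import (
	"fmt"
	"strings"
)

// Swap `\` with the identical looking U+FF3C FULLWIDTH REVERSE SOLIDUS on
// the way to Box, and back again on the way home.
func toBox(name string) string   { return strings.ReplaceAll(name, `\`, "\uFF3C") }
func fromBox(name string) string { return strings.ReplaceAll(name, "\uFF3C", `\`) }

func main() {
	fmt.Println(toBox(`back\slash.txt`))          // back＼slash.txt
	fmt.Println(fromBox(toBox(`back\slash.txt`))) // back\slash.txt
}
```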
Box only supports filenames up to 255 characters in length.
|
Box only supports filenames up to 255 characters in length.
|
||||||
|
|||||||
@@ -5,6 +5,36 @@ description: "Rclone Changelog"
|
|||||||
|
|
||||||
# Changelog
|
# Changelog
|
||||||
|
|
||||||
|
## v1.53.1 - 2020-09-13
|
||||||
|
|
||||||
|
[See commits](https://github.com/rclone/rclone/compare/v1.53.0...v1.53.1)
|
||||||
|
|
||||||
|
* Bug Fixes
|
||||||
|
* accounting: Remove new line from end of --stats-one-line display (Nick Craig-Wood)
|
||||||
|
* check
|
||||||
|
* Add back missing --download flag (Nick Craig-Wood)
|
||||||
|
* Fix docs (Nick Craig-Wood)
|
||||||
|
* docs
|
||||||
|
* Note --log-file does append (Nick Craig-Wood)
|
||||||
|
* Add full stops for consistency in rclone --help (edwardxml)
|
||||||
|
* Add Tencent COS to s3 provider list (wjielai)
|
||||||
|
* Updated mount command to reflect that it requires Go 1.13 or newer (Evan Harris)
|
||||||
|
* jottacloud: Mention that uploads from local disk will not need to cache files to disk for md5 calculation (albertony)
|
||||||
|
* Fix formatting of rc docs page (Nick Craig-Wood)
|
||||||
|
* build
|
||||||
|
* Include vendor tar ball in release and fix startdev (Nick Craig-Wood)
|
||||||
|
* Fix "Illegal instruction" error for ARMv6 builds (Nick Craig-Wood)
|
||||||
|
* Fix architecture name in ARMv7 build (Nick Craig-Wood)
|
||||||
|
* VFS
|
||||||
|
* Fix spurious error "vfs cache: failed to _ensure cache EOF" (Nick Craig-Wood)
|
||||||
|
* Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
|
||||||
|
* Local
|
||||||
|
* Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
|
||||||
|
* Drive
|
||||||
|
* Re-adds special oauth help text (Tim Gallant)
|
||||||
|
* Opendrive
|
||||||
|
* Do not retry 400 errors (Evan Harris)
|
||||||
|
|
||||||
## v1.53.0 - 2020-09-02
|
## v1.53.0 - 2020-09-02
|
||||||
|
|
||||||
[See commits](https://github.com/rclone/rclone/compare/v1.52.0...v1.53.0)
|
[See commits](https://github.com/rclone/rclone/compare/v1.52.0...v1.53.0)
|
||||||
@@ -33,7 +63,7 @@ description: "Rclone Changelog"
|
|||||||
* Add reverse proxy pluginsHandler for serving plugins (Chaitanya Bankanhal)
|
* Add reverse proxy pluginsHandler for serving plugins (Chaitanya Bankanhal)
|
||||||
* Add `mount/listmounts` option for listing current mounts (Chaitanya Bankanhal)
|
* Add `mount/listmounts` option for listing current mounts (Chaitanya Bankanhal)
|
||||||
* Add `operations/uploadfile` to upload a file through rc using encoding multipart/form-data (Chaitanya Bankanhal)
|
* Add `operations/uploadfile` to upload a file through rc using encoding multipart/form-data (Chaitanya Bankanhal)
|
||||||
* Add `core/copmmand` to execute rclone terminal commands. (Chaitanya Bankanhal)
|
* Add `core/command` to execute rclone terminal commands. (Chaitanya Bankanhal)
|
||||||
* `rclone check`
|
* `rclone check`
|
||||||
* Add reporting of filenames for same/missing/changed (Nick Craig-Wood)
|
* Add reporting of filenames for same/missing/changed (Nick Craig-Wood)
|
||||||
* Make check command obey `--dry-run`/`-i`/`--interactive` (Nick Craig-Wood)
|
* Make check command obey `--dry-run`/`-i`/`--interactive` (Nick Craig-Wood)
|
||||||
@@ -142,7 +172,7 @@ description: "Rclone Changelog"
|
|||||||
* Google Cloud Storage
|
* Google Cloud Storage
|
||||||
* Add support for anonymous access (Kai Lüke)
|
* Add support for anonymous access (Kai Lüke)
|
||||||
* Jottacloud
|
* Jottacloud
|
||||||
* Bring back legacy authentification for use with whitelabel versions (buengese)
|
* Bring back legacy authentication for use with whitelabel versions (buengese)
|
||||||
* Switch to new api root - also implement a very ugly workaround for the DirMove failures (buengese)
|
* Switch to new api root - also implement a very ugly workaround for the DirMove failures (buengese)
|
||||||
* Onedrive
|
* Onedrive
|
||||||
* Rework cancel of multipart uploads on rclone exit (Nick Craig-Wood)
|
* Rework cancel of multipart uploads on rclone exit (Nick Craig-Wood)
|
||||||
@@ -292,7 +322,7 @@ all the docs and Edward Barker for helping re-write the front page.
|
|||||||
* Add `--header` flag to add HTTP headers to every HTTP transaction (Nick Craig-Wood)
|
* Add `--header` flag to add HTTP headers to every HTTP transaction (Nick Craig-Wood)
|
||||||
* Add `--check-first` to do all checking before starting transfers (Nick Craig-Wood)
|
* Add `--check-first` to do all checking before starting transfers (Nick Craig-Wood)
|
||||||
* Add `--track-renames-strategy` for configurable matching criteria for `--track-renames` (Bernd Schoolmann)
|
* Add `--track-renames-strategy` for configurable matching criteria for `--track-renames` (Bernd Schoolmann)
|
||||||
* Add `--cutoff-mode` hard,soft,catious (Shing Kit Chan & Franklyn Tackitt)
|
* Add `--cutoff-mode` hard,soft,cautious (Shing Kit Chan & Franklyn Tackitt)
|
||||||
* Filter flags (eg `--files-from -`) can read from stdin (fishbullet)
|
* Filter flags (eg `--files-from -`) can read from stdin (fishbullet)
|
||||||
* Add `--error-on-no-transfer` option (Jon Fautley)
|
* Add `--error-on-no-transfer` option (Jon Fautley)
|
||||||
* Implement `--order-by xxx,mixed` for copying some small and some big files (Nick Craig-Wood)
|
* Implement `--order-by xxx,mixed` for copying some small and some big files (Nick Craig-Wood)
|
||||||
@@ -575,7 +605,7 @@ all the docs and Edward Barker for helping re-write the front page.
|
|||||||
* dbhashsum: Stop it returning UNSUPPORTED on dropbox (Nick Craig-Wood)
|
* dbhashsum: Stop it returning UNSUPPORTED on dropbox (Nick Craig-Wood)
|
||||||
* dedupe: Add missing modes to help string (Nick Craig-Wood)
|
* dedupe: Add missing modes to help string (Nick Craig-Wood)
|
||||||
* operations
|
* operations
|
||||||
* Fix dedupe continuing on errors like insufficientFilePermisson (SezalAgrawal)
|
    * Fix dedupe continuing on errors like insufficientFilePermission (SezalAgrawal)
|
||||||
* Clear accounting before low level retry (Maciej Zimnoch)
|
* Clear accounting before low level retry (Maciej Zimnoch)
|
||||||
* Write debug message when hashes could not be checked (Ole Schütt)
|
* Write debug message when hashes could not be checked (Ole Schütt)
|
||||||
* Move interface assertion to tests to remove pflag dependency (Nick Craig-Wood)
|
* Move interface assertion to tests to remove pflag dependency (Nick Craig-Wood)
|
||||||
@@ -639,7 +669,7 @@ all the docs and Edward Barker for helping re-write the front page.
|
|||||||
* S3
|
* S3
|
||||||
* Re-implement multipart upload to fix memory issues (Nick Craig-Wood)
|
* Re-implement multipart upload to fix memory issues (Nick Craig-Wood)
|
||||||
* Add `--s3-copy-cutoff` for size to switch to multipart copy (Nick Craig-Wood)
|
* Add `--s3-copy-cutoff` for size to switch to multipart copy (Nick Craig-Wood)
|
||||||
* Add new region Asia Patific (Hong Kong) (Outvi V)
|
* Add new region Asia Pacific (Hong Kong) (Outvi V)
|
||||||
* Reduce memory usage streaming files by reducing max stream upload size (Nick Craig-Wood)
|
* Reduce memory usage streaming files by reducing max stream upload size (Nick Craig-Wood)
|
||||||
* Add `--s3-list-chunk` option for bucket listing (Thomas Kriechbaumer)
|
* Add `--s3-list-chunk` option for bucket listing (Thomas Kriechbaumer)
|
||||||
* Force path style bucket access to off for AWS deprecation (Nick Craig-Wood)
|
* Force path style bucket access to off for AWS deprecation (Nick Craig-Wood)
|
||||||
@@ -900,7 +930,7 @@ all the docs and Edward Barker for helping re-write the front page.
|
|||||||
* rcat: Fix slowdown on systems with multiple hashes (Nick Craig-Wood)
|
* rcat: Fix slowdown on systems with multiple hashes (Nick Craig-Wood)
|
||||||
* rcd: Fix permissions problems on cache directory with web gui download (Nick Craig-Wood)
|
* rcd: Fix permissions problems on cache directory with web gui download (Nick Craig-Wood)
|
||||||
* Mount
|
* Mount
|
||||||
* Default `--deamon-timout` to 15 minutes on macOS and FreeBSD (Nick Craig-Wood)
|
    * Default `--daemon-timeout` to 15 minutes on macOS and FreeBSD (Nick Craig-Wood)
|
||||||
* Update docs to show mounting from root OK for bucket based (Nick Craig-Wood)
|
* Update docs to show mounting from root OK for bucket based (Nick Craig-Wood)
|
||||||
* Remove nonseekable flag from write files (Nick Craig-Wood)
|
* Remove nonseekable flag from write files (Nick Craig-Wood)
|
||||||
* VFS
|
* VFS
|
||||||
@@ -1063,7 +1093,7 @@ all the docs and Edward Barker for helping re-write the front page.
|
|||||||
* Add client side TLS/SSL flags `--ca-cert`/`--client-cert`/`--client-key` (Nick Craig-Wood)
|
* Add client side TLS/SSL flags `--ca-cert`/`--client-cert`/`--client-key` (Nick Craig-Wood)
|
||||||
* Implement `--suffix-keep-extension` for use with `--suffix` (Nick Craig-Wood)
|
* Implement `--suffix-keep-extension` for use with `--suffix` (Nick Craig-Wood)
|
||||||
* build:
|
* build:
|
||||||
* Switch to semvar compliant version tags to be go modules compliant (Nick Craig-Wood)
|
* Switch to semver compliant version tags to be go modules compliant (Nick Craig-Wood)
|
||||||
* Update to use go1.12.x for the build (Nick Craig-Wood)
|
* Update to use go1.12.x for the build (Nick Craig-Wood)
|
||||||
* serve dlna: Add connection manager service description to improve compatibility (Dan Walters)
|
* serve dlna: Add connection manager service description to improve compatibility (Dan Walters)
|
||||||
* lsf: Add 'e' format to show encrypted names and 'o' for original IDs (Nick Craig-Wood)
|
* lsf: Add 'e' format to show encrypted names and 'o' for original IDs (Nick Craig-Wood)
|
||||||
|
|||||||
@@ -6,23 +6,26 @@ description: "Encryption overlay remote"
|
|||||||
{{< icon "fa fa-lock" >}}Crypt
|
{{< icon "fa fa-lock" >}}Crypt
|
||||||
----------------------------------------
|
----------------------------------------
|
||||||
|
|
||||||
The `crypt` remote encrypts and decrypts another remote.
|
Rclone `crypt` remotes encrypt and decrypt other remotes.
|
||||||
|
|
||||||
To use it first set up the underlying remote following the config
|
To use `crypt`, first set up the underlying remote. Follow the `rclone
|
||||||
instructions for that remote. You can also use a local pathname
|
config` instructions for that remote.
|
||||||
instead of a remote which will encrypt and decrypt from that directory
|
|
||||||
which might be useful for encrypting onto a USB stick for example.
|
|
||||||
|
|
||||||
First check your chosen remote is working - we'll call it
|
`crypt` applied to a local pathname instead of a remote will
|
||||||
`remote:path` in these docs. Note that anything inside `remote:path`
|
encrypt and decrypt that directory, and can be used to encrypt USB
|
||||||
will be encrypted and anything outside won't. This means that if you
|
removable drives.
|
||||||
are using a bucket based remote (eg S3, B2, swift) then you should
|
|
||||||
probably put the bucket in the remote `s3:bucket`. If you just use
|
|
||||||
`s3:` then rclone will make encrypted bucket names too (if using file
|
|
||||||
name encryption) which may or may not be what you want.
|
|
||||||
|
|
||||||
Now configure `crypt` using `rclone config`. We will call this one
|
Before configuring the crypt remote, check the underlying remote is
|
||||||
`secret` to differentiate it from the `remote`.
|
working. In this example the underlying remote is called `remote:path`.
|
||||||
|
Anything inside `remote:path` will be encrypted and anything outside
|
||||||
|
will not. In the case of an S3 based underlying remote (eg Amazon S3,
|
||||||
|
B2, Swift) it is generally advisable to define a crypt remote in the
|
||||||
|
underlying remote `s3:bucket`. If `s3:` alone is specified alongside
|
||||||
|
file name encryption, rclone will encrypt the bucket name.
|
||||||
|
|
||||||
|
Configure `crypt` using `rclone config`. In this example the `crypt`
|
||||||
|
remote is called `secret`, to differentiate it from the underlying
|
||||||
|
`remote`.
|
||||||
|
|
||||||
```
|
```
|
||||||
No remotes found - make a new one
|
No remotes found - make a new one
|
||||||
@@ -96,49 +99,42 @@ d) Delete this remote
|
|||||||
y/e/d> y
|
y/e/d> y
|
||||||
```
|
```
|
||||||
|
|
||||||
**Important** The password is stored in the config file is lightly
|
**Important** The crypt password stored in `rclone.conf` is lightly
|
||||||
obscured so it isn't immediately obvious what it is. It is in no way
|
obscured. That only protects it from cursory inspection. It is not
|
||||||
secure unless you use config file encryption.
|
secure unless encryption of `rclone.conf` is specified.
|
||||||
|
|
||||||
A long passphrase is recommended, or you can use a random one.
|
A long passphrase is recommended, or `rclone config` can generate a
|
||||||
|
random one.
|
||||||
|
|
||||||
The obscured password is created by using AES-CTR with a static key, with
|
The obscured password is created using AES-CTR with a static key. The
|
||||||
the salt stored verbatim at the beginning of the obscured password. This
|
salt is stored verbatim at the beginning of the obscured password. This
|
||||||
static key is shared by between all versions of rclone.
|
static key is shared between all versions of rclone.
|
||||||
|
|
||||||
If you reconfigure rclone with the same passwords/passphrases
|
If you reconfigure rclone with the same passwords/passphrases
|
||||||
elsewhere it will be compatible, but the obscured version will be different
|
elsewhere it will be compatible, but the obscured version will be different
|
||||||
due to the different salt.
|
due to the different salt.
|
||||||
|
|
||||||
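As a rough sketch of the scheme just described - AES-CTR with a static key, the IV ("salt") stored verbatim at the front, base64 output - see the Go program below. The key here is a placeholder, not rclone's shipped key, so the output will not round-trip with `rclone obscure`/`rclone reveal`.

```
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// Placeholder 32 byte key - rclone ships its own static key.
var staticKey = []byte("0123456789abcdef0123456789abcdef")

func obscure(password string) (string, error) {
	block, err := aes.NewCipher(staticKey)
	if err != nil {
		return "", err
	}
	// Random IV stored verbatim at the beginning of the output.
	buf := make([]byte, aes.BlockSize+len(password))
	iv := buf[:aes.BlockSize]
	if _, err := rand.Read(iv); err != nil {
		return "", err
	}
	cipher.NewCTR(block, iv).XORKeyStream(buf[aes.BlockSize:], []byte(password))
	return base64.RawURLEncoding.EncodeToString(buf), nil
}

func main() {
	obscured, err := obscure("potato")
	if err != nil {
		panic(err)
	}
	fmt.Println(obscured) // different every run because of the random IV
}
```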
Note that rclone does not encrypt
|
Rclone does not encrypt
|
||||||
|
|
||||||
* file length - this can be calculated within 16 bytes
|
* file length - this can be calculated within 16 bytes
|
||||||
* modification time - used for syncing
|
* modification time - used for syncing
|
||||||
|
|
||||||
## Specifying the remote ##
|
## Specifying the remote ##
|
||||||
|
|
||||||
In normal use, make sure the remote has a `:` in. If you specify the
|
In normal use, ensure the remote has a `:` in. If specified without,
|
||||||
remote without a `:` then rclone will use a local directory of that
|
rclone uses a local directory of that name. For example if a remote
|
||||||
name. So if you use a remote of `/path/to/secret/files` then rclone
|
`/path/to/secret/files` is specified, rclone encrypts content to that
|
||||||
will encrypt stuff to that directory. If you use a remote of `name`
|
directory. If a remote `name` is specified, rclone targets a directory
|
||||||
then rclone will put files in a directory called `name` in the current
|
`name` in the current directory.
|
||||||
directory.
|
|
||||||
|
|
||||||
If you specify the remote as `remote:path/to/dir` then rclone will
|
If remote `remote:path/to/dir` is specified, rclone stores encrypted
|
||||||
store encrypted files in `path/to/dir` on the remote. If you are using
|
files in `path/to/dir` on the remote. With file name encryption, files
|
||||||
file name encryption, then when you save files to
|
saved to `secret:subdir/subfile` are stored in the unencrypted path
|
||||||
`secret:subdir/subfile` this will store them in the unencrypted path
|
`path/to/dir` but the `subdir/subpath` element is encrypted.
|
||||||
`path/to/dir` but the `subdir/subpath` bit will be encrypted.
|
|
||||||
|
|
||||||
Note that unless you want encrypted bucket names (which are difficult
|
|
||||||
to manage because you won't know what directory they represent in web
|
|
||||||
interfaces etc), you should probably specify a bucket, eg
|
|
||||||
`remote:secretbucket` when using bucket based remotes such as S3,
|
|
||||||
Swift, Hubic, B2, GCS.
|
|
||||||
|
|
||||||
## Example ##
|
## Example ##
|
||||||
|
|
||||||
To test I made a little directory of files using "standard" file name
|
Create the following file structure using "standard" file name
|
||||||
encryption.
|
encryption.
|
||||||
|
|
||||||
```
|
```
|
||||||
@@ -152,7 +148,7 @@ plaintext/
|
|||||||
└── file4.txt
|
└── file4.txt
|
||||||
```
|
```
|
||||||
|
|
||||||
Copy these to the remote and list them back
|
Copy these to the remote, and list them
|
||||||
|
|
||||||
```
|
```
|
||||||
$ rclone -q copy plaintext secret:
|
$ rclone -q copy plaintext secret:
|
||||||
@@ -164,7 +160,7 @@ $ rclone -q ls secret:
|
|||||||
9 subdir/file3.txt
|
9 subdir/file3.txt
|
||||||
```
|
```
|
||||||
|
|
||||||
Now see what that looked like when encrypted
|
The crypt remote looks like
|
||||||
|
|
||||||
```
|
```
|
||||||
$ rclone -q ls remote:path
|
$ rclone -q ls remote:path
|
||||||
@@ -175,7 +171,7 @@ $ rclone -q ls remote:path
|
|||||||
56 86vhrsv86mpbtd3a0akjuqslj8/8njh1sk437gttmep3p70g81aps
|
56 86vhrsv86mpbtd3a0akjuqslj8/8njh1sk437gttmep3p70g81aps
|
||||||
```
|
```
|
||||||
|
|
||||||
Note that this retains the directory structure which means you can do this
|
The directory structure is preserved
|
||||||
|
|
||||||
```
|
```
|
||||||
$ rclone -q ls secret:subdir
|
$ rclone -q ls secret:subdir
|
||||||
@@ -184,9 +180,9 @@ $ rclone -q ls secret:subdir
|
|||||||
10 subsubdir/file4.txt
|
10 subsubdir/file4.txt
|
||||||
```
|
```
|
||||||
|
|
||||||
If don't use file name encryption then the remote will look like this
|
Without file name encryption `.bin` extensions are added to underlying
|
||||||
- note the `.bin` extensions added to prevent the cloud provider
|
names. This prevents the cloud provider attempting to interpret file
|
||||||
attempting to interpret the data.
|
content.
|
||||||
|
|
||||||
```
|
```
|
||||||
$ rclone -q ls remote:path
|
$ rclone -q ls remote:path
|
||||||
@@ -199,8 +195,6 @@ $ rclone -q ls remote:path
|
|||||||
|
|
||||||
### File name encryption modes ###
|
### File name encryption modes ###
|
||||||
|
|
||||||
Here are some of the features of the file name encryption modes
|
|
||||||
|
|
||||||
Off
|
Off
|
||||||
|
|
||||||
* doesn't hide file names or directory structure
|
* doesn't hide file names or directory structure
|
||||||
@@ -219,17 +213,19 @@ Standard
|
|||||||
Obfuscation
|
Obfuscation
|
||||||
|
|
||||||
This is a simple "rotate" of the filename, with each file having a rot
|
This is a simple "rotate" of the filename, with each file having a rot
|
||||||
distance based on the filename. We store the distance at the beginning
|
distance based on the filename. Rclone stores the distance at the
|
||||||
of the filename. So a file called "hello" may become "53.jgnnq".
|
beginning of the filename. A file called "hello" may become "53.jgnnq".
|
||||||
|
|
||||||
This is not a strong encryption of filenames, but it may stop automated
|
Obfuscation is not a strong encryption of filenames, but hinders
|
||||||
scanning tools from picking up on filename patterns. As such it's an
|
automated scanning tools picking up on filename patterns. It is an
|
||||||
intermediate between "off" and "standard". The advantage is that it
|
intermediate between "off" and "standard" which allows for longer path
|
||||||
allows for longer path segment names.
|
segment names.
|
||||||
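A toy version of the idea, purely for illustration (rclone's real obfuscation derives the rotation distance differently and handles unicode):

```
package main

import (
	"fmt"
	"strconv"
)

// obfuscate derives a rotation distance from the name, prefixes it to the
// result, and rotates the ASCII letters by that distance. Toy code only.
func obfuscate(name string) string {
	dist := 0
	for _, r := range name {
		dist += int(r)
	}
	dist %= 26
	out := make([]rune, 0, len(name))
	for _, r := range name {
		switch {
		case r >= 'a' && r <= 'z':
			out = append(out, 'a'+(r-'a'+rune(dist))%26)
		case r >= 'A' && r <= 'Z':
			out = append(out, 'A'+(r-'A'+rune(dist))%26)
		default:
			out = append(out, r)
		}
	}
	return strconv.Itoa(dist) + "." + string(out)
}

func main() {
	fmt.Println(obfuscate("hello")) // "12.tqxxa" with this toy distance rule
}
```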
|
|
||||||
There is a possibility with some unicode based filenames that the
|
There is a possibility with some unicode based filenames that the
|
||||||
obfuscation is weak and may map lower case characters to upper case
|
obfuscation is weak and may map lower case characters to upper case
|
||||||
equivalents. You can not rely on this for strong protection.
|
equivalents.
|
||||||
|
|
||||||
|
Obfuscation cannot be relied upon for strong protection.
|
||||||
|
|
||||||
* file names very lightly obfuscated
|
* file names very lightly obfuscated
|
||||||
* file names can be longer than standard encryption
|
* file names can be longer than standard encryption
|
||||||
@@ -237,13 +233,14 @@ equivalents. You can not rely on this for strong protection.
|
|||||||
* directory structure visible
|
* directory structure visible
|
||||||
* identical files names will have identical uploaded names
|
* identical files names will have identical uploaded names
|
||||||
|
|
||||||
Cloud storage systems have various limits on file name length and
|
Cloud storage systems have limits on file name length and
|
||||||
total path length which you are more likely to hit using "Standard"
|
total path length which rclone is more likely to breach using
|
||||||
file name encryption. If you keep your file names to below 156
|
"Standard" file name encryption. Where file names are less thn 156
|
||||||
characters in length then you should be OK on all providers.
|
characters in length, issues should not be encountered, irrespective of
|
||||||
|
cloud storage provider.
|
||||||
|
|
||||||
There may be an even more secure file name encryption mode in the
|
An alternative, future rclone file name encryption mode may tolerate
|
||||||
future which will address the long file name problem.
|
backend provider path length limits.
|
||||||
|
|
||||||
### Directory name encryption ###
|
### Directory name encryption ###
|
||||||
Crypt offers the option of encrypting dir names or leaving them intact.
|
Crypt offers the option of encrypting dir names or leaving them intact.
|
||||||
@@ -269,10 +266,10 @@ Example:
|
|||||||
Crypt stores modification times using the underlying remote so support
|
Crypt stores modification times using the underlying remote so support
|
||||||
depends on that.
|
depends on that.
|
||||||
|
|
||||||
Hashes are not stored for crypt. However the data integrity is
|
Hashes are not stored for crypt. However the data integrity is
|
||||||
protected by an extremely strong crypto authenticator.
|
protected by an extremely strong crypto authenticator.
|
||||||
|
|
||||||
Note that you should use the `rclone cryptcheck` command to check the
|
Use the `rclone cryptcheck` command to check the
|
||||||
integrity of a crypted remote instead of `rclone check` which can't
|
integrity of a crypted remote instead of `rclone check` which can't
|
||||||
check the checksums properly.
|
check the checksums properly.
|
||||||
|
|
||||||
|
|||||||
@@ -757,6 +757,8 @@ This can be useful for tracking down problems with syncs in
|
|||||||
combination with the `-v` flag. See the [Logging section](#logging)
|
combination with the `-v` flag. See the [Logging section](#logging)
|
||||||
for more info.
|
for more info.
|
||||||
|
|
||||||
|
If FILE exists then rclone will append to it.
|
||||||
|
|
||||||
Note that if you are using the `logrotate` program to manage rclone's
|
Note that if you are using the `logrotate` program to manage rclone's
|
||||||
logs, then you should use the `copytruncate` option as rclone doesn't
|
logs, then you should use the `copytruncate` option as rclone doesn't
|
||||||
have a signal to rotate logs.
|
have a signal to rotate logs.
|
||||||
@@ -1106,6 +1108,11 @@ Note: On Windows until [this bug](https://github.com/Azure/go-ansiterm/issues/26
|
|||||||
is fixed all non-ASCII characters will be replaced with `.` when
|
is fixed all non-ASCII characters will be replaced with `.` when
|
||||||
`--progress` is in use.
|
`--progress` is in use.
|
||||||
|
|
||||||
|
### --progress-terminal-title ###
|
||||||
|
|
||||||
|
This flag, when used with `-P/--progress`, will print the string `ETA: %s`
|
||||||
|
to the terminal title.
|
||||||
|
|
||||||
### -q, --quiet ###
|
### -q, --quiet ###
|
||||||
|
|
||||||
This flag will limit rclone's output to error messages only.
|
This flag will limit rclone's output to error messages only.
|
||||||
@@ -1251,11 +1258,17 @@ or with `--backup-dir`. See `--backup-dir` for more info.
|
|||||||
|
|
||||||
For example
|
For example
|
||||||
|
|
||||||
rclone sync -i /path/to/local/file remote:current --suffix .bak
|
rclone copy -i /path/to/local/file remote:current --suffix .bak
|
||||||
|
|
||||||
will sync `/path/to/local` to `remote:current`, but for any files
|
will copy `/path/to/local` to `remote:current`, but any files
|
||||||
which would have been updated or deleted will have .bak added.
|
which would have been updated or deleted will have .bak added.
|
||||||
|
|
||||||
|
If using `rclone sync` with `--suffix` and without `--backup-dir` then
|
||||||
|
it is recommended to put a filter rule in excluding the suffix
|
||||||
|
otherwise the `sync` will delete the backup files.
|
||||||
|
|
||||||
|
rclone sync -i /path/to/local/file remote:current --suffix .bak --exclude "*.bak"
|
||||||
|
|
||||||
### --suffix-keep-extension ###
|
### --suffix-keep-extension ###
|
||||||
|
|
||||||
When using `--suffix`, setting this causes rclone put the SUFFIX
|
When using `--suffix`, setting this causes rclone put the SUFFIX
|
||||||
|
|||||||
@@ -202,6 +202,39 @@ Impersonate this user when using a business account.
|
|||||||
- Type: string
|
- Type: string
|
||||||
- Default: ""
|
- Default: ""
|
||||||
|
|
||||||
|
#### --dropbox-shared-files
|
||||||
|
|
||||||
|
Instructs rclone to work on individual shared files.
|
||||||
|
|
||||||
|
In this mode rclone's features are extremely limited - only list (ls, lsl, etc.)
|
||||||
|
operations and read operations (e.g. downloading) are supported.
|
||||||
|
All other operations will be disabled.
|
||||||
|
|
||||||
|
- Config: shared_files
|
||||||
|
- Env Var: RCLONE_DROPBOX_SHARED_FILES
|
||||||
|
- Type: bool
|
||||||
|
- Default: false
|
||||||
|
|
||||||
|
#### --dropbox-shared-folders
|
||||||
|
|
||||||
|
Instructs rclone to work on shared folders.
|
||||||
|
|
||||||
|
When this flag is used with no path only the List operation is supported and
|
||||||
|
all available shared folders will be listed. If you specify a path, the first part
|
||||||
|
will be interpreted as the name of the shared folder. Rclone will then try to mount this
|
||||||
|
shared folder to the root namespace. On success rclone proceeds normally.
|
||||||
|
The shared folder is now pretty much a normal folder and all normal operations
|
||||||
|
are supported.
|
||||||
|
|
||||||
|
Note that we don't unmount the shared folder afterwards so the
|
||||||
|
--dropbox-shared-folders can be omitted after the first use of a particular
|
||||||
|
shared folder.
|
||||||
|
|
||||||
|
- Config: shared_folders
|
||||||
|
- Env Var: RCLONE_DROPBOX_SHARED_FOLDERS
|
||||||
|
- Type: bool
|
||||||
|
- Default: false
|
||||||
|
|
||||||
#### --dropbox-encoding
|
#### --dropbox-encoding
|
||||||
|
|
||||||
This sets the encoding for the backend.
|
This sets the encoding for the backend.
|
||||||
|
|||||||
@@ -48,7 +48,7 @@ Choose a number from below, or type in your own value
|
|||||||
1 / Connect to ftp.example.com
|
1 / Connect to ftp.example.com
|
||||||
\ "ftp.example.com"
|
\ "ftp.example.com"
|
||||||
host> ftp.example.com
|
host> ftp.example.com
|
||||||
FTP username, leave blank for current username, ncw
|
FTP username, leave blank for current username, $USER
|
||||||
Enter a string value. Press Enter for the default ("").
|
Enter a string value. Press Enter for the default ("").
|
||||||
user>
|
user>
|
||||||
FTP port, leave blank to use default (21)
|
FTP port, leave blank to use default (21)
|
||||||
|
|||||||
@@ -205,7 +205,7 @@ or the latest version (equivalent to the beta) with
|
|||||||
These will build the binary in `$(go env GOPATH)/bin`
|
These will build the binary in `$(go env GOPATH)/bin`
|
||||||
(`~/go/bin/rclone` by default) after downloading the source to the go
|
(`~/go/bin/rclone` by default) after downloading the source to the go
|
||||||
module cache. Note - do **not** use the `-u` flag here. This causes go
|
module cache. Note - do **not** use the `-u` flag here. This causes go
|
||||||
to try to update the depencencies that rclone uses and sometimes these
|
to try to update the dependencies that rclone uses and sometimes these
|
||||||
don't work with the current version of rclone.
|
don't work with the current version of rclone.
|
||||||
|
|
||||||
## Installation with Ansible ##
|
## Installation with Ansible ##
|
||||||
|
|||||||
@@ -27,8 +27,8 @@ Note that the web interface may refer to this token as a JottaCli token.
|
|||||||
### Legacy Setup
|
### Legacy Setup
|
||||||
|
|
||||||
If you are using one of the whitelabel versions (Elgiganten, Com Hem Cloud) you may not have the option
|
If you are using one of the whitelabel versions (Elgiganten, Com Hem Cloud) you may not have the option
|
||||||
to generate a CLI token. In this case you'll have to use the legacy authentification. To to this select
|
to generate a CLI token. In this case you'll have to use the legacy authentication. To do this select
|
||||||
yes when the setup asks for legacy authentification and enter your username and password.
|
yes when the setup asks for legacy authentication and enter your username and password.
|
||||||
The rest of the setup is identical to the default setup.
|
The rest of the setup is identical to the default setup.
|
||||||
|
|
||||||
Here is an example of how to make a remote called `remote` with the default setup. First run:
|
Here is an example of how to make a remote called `remote` with the default setup. First run:
|
||||||
@@ -59,7 +59,7 @@ y) Yes
|
|||||||
n) No
|
n) No
|
||||||
y/n> n
|
y/n> n
|
||||||
Remote config
|
Remote config
|
||||||
Use legacy authentification?.
|
Use legacy authentication?.
|
||||||
This is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.
|
This is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.
|
||||||
y) Yes
|
y) Yes
|
||||||
n) No (default)
|
n) No (default)
|
||||||
@@ -148,8 +148,13 @@ flag.
|
|||||||
Note that Jottacloud requires the MD5 hash before upload so if the
|
Note that Jottacloud requires the MD5 hash before upload so if the
|
||||||
source does not have an MD5 checksum then the file will be cached
|
source does not have an MD5 checksum then the file will be cached
|
||||||
temporarily on disk (wherever the `TMPDIR` environment variable points
|
temporarily on disk (wherever the `TMPDIR` environment variable points
|
||||||
to) before it is uploaded. Small files will be cached in memory - see
|
to) before it is uploaded. Small files will be cached in memory - see
|
||||||
the [--jottacloud-md5-memory-limit](#jottacloud-md5-memory-limit) flag.
|
the [--jottacloud-md5-memory-limit](#jottacloud-md5-memory-limit) flag.
|
||||||
|
When uploading from local disk the source checksum is always available,
|
||||||
|
so this does not apply. Starting with rclone version 1.52 the same is
|
||||||
|
true for crypted remotes (in older versions the crypt backend would not
|
||||||
|
calculate hashes for uploads from local disk, so the Jottacloud
|
||||||
|
backend had to do it as described above).
|
||||||
|
|
||||||
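The caching behaviour described above follows a common Go pattern - hash the stream once, keeping small payloads in memory and spilling larger ones to a temporary file. The sketch below is not rclone's code; `memoryLimit` merely stands in for `--jottacloud-md5-memory-limit`.

```
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// hashAndSpool returns the MD5 of r and a reader positioned at the start of
// the buffered data, buffering in memory below memoryLimit and in a temp
// file (honouring TMPDIR) otherwise.
func hashAndSpool(r io.Reader, size, memoryLimit int64) (string, io.Reader, func(), error) {
	hasher := md5.New()
	cleanup := func() {}
	if size >= 0 && size <= memoryLimit {
		var buf bytes.Buffer
		if _, err := io.Copy(io.MultiWriter(hasher, &buf), r); err != nil {
			return "", nil, cleanup, err
		}
		return hex.EncodeToString(hasher.Sum(nil)), &buf, cleanup, nil
	}
	tmp, err := os.CreateTemp("", "spool-*")
	if err != nil {
		return "", nil, cleanup, err
	}
	cleanup = func() { tmp.Close(); os.Remove(tmp.Name()) }
	if _, err := io.Copy(io.MultiWriter(hasher, tmp), r); err != nil {
		return "", nil, cleanup, err
	}
	if _, err := tmp.Seek(0, io.SeekStart); err != nil {
		return "", nil, cleanup, err
	}
	return hex.EncodeToString(hasher.Sum(nil)), tmp, cleanup, nil
}

func main() {
	sum, body, cleanup, err := hashAndSpool(strings.NewReader("hello"), 5, 1<<20)
	defer cleanup()
	if err != nil {
		panic(err)
	}
	data, _ := io.ReadAll(body)
	fmt.Println(sum, string(data)) // hash first, then upload from the buffered copy
}
```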
#### Restricted filename characters
|
#### Restricted filename characters
|
||||||
|
|
||||||
|
|||||||
@@ -402,7 +402,7 @@ If the server can't do `CleanUp` then `rclone cleanup` will return an
|
|||||||
error.
|
error.
|
||||||
|
|
||||||
‡‡ Note that while Box implements this it has to delete every file
|
‡‡ Note that while Box implements this it has to delete every file
|
||||||
idividually so it will be slower than emptying the trash via the WebUI
|
individually so it will be slower than emptying the trash via the WebUI
|
||||||
|
|
||||||
### ListR ###
|
### ListR ###
|
||||||
|
|
||||||
|
|||||||
@@ -259,7 +259,7 @@ Concurrency for multipart uploads.
|
|||||||
This is the number of chunks of the same file that are uploaded
|
This is the number of chunks of the same file that are uploaded
|
||||||
concurrently.
|
concurrently.
|
||||||
|
|
||||||
NB if you set this to > 1 then the checksums of multpart uploads
|
NB if you set this to > 1 then the checksums of multipart uploads
|
||||||
become corrupted (the uploads themselves are not corrupted though).
|
become corrupted (the uploads themselves are not corrupted though).
|
||||||
|
|
||||||
If you are uploading small numbers of large files over high speed links
|
If you are uploading small numbers of large files over high speed links
|
||||||
|
|||||||
@@ -537,6 +537,8 @@ OR
|
|||||||
"result": "<Raw command line output>"
|
"result": "<Raw command line output>"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
**Authentication is required for this call.**
|
**Authentication is required for this call.**
|
||||||
|
|
||||||
### core/gc: Runs a garbage collection. {#core-gc}
|
### core/gc: Runs a garbage collection. {#core-gc}
|
||||||
@@ -1212,7 +1214,7 @@ This allows you to remove a plugin using it's name
|
|||||||
|
|
||||||
This takes parameters
|
This takes parameters
|
||||||
|
|
||||||
- name: name of the plugin in the format <author>/<plugin_name>
|
- name: name of the plugin in the format `author`/`plugin_name`
|
||||||
|
|
||||||
Eg
|
Eg
|
||||||
|
|
||||||
@@ -1226,7 +1228,7 @@ This allows you to remove a plugin using it's name
|
|||||||
|
|
||||||
This takes the following parameters
|
This takes the following parameters
|
||||||
|
|
||||||
- name: name of the plugin in the format <author>/<plugin_name>
|
- name: name of the plugin in the format `author`/`plugin_name`
|
||||||
|
|
||||||
Eg
|
Eg
|
||||||
|
|
||||||
|
|||||||
@@ -18,6 +18,7 @@ The S3 backend can be used with a number of different providers:
|
|||||||
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
|
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
|
||||||
{{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
|
{{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
|
||||||
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
|
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
|
||||||
|
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
|
||||||
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" end="true" >}}
|
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" end="true" >}}
|
||||||
{{< /provider_list >}}
|
{{< /provider_list >}}
|
||||||
|
|
||||||
@@ -138,7 +139,7 @@ Choose a number from below, or type in your own value
|
|||||||
/ Asia Pacific (Mumbai)
|
/ Asia Pacific (Mumbai)
|
||||||
13 | Needs location constraint ap-south-1.
|
13 | Needs location constraint ap-south-1.
|
||||||
\ "ap-south-1"
|
\ "ap-south-1"
|
||||||
/ Asia Patific (Hong Kong) Region
|
/ Asia Pacific (Hong Kong) Region
|
||||||
14 | Needs location constraint ap-east-1.
|
14 | Needs location constraint ap-east-1.
|
||||||
\ "ap-east-1"
|
\ "ap-east-1"
|
||||||
/ South America (Sao Paulo) Region
|
/ South America (Sao Paulo) Region
|
||||||
@@ -488,6 +489,8 @@ Choose your S3 provider.
|
|||||||
- StackPath Object Storage
|
- StackPath Object Storage
|
||||||
- "Wasabi"
|
- "Wasabi"
|
||||||
- Wasabi Object Storage
|
- Wasabi Object Storage
|
||||||
|
- "TencentCOS"
|
||||||
|
- Tencent Cloud Object Storage (COS)
|
||||||
- "Other"
|
- "Other"
|
||||||
- Any other S3 compatible provider
|
- Any other S3 compatible provider
|
||||||
|
|
||||||
@@ -579,7 +582,7 @@ Region to connect to.
|
|||||||
- Asia Pacific (Mumbai)
|
- Asia Pacific (Mumbai)
|
||||||
- Needs location constraint ap-south-1.
|
- Needs location constraint ap-south-1.
|
||||||
- "ap-east-1"
|
- "ap-east-1"
|
||||||
- Asia Patific (Hong Kong) Region
|
- Asia Pacific (Hong Kong) Region
|
||||||
- Needs location constraint ap-east-1.
|
- Needs location constraint ap-east-1.
|
||||||
- "sa-east-1"
|
- "sa-east-1"
|
||||||
- South America (Sao Paulo) Region
|
- South America (Sao Paulo) Region
|
||||||
@@ -1122,7 +1125,7 @@ The storage class to use when storing new objects in S3.
|
|||||||
|
|
||||||
### Advanced Options
|
### Advanced Options
|
||||||
|
|
||||||
Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
|
Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)).
|
||||||
|
|
||||||
#### --s3-bucket-acl
|
#### --s3-bucket-acl
|
||||||
|
|
||||||
@@ -1490,7 +1493,7 @@ All the objects shown will be marked for restore, then
|
|||||||
rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard
|
rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard
|
||||||
|
|
||||||
It returns a list of status dictionaries with Remote and Status
|
It returns a list of status dictionaries with Remote and Status
|
||||||
keys. The Status will be OK if it was successfull or an error message
|
keys. The Status will be OK if it was successful or an error message
|
||||||
if not.
|
if not.
|
||||||
|
|
||||||
[
|
[
|
||||||
@@ -1791,7 +1794,7 @@ Choose a number from below, or type in your own value
|
|||||||
secret_access_key> <>
|
secret_access_key> <>
|
||||||
```
|
```
|
||||||
|
|
||||||
6. Specify the endpoint for IBM COS. For Public IBM COS, choose from the option below. For On Premise IBM COS, enter an enpoint address.
|
6. Specify the endpoint for IBM COS. For Public IBM COS, choose from the option below. For On Premise IBM COS, enter an endpoint address.
|
||||||
```
|
```
|
||||||
Endpoint for IBM COS S3 API.
|
Endpoint for IBM COS S3 API.
|
||||||
Specify if using an IBM COS On Premise.
|
Specify if using an IBM COS On Premise.
|
||||||
@@ -1852,7 +1855,7 @@ Choose a number from below, or type in your own value
|
|||||||
location_constraint>1
|
location_constraint>1
|
||||||
```
|
```
|
||||||
|
|
||||||
9. Specify a canned ACL. IBM Cloud (Strorage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs.
|
9. Specify a canned ACL. IBM Cloud (Storage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs.
|
||||||
```
|
```
|
||||||
Canned ACL used when creating buckets and/or storing objects in S3.
|
Canned ACL used when creating buckets and/or storing objects in S3.
|
||||||
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
|
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
|
||||||
@@ -2212,6 +2215,138 @@ d) Delete this remote
|
|||||||
y/e/d> y
|
y/e/d> y
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Tencent COS {#tencent-cos}
|
||||||
|
|
||||||
|
[Tencent Cloud Object Storage (COS)](https://intl.cloud.tencent.com/product/cos) is a distributed storage service offered by Tencent Cloud for unstructured data. It is secure, stable, massive, convenient, low-delay and low-cost.
|
||||||
|
|
||||||
|
To configure access to Tencent COS, follow the steps below:
|
||||||
|
|
||||||
|
1. Run `rclone config` and select `n` for a new remote.
|
||||||
|
|
||||||
|
```
|
||||||
|
rclone config
|
||||||
|
No remotes found - make a new one
|
||||||
|
n) New remote
|
||||||
|
s) Set configuration password
|
||||||
|
q) Quit config
|
||||||
|
n/s/q> n
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Give the name of the configuration. For example, name it 'cos'.
|
||||||
|
|
||||||
|
```
|
||||||
|
name> cos
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Select `s3` storage.
|
||||||
|
|
||||||
|
```
|
||||||
|
Choose a number from below, or type in your own value
|
||||||
|
1 / 1Fichier
|
||||||
|
\ "fichier"
|
||||||
|
2 / Alias for an existing remote
|
||||||
|
\ "alias"
|
||||||
|
3 / Amazon Drive
|
||||||
|
\ "amazon cloud drive"
|
||||||
|
4 / Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)
|
||||||
|
\ "s3"
|
||||||
|
[snip]
|
||||||
|
Storage> s3
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Select `TencentCOS` provider.
|
||||||
|
```
|
||||||
|
Choose a number from below, or type in your own value
|
||||||
|
1 / Amazon Web Services (AWS) S3
|
||||||
|
\ "AWS"
|
||||||
|
[snip]
|
||||||
|
11 / Tencent Cloud Object Storage (COS)
|
||||||
|
\ "TencentCOS"
|
||||||
|
[snip]
|
||||||
|
provider> TencentCOS
|
||||||
|
```
|
||||||
|
|
||||||
|
5. Enter your SecretId and SecretKey of Tencent Cloud.
|
||||||
|
|
||||||
|
```
|
||||||
|
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||||
|
Only applies if access_key_id and secret_access_key is blank.
|
||||||
|
Enter a boolean value (true or false). Press Enter for the default ("false").
|
||||||
|
Choose a number from below, or type in your own value
|
||||||
|
1 / Enter AWS credentials in the next step
|
||||||
|
\ "false"
|
||||||
|
2 / Get AWS credentials from the environment (env vars or IAM)
|
||||||
|
\ "true"
|
||||||
|
env_auth> 1
|
||||||
|
AWS Access Key ID.
|
||||||
|
Leave blank for anonymous access or runtime credentials.
|
||||||
|
Enter a string value. Press Enter for the default ("").
|
||||||
|
access_key_id> AKIDxxxxxxxxxx
|
||||||
|
AWS Secret Access Key (password)
|
||||||
|
Leave blank for anonymous access or runtime credentials.
|
||||||
|
Enter a string value. Press Enter for the default ("").
|
||||||
|
secret_access_key> xxxxxxxxxxx
|
||||||
|
```
|
||||||
|
|
||||||
|
6. Select an endpoint for Tencent COS. These are the standard endpoints for the different regions.
|
||||||
|
|
||||||
|
```
|
||||||
|
1 / Beijing Region.
|
||||||
|
\ "cos.ap-beijing.myqcloud.com"
|
||||||
|
2 / Nanjing Region.
|
||||||
|
\ "cos.ap-nanjing.myqcloud.com"
|
||||||
|
3 / Shanghai Region.
|
||||||
|
\ "cos.ap-shanghai.myqcloud.com"
|
||||||
|
4 / Guangzhou Region.
|
||||||
|
\ "cos.ap-guangzhou.myqcloud.com"
|
||||||
|
[snip]
|
||||||
|
endpoint> 4
|
||||||
|
```
|
||||||
|
|
||||||
|
7. Choose acl and storage class.
|
||||||
|
|
||||||
|
```
|
||||||
|
Note that this ACL is applied when server side copying objects as S3
|
||||||
|
doesn't copy the ACL from the source but rather writes a fresh one.
|
||||||
|
Enter a string value. Press Enter for the default ("").
|
||||||
|
Choose a number from below, or type in your own value
|
||||||
|
1 / Owner gets Full_CONTROL. No one else has access rights (default).
|
||||||
|
\ "default"
|
||||||
|
[snip]
|
||||||
|
acl> 1
|
||||||
|
The storage class to use when storing new objects in Tencent COS.
|
||||||
|
Enter a string value. Press Enter for the default ("").
|
||||||
|
Choose a number from below, or type in your own value
|
||||||
|
1 / Default
|
||||||
|
\ ""
|
||||||
|
[snip]
|
||||||
|
storage_class> 1
|
||||||
|
Edit advanced config? (y/n)
|
||||||
|
y) Yes
|
||||||
|
n) No (default)
|
||||||
|
y/n> n
|
||||||
|
Remote config
|
||||||
|
--------------------
|
||||||
|
[cos]
|
||||||
|
type = s3
|
||||||
|
provider = TencentCOS
|
||||||
|
env_auth = false
|
||||||
|
access_key_id = xxx
|
||||||
|
secret_access_key = xxx
|
||||||
|
endpoint = cos.ap-guangzhou.myqcloud.com
|
||||||
|
acl = default
|
||||||
|
--------------------
|
||||||
|
y) Yes this is OK (default)
|
||||||
|
e) Edit this remote
|
||||||
|
d) Delete this remote
|
||||||
|
y/e/d> y
|
||||||
|
Current remotes:
|
||||||
|
|
||||||
|
Name Type
|
||||||
|
==== ====
|
||||||
|
cos s3
|
||||||
|
```
|
||||||
|
|
||||||
### Netease NOS ###
|
### Netease NOS ###
|
||||||
|
|
||||||
For Netease NOS configure as per the configurator `rclone config`
|
For Netease NOS configure as per the configurator `rclone config`
|
||||||
|
|||||||
@@ -52,7 +52,7 @@ Choose a number from below, or type in your own value
|
|||||||
1 / Connect to example.com
|
1 / Connect to example.com
|
||||||
\ "example.com"
|
\ "example.com"
|
||||||
host> example.com
|
host> example.com
|
||||||
SSH username, leave blank for current username, ncw
|
SSH username, leave blank for current username, $USER
|
||||||
user> sftpuser
|
user> sftpuser
|
||||||
SSH port, leave blank to use default (22)
|
SSH port, leave blank to use default (22)
|
||||||
port>
|
port>
|
||||||
@@ -102,7 +102,7 @@ excess files in the directory.
|
|||||||
The SFTP remote supports three authentication methods:
|
The SFTP remote supports three authentication methods:
|
||||||
|
|
||||||
* Password
|
* Password
|
||||||
* Key file
|
* Key file, including certificate signed keys
|
||||||
* ssh-agent
|
* ssh-agent
|
||||||
|
|
||||||
Key files should be PEM-encoded private key files. For instance `/home/$USER/.ssh/id_rsa`.
|
Key files should be PEM-encoded private key files. For instance `/home/$USER/.ssh/id_rsa`.
|
||||||
@@ -128,6 +128,77 @@ Using an ssh-agent is the only way to load encrypted OpenSSH keys at the moment.
|
|||||||
If you set the `--sftp-ask-password` option, rclone will prompt for a
|
If you set the `--sftp-ask-password` option, rclone will prompt for a
|
||||||
password when needed and no password has been configured.
|
password when needed and no password has been configured.
|
||||||
|
|
||||||
|
If you have a certificate then you can provide the path to the public key that contains the certificate. For example:
|
||||||
|
|
||||||
|
```
|
||||||
|
[remote]
|
||||||
|
type = sftp
|
||||||
|
host = example.com
|
||||||
|
user = sftpuser
|
||||||
|
key_file = ~/id_rsa
|
||||||
|
pubkey_file = ~/id_rsa-cert.pub
|
||||||
|
```
|
||||||
|
|
||||||
|
If you concatenate a cert with a private key then you can specify the
|
||||||
|
merged file in both places.
|
||||||
|
|
||||||
|
Note: the cert must come first in the file. e.g.
|
||||||
|
|
||||||
|
```
|
||||||
|
cat id_rsa-cert.pub id_rsa > merged_key
|
||||||
|
```
|
||||||
|
|
||||||
|
### Host key validation ###
|
||||||
|
|
||||||
|
By default rclone will not check the server's host key for validation. This
|
||||||
|
can allow an attacker to replace a server with their own and if you use
|
||||||
|
password authentication then this can lead to that password being exposed.
|
||||||
|
|
||||||
|
Host key matching, using standard `known_hosts` files, can be turned on by
|
||||||
|
enabling the `known_hosts_file` option. This can point to the file maintained
|
||||||
|
by `OpenSSH` or can point to a unique file.
|
||||||
|
|
||||||
|
e.g.
|
||||||
|
|
||||||
|
```
|
||||||
|
[remote]
|
||||||
|
type = sftp
|
||||||
|
host = example.com
|
||||||
|
user = sftpuser
|
||||||
|
pass =
|
||||||
|
known_hosts_file = ~/.ssh/known_hosts
|
||||||
|
```
|
||||||
|
|
||||||
|
There are some limitations:
|
||||||
|
|
||||||
|
* `rclone` will not _manage_ this file for you. If the key is missing or
|
||||||
|
wrong then the connection will be refused.
|
||||||
|
* If the server is set up for a certificate host key then the entry in
|
||||||
|
the `known_hosts` file _must_ be the `@cert-authority` entry for the CA
|
||||||
|
* Unlike `OpenSSH`, the libraries used by `rclone` do not permit (at time
|
||||||
|
of writing) multiple host keys to be listed for a server. Only the first
|
||||||
|
entry is used.
|
||||||
|
|
||||||
|
If the host key provided by the server does not match the one in the
|
||||||
|
file (or is missing) then the connection will be aborted and an error
|
||||||
|
returned such as
|
||||||
|
|
||||||
|
NewFs: couldn't connect SSH: ssh: handshake failed: knownhosts: key mismatch
|
||||||
|
|
||||||
|
or
|
||||||
|
|
||||||
|
NewFs: couldn't connect SSH: ssh: handshake failed: knownhosts: key is unknown
|
||||||
|
|
||||||
|
If you see an error such as
|
||||||
|
|
||||||
|
NewFs: couldn't connect SSH: ssh: handshake failed: ssh: no authorities for hostname: example.com:22
|
||||||
|
|
||||||
|
then it is likely the server has presented a CA signed host certificate
|
||||||
|
and you will need to add the appropriate `@cert-authority` entry.
|
||||||
|
|
||||||
|
The `known_hosts_file` setting can be set during `rclone config` as an
|
||||||
|
advanced option.
|
||||||
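For background, this kind of validation is what the `golang.org/x/crypto/ssh/knownhosts` package provides for Go SSH clients. The standalone sketch below is illustrative rather than rclone's own code; the host, user and file path are placeholders.

```
package main

import (
	"log"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/knownhosts"
)

func main() {
	// Turn a known_hosts file into a host key callback.
	hostKeyCallback, err := knownhosts.New("/home/user/.ssh/known_hosts")
	if err != nil {
		log.Fatalf("read known_hosts: %v", err)
	}
	config := &ssh.ClientConfig{
		User:            "sftpuser",
		Auth:            []ssh.AuthMethod{ssh.Password("secret")},
		HostKeyCallback: hostKeyCallback, // reject unknown or mismatching host keys
	}
	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		// e.g. "knownhosts: key is unknown" or "knownhosts: key mismatch"
		log.Fatalf("connect: %v", err)
	}
	defer client.Close()
	log.Println("host key verified against known_hosts")
}
```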
|
|
||||||
### ssh-agent on macOS ###
|
### ssh-agent on macOS ###
|
||||||
|
|
||||||
Note that there seem to be various problems with using an ssh-agent on
|
Note that there seem to be various problems with using an ssh-agent on
|
||||||
@@ -172,7 +243,7 @@ SSH host to connect to
|
|||||||
|
|
||||||
#### --sftp-user
|
#### --sftp-user
|
||||||
|
|
||||||
SSH username, leave blank for current username, ncw
|
SSH username, leave blank for current username, $USER
|
||||||
|
|
||||||
- Config: user
|
- Config: user
|
||||||
- Env Var: RCLONE_SFTP_USER
|
- Env Var: RCLONE_SFTP_USER
|
||||||
@@ -234,6 +305,20 @@ in the new OpenSSH format can't be used.

- Type: string
- Default: ""

+#### --sftp-pubkey-file
+
+Optional path to public key file.
+
+Set this if you have a signed certificate you want to use for authentication.
+
+Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.
+
+- Config: pubkey_file
+- Env Var: RCLONE_SFTP_PUBKEY_FILE
+- Type: string
+- Default: ""
+
#### --sftp-key-use-agent

When set forces the usage of the ssh-agent.
@@ -286,6 +371,23 @@ Leave blank or set to false to enable hashing (recommended), set to true to disa

Here are the advanced options specific to sftp (SSH/SFTP Connection).

+#### --sftp-known-hosts-file
+
+Optional path to known_hosts file.
+
+Set this value to enable server host key validation.
+
+Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.
+
+- Config: known_hosts_file
+- Env Var: RCLONE_SFTP_KNOWN_HOSTS_FILE
+- Type: string
+- Default: ""
+- Examples:
+    - "~/.ssh/known_hosts"
+        - Use OpenSSH's known_hosts file
+
#### --sftp-ask-password

Allow asking for SFTP password when needed.
@@ -65,7 +65,7 @@ d) Delete this remote
y/e/d> y
```

-### Setup with API key and passhprase
+### Setup with API key and passphrase

```
No remotes found - make a new one
@@ -26,6 +26,10 @@ var ErrorMaxTransferLimitReached = errors.New("Max transfer limit reached as set
// transfer limit is reached.
var ErrorMaxTransferLimitReachedFatal = fserrors.FatalError(ErrorMaxTransferLimitReached)

+// ErrorMaxTransferLimitReachedGraceful is returned from operations.Copy when the max
+// transfer limit is reached and a graceful stop is required.
+var ErrorMaxTransferLimitReachedGraceful = fserrors.NoRetryError(ErrorMaxTransferLimitReached)
+
// Account limits and accounts for one transfer
type Account struct {
	stats *StatsInfo
@@ -446,7 +450,7 @@ func shortenName(in string, size int) string {
		return in
	}
	name := []rune(in)
-	size-- // don't count elipsis rune
+	size-- // don't count ellipsis rune
	suffixLength := size / 2
	prefixLength := size - suffixLength
	suffixStart := len(name) - suffixLength
@@ -11,6 +11,7 @@ import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/rc"
+	"github.com/rclone/rclone/lib/terminal"
)

// MaxCompletedTransfers specifies maximum number of completed transfers in startedTransfers list
@@ -272,7 +273,7 @@ func (s *StatsInfo) String() string {
		}
	}

-	_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s\n",
+	_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s",
		dateString,
		fs.SizeSuffix(s.bytes),
		fs.SizeSuffix(totalSize).Unit("Bytes"),
@@ -282,7 +283,13 @@ func (s *StatsInfo) String() string {
		xfrchkString,
	)

+	if fs.Config.ProgressTerminalTitle {
+		// Writes ETA to the terminal title
+		terminal.WriteTerminalTitle("ETA: " + etaString(currentSize, totalSize, speed))
+	}
+
	if !fs.Config.StatsOneLine {
+		_, _ = buf.WriteRune('\n')
		errorDetails := ""
		switch {
		case s.fatalError:
@@ -291,6 +298,7 @@ func (s *StatsInfo) String() string {
			errorDetails = " (retrying may help)"
		case s.errors != 0:
			errorDetails = " (no need to retry)"
		}

		// Add only non zero stats

@@ -366,6 +366,8 @@ func (sg *statsGroups) sum() *StatsInfo {
				sum.lastError = stats.lastError
			}
			sum.startedTransfers = append(sum.startedTransfers, stats.startedTransfers...)
+			sum.oldDuration += stats.oldDuration
+			sum.oldTimeRanges = append(sum.oldTimeRanges, stats.oldTimeRanges...)
		}
		stats.mu.RUnlock()
	}

@@ -4,8 +4,10 @@ import (
	"fmt"
	"runtime"
	"testing"
+	"time"

	"github.com/rclone/rclone/fstest/testy"
+	"github.com/stretchr/testify/assert"
)

func TestStatsGroupOperations(t *testing.T) {

@@ -43,17 +45,26 @@ func TestStatsGroupOperations(t *testing.T) {
		t.Parallel()
		stats1 := NewStats()
		stats1.bytes = 5
-		stats1.errors = 5
+		stats1.errors = 6
+		stats1.oldDuration = time.Second
+		stats1.oldTimeRanges = []timeRange{{time.Now(), time.Now().Add(time.Second)}}
		stats2 := NewStats()
+		stats2.bytes = 10
+		stats2.errors = 12
+		stats2.oldDuration = 2 * time.Second
+		stats2.oldTimeRanges = []timeRange{{time.Now(), time.Now().Add(2 * time.Second)}}
		sg := newStatsGroups()
		sg.set("test1", stats1)
		sg.set("test2", stats2)
		sum := sg.sum()
-		if sum.bytes != stats1.bytes+stats2.bytes {
-			t.Fatalf("sum() => bytes %d, expected %d", sum.bytes, stats1.bytes+stats2.bytes)
-		}
-		if sum.errors != stats1.errors+stats2.errors {
-			t.Fatalf("sum() => errors %d, expected %d", sum.errors, stats1.errors+stats2.errors)
+		assert.Equal(t, stats1.bytes+stats2.bytes, sum.bytes)
+		assert.Equal(t, stats1.errors+stats2.errors, sum.errors)
+		assert.Equal(t, stats1.oldDuration+stats2.oldDuration, sum.oldDuration)
+		// dict can iterate in either order
+		a := timeRanges{stats1.oldTimeRanges[0], stats2.oldTimeRanges[0]}
+		b := timeRanges{stats2.oldTimeRanges[0], stats1.oldTimeRanges[0]}
+		if !assert.ObjectsAreEqual(a, sum.oldTimeRanges) {
+			assert.Equal(t, b, sum.oldTimeRanges)
		}
	})

@@ -99,7 +99,7 @@ func StartTokenTicker() {
	}()
}

-// limitBandwith sleeps for the correct amount of time for the passage
+// limitBandwidth sleeps for the correct amount of time for the passage
// of n bytes according to the current bandwidth limit
func limitBandwidth(n int) {
	tokenBucketMu.Lock()

@@ -72,8 +72,16 @@ func (tm *transferMap) _sortedSlice() []*Transfer {
	for _, tr := range tm.items {
		s = append(s, tr)
	}
+	// sort by time first and if equal by name. Note that the relatively
+	// low time resolution on Windows can cause equal times.
	sort.Slice(s, func(i, j int) bool {
-		return s[i].startedAt.Before(s[j].startedAt)
+		a, b := s[i], s[j]
+		if a.startedAt.Before(b.startedAt) {
+			return true
+		} else if !a.startedAt.Equal(b.startedAt) {
+			return false
+		}
+		return a.remote < b.remote
	})
	return s
}

@@ -83,7 +91,7 @@ func (tm *transferMap) _sortedSlice() []*Transfer {
func (tm *transferMap) String(progress *inProgress, exclude *transferMap) string {
	tm.mu.RLock()
	defer tm.mu.RUnlock()
-	strngs := make([]string, 0, len(tm.items))
+	stringList := make([]string, 0, len(tm.items))
	for _, tr := range tm._sortedSlice() {
		if exclude != nil {
			exclude.mu.RLock()

@@ -103,9 +111,9 @@ func (tm *transferMap) String(progress *inProgress, exclude *transferMap) string
				tm.name,
			)
		}
-		strngs = append(strngs, " * "+out)
+		stringList = append(stringList, " * "+out)
	}
-	return strings.Join(strngs, "\n")
+	return strings.Join(stringList, "\n")
}

// progress returns total bytes read as well as the size.

@@ -80,7 +80,7 @@ func parseWeekday(dayOfWeek string) (int, error) {
// Set the bandwidth timetable.
func (x *BwTimetable) Set(s string) error {
	// The timetable is formatted as:
-	// "dayOfWeek-hh:mm,bandwidth dayOfWeek-hh:mm,banwidth..." ex: "Mon-10:00,10G Mon-11:30,1G Tue-18:00,off"
+	// "dayOfWeek-hh:mm,bandwidth dayOfWeek-hh:mm,bandwidth..." ex: "Mon-10:00,10G Mon-11:30,1G Tue-18:00,off"
	// If only a single bandwidth identifier is provided, we assume constant bandwidth.

	if len(s) == 0 {

@@ -38,7 +38,7 @@ type ChunkedReader struct {
//
// An initialChunkSize of <= 0 will disable chunked reading.
// If maxChunkSize is greater than initialChunkSize, the chunk size will be
-// doubled after each chunk read with a maximun of maxChunkSize.
+// doubled after each chunk read with a maximum of maxChunkSize.
// A Seek or RangeSeek will reset the chunk size to it's initial value
func New(ctx context.Context, o fs.Object, initialChunkSize int64, maxChunkSize int64) *ChunkedReader {
	if initialChunkSize <= 0 {

@@ -42,6 +42,7 @@ var (
type ConfigInfo struct {
	LogLevel          LogLevel
	StatsLogLevel     LogLevel
+	LogSystemdSupport bool
	UseJSONLog        bool
	DryRun            bool
	Interactive       bool

@@ -61,7 +62,7 @@ type ConfigInfo struct {
	DeleteMode           DeleteMode
	MaxDelete            int64
	TrackRenames         bool   // Track file renames.
-	TrackRenamesStrategy string // Comma separated list of stratgies used to track renames
+	TrackRenamesStrategy string // Comma separated list of strategies used to track renames
	LowLevelRetries      int
	UpdateOlder          bool // Skip files that are newer on the destination
	NoGzip               bool // Disable compression

@@ -106,6 +107,7 @@ type ConfigInfo struct {
	StatsOneLineDateFormat string // If we want to customize the prefix
	ErrorOnNoTransfer      bool   // Set appropriate exit code if no files transferred
	Progress               bool
+	ProgressTerminalTitle  bool
	Cookie                 bool
	UseMmap                bool
	CaCert                 string // Client Side CA

@@ -177,7 +177,7 @@ func TestNewRemoteName(t *testing.T) {
	assert.Equal(t, "newname", NewRemoteName())
}

-func TestCreateUpatePasswordRemote(t *testing.T) {
+func TestCreateUpdatePasswordRemote(t *testing.T) {
	defer testConfigFile(t, "update.conf")()

	for _, doObscure := range []bool{false, true} {

@@ -110,6 +110,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
	flags.StringVarP(flagSet, &fs.Config.StatsOneLineDateFormat, "stats-one-line-date-format", "", fs.Config.StatsOneLineDateFormat, "Enables --stats-one-line-date and uses custom formatted date. Enclose date string in double quotes (\"). See https://golang.org/pkg/time/#Time.Format")
	flags.BoolVarP(flagSet, &fs.Config.ErrorOnNoTransfer, "error-on-no-transfer", "", fs.Config.ErrorOnNoTransfer, "Sets exit code 9 if no files are transferred, useful in scripts")
	flags.BoolVarP(flagSet, &fs.Config.Progress, "progress", "P", fs.Config.Progress, "Show progress during transfer.")
+	flags.BoolVarP(flagSet, &fs.Config.ProgressTerminalTitle, "progress-terminal-title", "", fs.Config.ProgressTerminalTitle, "Show progress on the terminal title. Requires -P/--progress.")
	flags.BoolVarP(flagSet, &fs.Config.Cookie, "use-cookies", "", fs.Config.Cookie, "Enable session cookiejar.")
	flags.BoolVarP(flagSet, &fs.Config.UseMmap, "use-mmap", "", fs.Config.UseMmap, "Use mmap allocator (see docs).")
	flags.StringVarP(flagSet, &fs.Config.CaCert, "ca-cert", "", fs.Config.CaCert, "CA certificate used to verify servers")

@@ -123,6 +124,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
	flags.StringArrayVarP(flagSet, &downloadHeaders, "header-download", "", nil, "Set HTTP header for download transactions")
	flags.StringArrayVarP(flagSet, &headers, "header", "", nil, "Set HTTP header for all transactions")
	flags.BoolVarP(flagSet, &fs.Config.RefreshTimes, "refresh-times", "", fs.Config.RefreshTimes, "Refresh the modtime of remote files.")
+	flags.BoolVarP(flagSet, &fs.Config.LogSystemdSupport, "log-systemd", "", fs.Config.LogSystemdSupport, "Activate systemd integration for the logger.")
}

// ParseHeaders converts the strings passed in via the header flags into HTTPOptions

@@ -156,6 +156,15 @@ func FVarP(flags *pflag.FlagSet, value pflag.Value, name, shorthand, usage strin
	setDefaultFromEnv(flags, name)
}

+// VarPF defines a flag which can be overridden by an environment variable
+//
+// It is a thin wrapper around pflag.VarPF
+func VarPF(flags *pflag.FlagSet, value pflag.Value, name, shorthand, usage string) *pflag.Flag {
+	flag := flags.VarPF(value, name, shorthand, usage)
+	setDefaultFromEnv(flags, name)
+	return flag
+}
+
// StringArrayP defines a flag which can be overridden by an environment variable
//
// It sets one value only - command line flags can be used to set more.