Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: fix-dropbo...fix-4704-s (98 commits)
| SHA1 |
|---|
| b26d5da84e |
| c1bf3f3999 |
| fd2c373af1 |
| 66c8d3bf2b |
| e00bf3d723 |
| 605f2b819a |
| bf2b975359 |
| 00a5086ff2 |
| be6a888e50 |
| dad8447423 |
| 65ff109065 |
| b7253fc1c1 |
| d143f576c6 |
| a152351a71 |
| a2fa1370c5 |
| bed83b0b64 |
| cf0bdad5de |
| 85d35ef03c |
| 514d10b314 |
| 5164c3d2d0 |
| ffdd0719e7 |
| 4e2b5389d7 |
| dc4e63631f |
| 275bf456d3 |
| 7dfa871095 |
| 70cc88de22 |
| 4bc0f46955 |
| 5b09599a23 |
| f4dd8e3fe8 |
| d0888edc0a |
| 51a230d7fd |
| fc5b14b620 |
| bbddadbd04 |
| 7428e47ebc |
| 72083c65ad |
| 70f92fd6b3 |
| a86cedbc24 |
| 0906f8dd3b |
| 664213cedb |
| 75a7226174 |
| 9e925becb6 |
| e3a5bb9b48 |
| b7eeb0e260 |
| 84d64ddabc |
| 6c9f92aee6 |
| 893297760b |
| c5c56cda02 |
| 2295123cad |
| ff0280c0cb |
| 64d736a57b |
| 5f1d5a1897 |
| aac2406e19 |
| 6dc28ef50a |
| 66def93373 |
| c58023a9ba |
| 3edc9ff0b0 |
| 8e8ae1edc7 |
| 20b00db390 |
| db4bbf9521 |
| 2b7994e739 |
| e7fbdac8e0 |
| 41ec712aa9 |
| 17acae2b00 |
| 57261c7e97 |
| d8239e0194 |
| 004c3796de |
| 18c7549770 |
| e5190f14ce |
| 433b73a5a8 |
| ab88a3341f |
| 181da3ce9b |
| b14a58c9b8 |
| 60cc2cba1f |
| c797494d88 |
| e2a57182be |
| 8928441466 |
| 0e8965060f |
| f3cf6fcdd7 |
| 18ccf0f871 |
| 313647bcf3 |
| 61fe068c90 |
| 5c49096e11 |
| a73c78545d |
| e0fd560711 |
| 6a56ac1032 |
| 96299629b4 |
| 75de30cfa8 |
| 233bed6a73 |
| b3964efe4d |
| 575f061629 |
| 640d7d3b4e |
| e92294b482 |
| 22937e8982 |
| c3d1474eb9 |
| e2426ea87b |
| e58a61175f |
| 05bea46c3e |
| c8a719ae0d |
.github/workflows/build.yml (vendored, 18 changes)

@@ -107,10 +107,10 @@ jobs:
 - name: Set environment variables
 shell: bash
 run: |
-echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
-echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
-if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
-if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
+echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
+echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
+if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
+if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi

 - name: Install Libraries on Linux
 shell: bash
@@ -124,6 +124,8 @@ jobs:
 - name: Install Libraries on macOS
 shell: bash
 run: |
+brew untap local/homebrew-openssl # workaround for https://github.com/actions/virtual-environments/issues/1811
+brew untap local/homebrew-python2 # workaround for https://github.com/actions/virtual-environments/issues/1811
 brew update
 brew cask install osxfuse
 if: matrix.os == 'macOS-latest'
@@ -133,10 +135,10 @@ jobs:
 run: |
 $ProgressPreference = 'SilentlyContinue'
 choco install -y winfsp zip
-Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
+echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
 if ($env:GOARCH -eq "386") {
 choco install -y mingw --forcex86 --force
-Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
+echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
 }
 # Copy mingw32-make.exe to make.exe so the same command line
 # can be used on Windows as on macOS and Linux
@@ -223,8 +225,8 @@ jobs:
 - name: Set environment variables
 shell: bash
 run: |
-echo '::set-env name=GOPATH::${{ runner.workspace }}'
-echo '::add-path::${{ runner.workspace }}/bin'
+echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
+echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH

 - name: Cross-compile rclone
 run: |
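The hunks above migrate off the deprecated `::set-env` and `::add-path` workflow commands: a step now exports variables by appending `NAME=value` lines to the file named by `$GITHUB_ENV` (and extra directories to `$GITHUB_PATH`). The mechanism works from any language, not just shell. A minimal Go sketch of the same idea; the `exportEnv` helper is hypothetical and not part of rclone or its workflow:

```go
// exportEnv makes a variable visible to later workflow steps by
// appending "NAME=value" to the file GitHub Actions names in the
// GITHUB_ENV environment variable - the same thing the echo lines
// in the new workflow do. Hypothetical helper, for illustration only.
package main

import (
	"fmt"
	"os"
)

func exportEnv(name, value string) error {
	path := os.Getenv("GITHUB_ENV")
	if path == "" {
		return fmt.Errorf("GITHUB_ENV not set; not running under GitHub Actions?")
	}
	f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = fmt.Fprintf(f, "%s=%s\n", name, value)
	return err
}

func main() {
	if err := exportEnv("GOTAGS", "cmount"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```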
@@ -86,7 +86,7 @@ git reset --soft HEAD~2 # This squashes the 2 latest commits together.
 git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
 git commit # Add a new commit message.
 git push --force # Push the squashed commit to your GitHub repo.
-# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also reccommends wizardzines.com
+# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
 ```

 ## CI for your fork ##

Makefile (4 changes)

@@ -93,8 +93,8 @@ build_dep:

 # Get the release dependencies we only install on linux
 release_dep_linux:
-go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
-go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
+cd /tmp && go get github.com/goreleaser/nfpm/...
+cd /tmp && go get github.com/github-release/github-release

 # Get the release dependencies we only install on Windows
 release_dep_windows:
@@ -64,6 +64,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
 * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
 * Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
+* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
 * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
 * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
 * Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
@@ -21,7 +21,7 @@ This file describes how to make the various kinds of releases
 * git status - to check for new man pages - git add them
 * git commit -a -v -m "Version v1.XX.0"
 * make retag
-* git push --tags origin master
+* git push --follow-tags origin
 * # Wait for the GitHub builds to complete then...
 * make fetch_binaries
 * make tarball
@@ -65,7 +65,7 @@ Now
 * git cherry-pick any fixes
 * Do the steps as above
 * make startstable
-* NB this overwrites the current beta so we need to do this - FIXME is this true any more?
+* NB this overwrites the current beta so we need to do this
 * git co master
 * # cherry pick the changes to the changelog
 * git checkout ${BASE_TAG}-stable docs/content/changelog.md
@@ -1245,15 +1245,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 }
 blob := o.getBlobReference()
 ac := azblob.BlobAccessConditions{}
-var dowloadResponse *azblob.DownloadResponse
+var downloadResponse *azblob.DownloadResponse
 err = o.fs.pacer.Call(func() (bool, error) {
-dowloadResponse, err = blob.Download(ctx, offset, count, ac, false)
+downloadResponse, err = blob.Download(ctx, offset, count, ac, false)
 return o.fs.shouldRetry(err)
 })
 if err != nil {
 return nil, errors.Wrap(err, "failed to open for download")
 }
-in = dowloadResponse.Body(azblob.RetryReaderOptions{})
+in = downloadResponse.Body(azblob.RetryReaderOptions{})
 return in, nil
 }
@@ -1475,7 +1475,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 }
 // FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
 // is merged the SDK can't upload a single blob of exactly the chunk
-// size, so upload with a multpart upload to work around.
+// size, so upload with a multipart upload to work around.
 // See: https://github.com/rclone/rclone/issues/2653
 multipartUpload := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
 if size == int64(o.fs.opt.ChunkSize) {
@@ -1013,7 +1013,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 return info.SharedLink.URL, err
 }

-// deletePermanently permenently deletes a trashed file
+// deletePermanently permanently deletes a trashed file
 func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
 opts := rest.Opts{
 Method: "DELETE",
@@ -1,4 +1,4 @@
-// multpart upload for box
+// multipart upload for box

 package box

@@ -296,6 +296,8 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 ServerSideAcrossConfigs: true,
 }).Fill(f).Mask(baseFs).WrapsFs(f, baseFs)

+f.features.Disable("ListR") // Recursive listing may cause chunker skip files
+
 return f, err
 }
@@ -958,6 +960,8 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
 }
 info := f.wrapInfo(src, chunkRemote, size)

+// Refill chunkLimit and let basePut repeatedly call chunkingReader.Read()
+c.chunkLimit = c.chunkSize
 // TODO: handle range/limit options
 chunk, errChunk := basePut(ctx, wrapIn, info, options...)
 if errChunk != nil {
@@ -1126,6 +1130,12 @@ func (c *chunkingReader) wrapStream(ctx context.Context, in io.Reader, src fs.Ob

 switch {
 case c.fs.useMD5:
+srcObj := fs.UnWrapObjectInfo(src)
+if srcObj != nil && srcObj.Fs().Features().SlowHash {
+fs.Debugf(src, "skip slow MD5 on source file, hashing in-transit")
+c.hasher = md5.New()
+break
+}
 if c.md5, _ = src.Hash(ctx, hash.MD5); c.md5 == "" {
 if c.fs.hashFallback {
 c.sha1, _ = src.Hash(ctx, hash.SHA1)
@@ -1134,6 +1144,12 @@ func (c *chunkingReader) wrapStream(ctx context.Context, in io.Reader, src fs.Ob
 }
 }
 case c.fs.useSHA1:
+srcObj := fs.UnWrapObjectInfo(src)
+if srcObj != nil && srcObj.Fs().Features().SlowHash {
+fs.Debugf(src, "skip slow SHA1 on source file, hashing in-transit")
+c.hasher = sha1.New()
+break
+}
 if c.sha1, _ = src.Hash(ctx, hash.SHA1); c.sha1 == "" {
 if c.fs.hashFallback {
 c.md5, _ = src.Hash(ctx, hash.MD5)
@@ -1166,10 +1182,14 @@ func (c *chunkingReader) updateHashes() {
 func (c *chunkingReader) Read(buf []byte) (bytesRead int, err error) {
 if c.chunkLimit <= 0 {
 // Chunk complete - switch to next one.
+// Note #1:
 // We might not get here because some remotes (eg. box multi-uploader)
 // read the specified size exactly and skip the concluding EOF Read.
 // Then a check in the put loop will kick in.
-c.chunkLimit = c.chunkSize
+// Note #2:
+// The crypt backend after receiving EOF here will call Read again
+// and we must insist on returning EOF, so we postpone refilling
+// chunkLimit to the main loop.
 return 0, io.EOF
 }
 if int64(len(buf)) > c.chunkLimit {
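Note #1 and Note #2 describe the contract this reader keeps with its callers: every chunk boundary is reported as io.EOF, and the limit is only refilled by the put loop before the next chunk starts. A self-contained Go sketch of that pattern (a toy, not rclone's actual chunker):

```go
// chunkedReader serves data in fixed-size chunks: each Read is capped
// by the bytes remaining in the current chunk, and io.EOF is reported
// at every chunk boundary so the caller can start a new upload.
package main

import (
	"fmt"
	"io"
	"strings"
)

type chunkedReader struct {
	in        io.Reader
	chunkSize int64
	limit     int64 // bytes left in the current chunk
}

func (c *chunkedReader) Read(buf []byte) (int, error) {
	if c.limit <= 0 {
		// Chunk boundary: insist on EOF; the caller refills the
		// limit before the next chunk (compare Note #2 above).
		return 0, io.EOF
	}
	if int64(len(buf)) > c.limit {
		buf = buf[:c.limit]
	}
	n, err := c.in.Read(buf)
	c.limit -= int64(n)
	return n, err
}

func main() {
	c := &chunkedReader{in: strings.NewReader("0123456789"), chunkSize: 4}
	for chunk := 0; ; chunk++ {
		c.limit = c.chunkSize // refill in the main loop, as in the diff
		data, _ := io.ReadAll(c)
		if len(data) == 0 {
			break
		}
		fmt.Printf("chunk %d: %q\n", chunk, data)
	}
}
```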
@@ -147,7 +147,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
 // If salt is "" we use a fixed salt just to make attackers lives
 // slighty harder than using no salt.
 //
-// Note that empty passsword makes all 0x00 keys which is used in the
+// Note that empty password makes all 0x00 keys which is used in the
 // tests.
 func (c *Cipher) Key(password, salt string) (err error) {
 const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
@@ -633,11 +633,8 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
 }
 // possibly err != nil here, but we will process the
 // data and the next call to ReadFull will return 0, err
 // Write nonce to start of block
 copy(fh.buf, fh.nonce[:])
 // Encrypt the block using the nonce
-block := fh.buf
-secretbox.Seal(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
 fh.bufIndex = 0
 fh.bufSize = blockHeaderSize + n
 fh.nonce.increment()
@@ -782,8 +779,7 @@ func (fh *decrypter) fillBuffer() (err error) {
 return ErrorEncryptedFileBadHeader
 }
 // Decrypt the block using the nonce
-block := fh.buf
-_, ok := secretbox.Open(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
 if !ok {
 if err != nil {
 return err // return pending error as it is likely more accurate
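These two hunks drop the redundant `block := fh.buf` alias and seal/open directly into `fh.buf[:0]`, reusing the buffer's backing array. For readers unfamiliar with the primitive, here is a minimal round-trip sketch of the NaCl secretbox calls the encrypter and decrypter build on; the key and nonce below are placeholder values, not how rclone's crypt backend derives them:

```go
// Minimal secretbox round trip. In rclone's crypt backend the key is
// derived from the password with scrypt and the per-block nonce is
// stored in the encrypted file header; here both are dummy values.
package main

import (
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte
	var nonce [24]byte
	copy(key[:], "0123456789abcdef0123456789abcdef")
	copy(nonce[:], "unique-per-block-nonce..")

	plain := []byte("hello, world")

	// Seal appends plaintext length + secretbox.Overhead bytes to its
	// first argument; passing buf[:0] reuses buf's backing array, which
	// is exactly what the diff does with fh.buf[:0].
	buf := make([]byte, 0, len(plain)+secretbox.Overhead)
	sealed := secretbox.Seal(buf[:0], plain, &nonce, &key)

	opened, ok := secretbox.Open(nil, sealed, &nonce, &key)
	if !ok {
		panic("decryption failed")
	}
	fmt.Printf("%s\n", opened)
}
```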
@@ -159,7 +159,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 if strings.HasPrefix(remote, name+":") {
 return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
 }
-// Make sure to remove trailing . reffering to the current dir
+// Make sure to remove trailing . referring to the current dir
 if path.Base(rpath) == "." {
 rpath = strings.TrimSuffix(rpath, ".")
 }
@@ -87,7 +87,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
 }

 // wrap the object in a crypt for upload using the nonce we
-// saved from the encryptor
+// saved from the encrypter
 src := f.newObjectInfo(oi, nonce)

 // Test ObjectInfo methods
@@ -35,6 +35,7 @@ import (
 "github.com/rclone/rclone/fs/config/obscure"
 "github.com/rclone/rclone/fs/fserrors"
 "github.com/rclone/rclone/fs/fshttp"
+"github.com/rclone/rclone/fs/fspath"
 "github.com/rclone/rclone/fs/hash"
 "github.com/rclone/rclone/fs/operations"
 "github.com/rclone/rclone/fs/walk"
@@ -470,6 +471,21 @@ Note that this detection is relying on error message strings which
 Google don't document so it may break in the future.

 See: https://github.com/rclone/rclone/issues/3857
+`,
+Advanced: true,
+}, {
+Name: "stop_on_download_limit",
+Default: false,
+Help: `Make download limit errors be fatal
+
+At the time of writing it is only possible to download 10TB of data from
+Google Drive a day (this is an undocumented limit). When this limit is
+reached Google Drive produces a slightly different error message. When
+this flag is set it causes these errors to be fatal. These will stop
+the in-progress sync.
+
+Note that this detection is relying on error message strings which
+Google don't document so it may break in the future.
 `,
 Advanced: true,
 }, {
@@ -539,6 +555,7 @@ type Options struct {
 ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
 DisableHTTP2 bool `config:"disable_http2"`
 StopOnUploadLimit bool `config:"stop_on_upload_limit"`
+StopOnDownloadLimit bool `config:"stop_on_download_limit"`
 SkipShortcuts bool `config:"skip_shortcuts"`
 Enc encoder.MultiEncoder `config:"encoding"`
 }
@@ -638,6 +655,9 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
 return false, fserrors.FatalError(err)
 }
 return true, err
+} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
+fs.Errorf(f, "Received download limit error: %v", err)
+return false, fserrors.FatalError(err)
 } else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
 fs.Errorf(f, "Received team drive file limit error: %v", err)
 return false, fserrors.FatalError(err)
@@ -2025,10 +2045,10 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-exisitingObj, err := f.NewObject(ctx, src.Remote())
+existingObj, err := f.NewObject(ctx, src.Remote())
 switch err {
 case nil:
-return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
+return existingObj, existingObj.Update(ctx, in, src, options...)
 case fs.ErrorObjectNotFound:
 // Not found so create it
 return f.PutUnchecked(ctx, in, src, options...)
@@ -2959,6 +2979,38 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
 return f.unTrash(ctx, dir, directoryID, true)
 }

+// copy file with id to dest
+func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
+info, err := f.getFile(id, f.fileFields)
+if err != nil {
+return errors.Wrap(err, "couldn't find id")
+}
+if info.MimeType == driveFolderType {
+return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
+}
+info.Name = f.opt.Enc.ToStandardName(info.Name)
+o, err := f.newObjectWithInfo(info.Name, info)
+if err != nil {
+return err
+}
+destDir, destLeaf, err := fspath.Split(dest)
+if err != nil {
+return err
+}
+if destLeaf == "" {
+destLeaf = info.Name
+}
+dstFs, err := cache.Get(destDir)
+if err != nil {
+return err
+}
+_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
+if err != nil {
+return errors.Wrap(err, "copy failed")
+}
+return nil
+}
+
 var commandHelp = []fs.CommandHelp{{
 Name: "get",
 Short: "Get command for fetching the drive config parameters",
@@ -3059,6 +3111,29 @@ Result:
 "Errors": 0
 }
 `,
+}, {
+Name: "copyid",
+Short: "Copy files by ID",
+Long: `This command copies files by ID
+
+Usage:
+
+rclone backend copyid drive: ID path
+rclone backend copyid drive: ID1 path1 ID2 path2
+
+It copies the drive file with ID given to the path (an rclone path which
+will be passed internally to rclone copyto). The ID and path pairs can be
+repeated.
+
+The path should end with a / to indicate copy the file as named to
+this directory. If it doesn't end with a / then the last path
+component will be used as the file name.
+
+If the destination is a drive backend then server side copying will be
+attempted if possible.
+
+Use the -i flag to see what would be copied before copying.
+`,
 }}

 // Command the backend to run a named command
@@ -3130,6 +3205,19 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 dir = arg[0]
 }
 return f.unTrashDir(ctx, dir, true)
+case "copyid":
+if len(arg)%2 != 0 {
+return nil, errors.New("need an even number of arguments")
+}
+for len(arg) > 0 {
+id, dest := arg[0], arg[1]
+arg = arg[2:]
+err = f.copyID(ctx, id, dest)
+if err != nil {
+return nil, errors.Wrapf(err, "failed copying %q to %q", id, dest)
+}
+}
+return nil, nil
 default:
 return nil, fs.ErrorCommandNotFound
 }
@@ -7,6 +7,8 @@ import (
 "io"
 "io/ioutil"
 "mime"
+"os"
 "path"
+"path/filepath"
 "strings"
 "testing"
@@ -272,14 +274,15 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
 }
 }

+const (
+// from fstest/fstests/fstests.go
+existingDir = "hello? sausage"
+existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
+existingSubDir = "êé"
+)
+
 // TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
 func (f *Fs) InternalTestShortcuts(t *testing.T) {
-const (
-// from fstest/fstests/fstests.go
-existingDir = "hello? sausage"
-existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
-existingSubDir = "êé"
-)
 ctx := context.Background()
 srcObj, err := f.NewObject(ctx, existingFile)
 require.NoError(t, err)
@@ -408,6 +411,55 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
 require.NoError(t, f.Purge(ctx, "trashDir"))
 }

+// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
+func (f *Fs) InternalTestCopyID(t *testing.T) {
+ctx := context.Background()
+obj, err := f.NewObject(ctx, existingFile)
+require.NoError(t, err)
+o := obj.(*Object)
+
+dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
+require.NoError(t, err)
+defer func() {
+_ = os.RemoveAll(dir)
+}()
+
+checkFile := func(name string) {
+filePath := filepath.Join(dir, name)
+fi, err := os.Stat(filePath)
+require.NoError(t, err)
+assert.Equal(t, int64(100), fi.Size())
+err = os.Remove(filePath)
+require.NoError(t, err)
+}
+
+t.Run("BadID", func(t *testing.T) {
+err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
+require.Error(t, err)
+assert.Contains(t, err.Error(), "couldn't find id")
+})
+
+t.Run("Directory", func(t *testing.T) {
+rootID, err := f.dirCache.RootID(ctx, false)
+require.NoError(t, err)
+err = f.copyID(ctx, rootID, dir+"/")
+require.Error(t, err)
+assert.Contains(t, err.Error(), "can't copy directory")
+})
+
+t.Run("WithoutDestName", func(t *testing.T) {
+err = f.copyID(ctx, o.id, dir+"/")
+require.NoError(t, err)
+checkFile(path.Base(existingFile))
+})
+
+t.Run("WithDestName", func(t *testing.T) {
+err = f.copyID(ctx, o.id, dir+"/potato.txt")
+require.NoError(t, err)
+checkFile("potato.txt")
+})
+}
+
 func (f *Fs) InternalTest(t *testing.T) {
 // These tests all depend on each other so run them as nested tests
 t.Run("DocumentImport", func(t *testing.T) {
@@ -424,6 +476,7 @@ func (f *Fs) InternalTest(t *testing.T) {
 })
 t.Run("Shortcuts", f.InternalTestShortcuts)
 t.Run("UnTrash", f.InternalTestUnTrash)
+t.Run("CopyID", f.InternalTestCopyID)
 }

 var _ fstests.InternalTester = (*Fs)(nil)
@@ -142,6 +142,31 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
 Help: "Impersonate this user when using a business account.",
 Default: "",
 Advanced: true,
+}, {
+Name: "shared_files",
+Help: `Instructs rclone to work on individual shared files.
+
+In this mode rclone's features are extremely limited - only list (ls, lsl, etc.)
+operations and read operations (e.g. downloading) are supported in this mode.
+All other operations will be disabled.`,
+Default: false,
+Advanced: true,
+}, {
+Name: "shared_folders",
+Help: `Instructs rclone to work on shared folders.
+
+When this flag is used with no path only the List operation is supported and
+all available shared folders will be listed. If you specify a path the first part
+will be interpreted as the name of shared folder. Rclone will then try to mount this
+shared to the root namespace. On success shared folder rclone proceeds normally.
+The shared folder is now pretty much a normal folder and all normal operations
+are supported.
+
+Note that we don't unmount the shared folder afterwards so the
+--dropbox-shared-folders can be omitted after the first use of a particular
+shared folder.`,
+Default: false,
+Advanced: true,
 }, {
 Name: config.ConfigEncoding,
 Help: config.ConfigEncodingHelp,
@@ -161,9 +186,11 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),

 // Options defines the configuration for this backend
 type Options struct {
-ChunkSize fs.SizeSuffix `config:"chunk_size"`
-Impersonate string `config:"impersonate"`
-Enc encoder.MultiEncoder `config:"encoding"`
+ChunkSize fs.SizeSuffix `config:"chunk_size"`
+Impersonate string `config:"impersonate"`
+SharedFiles bool `config:"shared_files"`
+SharedFolders bool `config:"shared_folders"`
+Enc encoder.MultiEncoder `config:"encoding"`
 }

 // Fs represents a remote dropbox server
@@ -186,7 +213,9 @@ type Fs struct {
 //
 // Dropbox Objects always have full metadata
 type Object struct {
-fs *Fs // what this object is part of
+fs *Fs // what this object is part of
+id string
+url string
 remote string // The remote path
 bytes int64 // size of the object
 modTime time.Time // time it was last modified
@@ -332,8 +361,60 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 CaseInsensitive: true,
 ReadMimeType: true,
 CanHaveEmptyDirectories: true,
-}).Fill(f)
-f.setRoot(root)
+})
+
+// do not fill features yet
+if f.opt.SharedFiles {
+f.setRoot(root)
+if f.root == "" {
+return f, nil
+}
+_, err := f.findSharedFile(f.root)
+f.root = ""
+if err == nil {
+return f, fs.ErrorIsFile
+}
+return f, nil
+}
+
+if f.opt.SharedFolders {
+f.setRoot(root)
+if f.root == "" {
+return f, nil // our root it empty so we probably want to list shared folders
+}
+
+dir := path.Dir(f.root)
+if dir == "." {
+dir = f.root
+}
+
+// root is not empty so we have find the right shared folder if it exists
+id, err := f.findSharedFolder(dir)
+if err != nil {
+// if we didn't find the specified shared folder we have to bail out here
+return nil, err
+}
+// we found the specified shared folder so let's mount it
+// this will add it to the users normal root namespace and allows us
+// to actually perform operations on it using the normal api endpoints.
+err = f.mountSharedFolder(id)
+if err != nil {
+switch e := err.(type) {
+case sharing.MountFolderAPIError:
+if e.EndpointError == nil || (e.EndpointError != nil && e.EndpointError.Tag != sharing.MountFolderErrorAlreadyMounted) {
+return nil, err
+}
+default:
+return nil, err
+}
+// if the moint failed we have to abort here
+}
+// if the mount succeeded it's now a normal folder in the users root namespace
+// we disable shared folder mode and proceed normally
+f.opt.SharedFolders = false
+}
+
+f.features.Fill(f)

 // If root starts with / then use the actual root
 if strings.HasPrefix(root, "/") {
@@ -355,6 +436,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 }
 fs.Debugf(f, "Using root namespace %q", f.ns)
 }
+f.setRoot(root)

 // See if the root is actually an object
 _, err = f.getFileMetadata(f.slashRoot)
@@ -465,9 +547,150 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+if f.opt.SharedFiles {
+return f.findSharedFile(remote)
+}
 return f.newObjectWithInfo(remote, nil)
 }

+// listSharedFoldersApi lists all available shared folders mounted and not mounted
+// we'll need the id later so we have to return them in original format
+func (f *Fs) listSharedFolders() (entries fs.DirEntries, err error) {
+started := false
+var res *sharing.ListFoldersResult
+for {
+if !started {
+arg := sharing.ListFoldersArgs{
+Limit: 100,
+}
+err := f.pacer.Call(func() (bool, error) {
+res, err = f.sharing.ListFolders(&arg)
+return shouldRetry(err)
+})
+if err != nil {
+return nil, err
+}
+started = true
+} else {
+arg := sharing.ListFoldersContinueArg{
+Cursor: res.Cursor,
+}
+err := f.pacer.Call(func() (bool, error) {
+res, err = f.sharing.ListFoldersContinue(&arg)
+return shouldRetry(err)
+})
+if err != nil {
+return nil, errors.Wrap(err, "list continue")
+}
+}
+for _, entry := range res.Entries {
+leaf := f.opt.Enc.ToStandardName(entry.Name)
+d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
+entries = append(entries, d)
+if err != nil {
+return nil, err
+}
+}
+if res.Cursor == "" {
+break
+}
+}
+
+return entries, nil
+}
+
+// findSharedFolder find the id for a given shared folder name
+// somewhat annoyingly there is no endpoint to query a shared folder by it's name
+// so our only option is to iterate over all shared folders
+func (f *Fs) findSharedFolder(name string) (id string, err error) {
+entries, err := f.listSharedFolders()
+if err != nil {
+return "", err
+}
+for _, entry := range entries {
+if entry.(*fs.Dir).Remote() == name {
+return entry.(*fs.Dir).ID(), nil
+}
+}
+return "", fs.ErrorDirNotFound
+}
+
+// mountSharedFolders mount a shared folder to the root namespace
+func (f *Fs) mountSharedFolder(id string) error {
+arg := sharing.MountFolderArg{
+SharedFolderId: id,
+}
+err := f.pacer.Call(func() (bool, error) {
+_, err := f.sharing.MountFolder(&arg)
+return shouldRetry(err)
+})
+return err
+}
+
+// listSharedFolders lists shared the user as access to (note this means individual
+// files not files contained in shared folders)
+func (f *Fs) listReceivedFiles() (entries fs.DirEntries, err error) {
+started := false
+var res *sharing.ListFilesResult
+for {
+if !started {
+arg := sharing.ListFilesArg{
+Limit: 100,
+}
+err := f.pacer.Call(func() (bool, error) {
+res, err = f.sharing.ListReceivedFiles(&arg)
+return shouldRetry(err)
+})
+if err != nil {
+return nil, err
+}
+started = true
+} else {
+arg := sharing.ListFilesContinueArg{
+Cursor: res.Cursor,
+}
+err := f.pacer.Call(func() (bool, error) {
+res, err = f.sharing.ListReceivedFilesContinue(&arg)
+return shouldRetry(err)
+})
+if err != nil {
+return nil, errors.Wrap(err, "list continue")
+}
+}
+for _, entry := range res.Entries {
+fmt.Printf("%+v\n", entry)
+entryPath := entry.Name
+o := &Object{
+fs: f,
+url: entry.PreviewUrl,
+remote: entryPath,
+modTime: entry.TimeInvited,
+}
+if err != nil {
+return nil, err
+}
+entries = append(entries, o)
+}
+if res.Cursor == "" {
+break
+}
+}
+return entries, nil
+}
+
+func (f *Fs) findSharedFile(name string) (o *Object, err error) {
+files, err := f.listReceivedFiles()
+if err != nil {
+return nil, err
+}
+for _, entry := range files {
+if entry.(*Object).remote == name {
+return entry.(*Object), nil
+}
+}
+return nil, fs.ErrorObjectNotFound
+}
+
 // List the objects and directories in dir into entries. The
 // entries can be returned in any order but should be for a
 // complete directory.
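Both listing loops above follow the same cursor pattern: one initial call with a page limit, then continue-calls with the returned cursor until it comes back empty. A generic Go sketch of that loop; the `lister` interface and `fakeLister` are stand-ins for illustration, not the Dropbox SDK:

```go
// Cursor-style pagination of the kind used by listSharedFolders and
// listReceivedFiles above. The lister interface is hypothetical.
package main

import "fmt"

type page struct {
	Entries []string
	Cursor  string // empty when this is the last page
}

type lister interface {
	List(limit int) (*page, error)             // first call
	ListContinue(cursor string) (*page, error) // subsequent calls
}

func listAll(l lister) ([]string, error) {
	var all []string
	res, err := l.List(100)
	for {
		if err != nil {
			return nil, err
		}
		all = append(all, res.Entries...)
		if res.Cursor == "" {
			return all, nil
		}
		res, err = l.ListContinue(res.Cursor)
	}
}

// fakeLister serves a fixed set of pages so the sketch is runnable.
type fakeLister struct {
	pages []page
	next  int
}

func (f *fakeLister) List(limit int) (*page, error) { f.next = 1; return &f.pages[0], nil }
func (f *fakeLister) ListContinue(cursor string) (*page, error) {
	p := &f.pages[f.next]
	f.next++
	return p, nil
}

func main() {
	l := &fakeLister{pages: []page{
		{Entries: []string{"a", "b"}, Cursor: "c1"},
		{Entries: []string{"c"}, Cursor: ""},
	}}
	entries, err := listAll(l)
	fmt.Println(entries, err) // [a b c] <nil>
}
```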
@@ -478,6 +701,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+if f.opt.SharedFiles {
+return f.listReceivedFiles()
+}
+if f.opt.SharedFolders {
+return f.listSharedFolders()
+}
+
 root := f.slashRoot
 if dir != "" {
 root += "/" + dir
@@ -541,7 +771,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
 remote := path.Join(dir, leaf)
 if folderInfo != nil {
-d := fs.NewDir(remote, time.Now())
+d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
 entries = append(entries, d)
 } else if fileInfo != nil {
 o, err := f.newObjectWithInfo(remote, fileInfo)
@@ -564,6 +794,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+if f.opt.SharedFiles || f.opt.SharedFolders {
+return nil, fserrors.NoRetryError(errors.New("not support in shared files mode"))
+}
 // Temporary Object under construction
 o := &Object{
 fs: f,
@@ -579,6 +812,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

 // Mkdir creates the container if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+if f.opt.SharedFiles || f.opt.SharedFolders {
+return fserrors.NoRetryError(errors.New("not support in shared files mode"))
+}
 root := path.Join(f.slashRoot, dir)

 // can't create or run metadata on root
@@ -656,6 +892,9 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 //
 // Returns an error if it isn't empty
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+if f.opt.SharedFiles || f.opt.SharedFolders {
+return fserrors.NoRetryError(errors.New("not support in shared files mode"))
+}
 return f.purgeCheck(ctx, dir, true)
 }

@@ -927,8 +1166,16 @@ func (o *Object) Remote() string {
 return o.remote
 }

+// ID returns the object id
+func (o *Object) ID() string {
+return o.id
+}
+
 // Hash returns the dropbox special hash
 func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
+return "", fserrors.NoRetryError(errors.New("not support in shared files mode"))
+}
 if t != DbHashType {
 return "", hash.ErrUnsupported
 }
@@ -946,8 +1193,9 @@ func (o *Object) Size() int64 {

 // setMetadataFromEntry sets the fs data from a files.FileMetadata
 //
-// This isn't a complete set of metadata and has an inacurate date
+// This isn't a complete set of metadata and has an inaccurate date
 func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
+o.id = info.Id
 o.bytes = int64(info.Size)
 o.modTime = info.ClientModified
 o.hash = info.ContentHash
@@ -1016,10 +1264,27 @@ func (o *Object) Storable() bool {

 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+if o.fs.opt.SharedFiles {
+if len(options) != 0 {
+return nil, errors.New("OpenOptions not supported for shared files")
+}
+arg := sharing.GetSharedLinkMetadataArg{
+Url: o.url,
+}
+err = o.fs.pacer.Call(func() (bool, error) {
+_, in, err = o.fs.sharing.GetSharedLinkFile(&arg)
+return shouldRetry(err)
+})
+if err != nil {
+return nil, err
+}
+return
+}
+
 fs.FixRangeOption(options, o.bytes)
 headers := fs.OpenOptionHeaders(options)
 arg := files.DownloadArg{
-Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
+Path: o.id,
 ExtraHeaders: headers,
 }
 err = o.fs.pacer.Call(func() (bool, error) {
@@ -1153,6 +1418,9 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
+return fserrors.NoRetryError(errors.New("not support in shared files mode"))
+}
 remote := o.remotePath()
 if ignoredFiles.MatchString(remote) {
 return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
@@ -1181,6 +1449,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 // Remove an object
 func (o *Object) Remove(ctx context.Context) (err error) {
+if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
+return fserrors.NoRetryError(errors.New("not support in shared files mode"))
+}
 err = o.fs.pacer.Call(func() (bool, error) {
 _, err = o.fs.srv.DeleteV2(&files.DeleteArg{
 Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
@@ -1201,4 +1472,5 @@ var (
 _ fs.DirMover = (*Fs)(nil)
 _ fs.Abouter = (*Fs)(nil)
 _ fs.Object = (*Object)(nil)
+_ fs.IDer = (*Object)(nil)
 )

@@ -306,10 +306,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // will return the object and the error, otherwise will return
 // nil and the error
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-exisitingObj, err := f.NewObject(ctx, src.Remote())
+existingObj, err := f.NewObject(ctx, src.Remote())
 switch err {
 case nil:
-return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
+return existingObj, existingObj.Update(ctx, in, src, options...)
 case fs.ErrorObjectNotFound:
 // Not found so create it
 return f.PutUnchecked(ctx, in, src, options...)
@@ -323,7 +323,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 // This will create a duplicate if we upload a new file without
 // checking to see if there is one already - use Put() for that.
 func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
-if size > int64(100e9) {
+if size > int64(300e9) {
 return nil, errors.New("File too big, cant upload")
 } else if size == 0 {
 return nil, fs.ErrorCantUploadEmptyFiles

@@ -6,7 +6,6 @@ import (
 "crypto/tls"
 "io"
 "net/textproto"
-"os"
 "path"
 "runtime"
 "strings"
@@ -22,10 +21,15 @@ import (
 "github.com/rclone/rclone/fs/config/obscure"
 "github.com/rclone/rclone/fs/hash"
 "github.com/rclone/rclone/lib/encoder"
+"github.com/rclone/rclone/lib/env"
 "github.com/rclone/rclone/lib/pacer"
 "github.com/rclone/rclone/lib/readers"
 )

+var (
+currentUser = env.CurrentUser()
+)
+
 // Register with Fs
 func init() {
 fs.Register(&fs.RegInfo{
@@ -42,7 +46,7 @@ func init() {
 }},
 }, {
 Name: "user",
-Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
+Help: "FTP username, leave blank for current username, " + currentUser,
 }, {
 Name: "port",
 Help: "FTP port, leave blank to use default (21)",
@@ -311,7 +315,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 }
 user := opt.User
 if user == "" {
-user = os.Getenv("USER")
+user = currentUser
 }
 port := opt.Port
 if port == "" {
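These hunks replace `os.Getenv("USER")` with `env.CurrentUser()`, which matters because the `USER` variable is typically unset on Windows (where `USERNAME` is used instead). A minimal sketch of a portable current-user lookup of the kind `env.CurrentUser` provides; the exact rclone implementation may differ:

```go
// Portable current-user lookup: try os/user first, then fall back to
// the environment. Illustrative sketch, not rclone's env.CurrentUser.
package main

import (
	"fmt"
	"os"
	"os/user"
)

func currentUser() string {
	if u, err := user.Current(); err == nil && u.Username != "" {
		return u.Username
	}
	// Fall back to the environment for platforms or builds where
	// user.Current is unavailable (e.g. some cross-compiled binaries).
	if name := os.Getenv("USER"); name != "" {
		return name // Unix convention
	}
	return os.Getenv("USERNAME") // Windows convention
}

func main() {
	fmt.Println(currentUser())
}
```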
@@ -115,7 +115,7 @@ func TestIntegration(t *testing.T) {
 assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
 })

-// Check it is there in the date/month/year heirachy
+// Check it is there in the date/month/year hierarchy
 // 2013-07-13 is the creation date of the folder
 checkPresent := func(t *testing.T, objPath string) {
 entries, err := f.List(ctx, objPath)
@@ -4,7 +4,7 @@ package hubic

 // This uses the normal swift mechanism to update the credentials and
 // ignores the expires field returned by the Hubic API. This may need
-// to be revisted after some actual experience.
+// to be revisited after some actual experience.

 import (
 "context"
@@ -153,9 +153,9 @@ type CustomerInfo struct {
 AccountType string `json:"account_type"`
 SubscriptionType string `json:"subscription_type"`
 Usage int64 `json:"usage"`
-Qouta int64 `json:"quota"`
+Quota int64 `json:"quota"`
 BusinessUsage int64 `json:"business_usage"`
-BusinessQouta int64 `json:"business_quota"`
+BusinessQuota int64 `json:"business_quota"`
 WriteLocked bool `json:"write_locked"`
 ReadLocked bool `json:"read_locked"`
 LockedCause interface{} `json:"locked_cause"`
@@ -386,7 +386,7 @@ type Error struct {
 Cause string `xml:"cause"`
 }

-// Error returns a string for the error and statistifes the error interface
+// Error returns a string for the error and satisfies the error interface
 func (e *Error) Error() string {
 out := fmt.Sprintf("error %d", e.StatusCode)
 if e.Message != "" {
@@ -107,7 +107,7 @@ func init() {
 }
 }

-fmt.Printf("Use legacy authentification?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n")
+fmt.Printf("Use legacy authentication?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n")
 if config.Confirm(false) {
 v1config(ctx, name, m)
 } else {
@@ -230,7 +230,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }

-// v1config configure a jottacloud backend using legacy authentification
+// v1config configure a jottacloud backend using legacy authentication
 func v1config(ctx context.Context, name string, m configmap.Mapper) {
 srv := rest.NewClient(fshttp.NewClient(fs.Config))
@@ -323,7 +323,7 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
 return deviceRegistration, err
 }

-// doAuthV1 runs the actual token request for V1 authentification
+// doAuthV1 runs the actual token request for V1 authentication
 func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
 // prepare out token request with username and password
 values := url.Values{}
@@ -365,7 +365,7 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
 return token, err
 }

-// v2config configure a jottacloud backend using the modern JottaCli token based authentification
+// v2config configure a jottacloud backend using the modern JottaCli token based authentication
 func v2config(ctx context.Context, name string, m configmap.Mapper) {
 srv := rest.NewClient(fshttp.NewClient(fs.Config))
@@ -373,6 +373,9 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
 fmt.Printf("Login Token> ")
 loginToken := config.ReadLine()

+m.Set(configClientID, "jottacli")
+m.Set(configClientSecret, "")
+
 token, err := doAuthV2(ctx, srv, loginToken, m)
 if err != nil {
 log.Fatalf("Failed to get oauth token: %s", err)
@@ -384,7 +387,6 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {

 fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
 if config.Confirm(false) {
-oauthConfig.ClientID = "jottacli"
 oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
 if err != nil {
 log.Fatalf("Failed to load oAuthClient: %s", err)
@@ -403,7 +405,7 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
 m.Set("configVersion", strconv.Itoa(configVersion))
 }

-// doAuthV2 runs the actual token request for V2 authentification
+// doAuthV2 runs the actual token request for V2 authentication
 func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
 loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
 if err != nil {
@@ -551,7 +553,7 @@ func (f *Fs) setEndpointURL() {
 if f.opt.Mountpoint == "" {
 f.opt.Mountpoint = defaultMountpoint
 }
-f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
+f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
 }

 // readMetaDataForPath reads the metadata from the path
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
|
||||
retry, _ := shouldRetry(resp, err)
|
||||
return (retry && resp.StatusCode != 500), err
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1192,18 +1193,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

 _, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)

-// surprise! jottacloud fucked up dirmove - the api spits out an error but
-// dir gets moved regardless
-if apiErr, ok := err.(*api.Error); ok {
-if apiErr.StatusCode == 500 {
-_, err := f.NewObject(ctx, dstRemote)
-if err == fs.ErrorNotAFile {
-log.Printf("FIXME: ignoring DirMove error - move succeeded anyway\n")
-return nil
-}
-return err
-}
-}
 if err != nil {
 return errors.Wrap(err, "couldn't move directory")
 }
@@ -1523,7 +1512,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 return err
 }

-// If the file state is INCOMPLETE and CORRPUT, try to upload a then
+// If the file state is INCOMPLETE and CORRUPT, try to upload a then
 if response.State != "COMPLETED" {
 // how much do we still have to upload?
 remainingBytes := size - response.ResumePos
@@ -1213,7 +1213,7 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
 // Set the file to be a sparse file (important on Windows)
 err = file.SetSparse(out)
 if err != nil {
-fs.Debugf(o, "Failed to set sparse: %v", err)
+fs.Errorf(o, "Failed to set sparse: %v", err)
 }
 }

@@ -1231,6 +1231,15 @@ func (o *Object) setMetadata(info os.FileInfo) {
 o.modTime = info.ModTime()
 o.mode = info.Mode()
 o.fs.objectMetaMu.Unlock()
+// On Windows links read as 0 size so set the correct size here
+if runtime.GOOS == "windows" && o.translatedLink {
+linkdst, err := os.Readlink(o.path)
+if err != nil {
+fs.Errorf(o, "Failed to read link size: %v", err)
+} else {
+o.size = int64(len(linkdst))
+}
+}
 }

 // Stat an Object into info

@@ -6,7 +6,6 @@ import (
 "os"
 "path"
 "path/filepath"
-"runtime"
 "testing"
 "time"

@@ -89,9 +88,6 @@ func TestSymlink(t *testing.T) {

 // Object viewed as symlink
 file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
-if runtime.GOOS == "windows" {
-file2.Size = 0 // symlinks are 0 length under Windows
-}

 // Object viewed as destination
 file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -121,9 +117,6 @@ func TestSymlink(t *testing.T) {
 // Create a symlink
 modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
 file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
-if runtime.GOOS == "windows" {
-file3.Size = 0 // symlinks are 0 length under Windows
-}
 fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
 if haveLChtimes {
 fstest.CheckItems(t, r.Flocal, file1, file2, file3)
@@ -142,9 +135,7 @@ func TestSymlink(t *testing.T) {
 o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
 require.NoError(t, err)
 assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
-if runtime.GOOS != "windows" {
-assert.Equal(t, int64(8), o.Size())
-}
+assert.Equal(t, int64(8), o.Size())

 // Check that NewObject doesn't see the non suffixed version
 _, err = r.Flocal.NewObject(ctx, "symlink2.txt")
@@ -117,7 +117,7 @@ type ListItem struct {
 Name string `json:"name"`
 Home string `json:"home"`
 Size int64 `json:"size"`
-Mtime int64 `json:"mtime,omitempty"`
+Mtime uint64 `json:"mtime,omitempty"`
 Hash string `json:"hash,omitempty"`
 VirusScan string `json:"virus_scan,omitempty"`
 Tree string `json:"tree,omitempty"`
@@ -159,71 +159,6 @@ type FolderInfoResponse struct {
 Email string `json:"email"`
 }

-// ShardInfoResponse ...
-type ShardInfoResponse struct {
-Email string `json:"email"`
-Body struct {
-Video []struct {
-Count string `json:"count"`
-URL string `json:"url"`
-} `json:"video"`
-ViewDirect []struct {
-Count string `json:"count"`
-URL string `json:"url"`
-} `json:"view_direct"`
-WeblinkView []struct {
-Count string `json:"count"`
-URL string `json:"url"`
-} `json:"weblink_view"`
-WeblinkVideo []struct {
-Count string `json:"count"`
-URL string `json:"url"`
-} `json:"weblink_video"`
-WeblinkGet []struct {
-Count int `json:"count"`
-URL string `json:"url"`
-} `json:"weblink_get"`
-Stock []struct {
-Count string `json:"count"`
-URL string `json:"url"`
-} `json:"stock"`
-WeblinkThumbnails []struct {
-Count string `json:"count"`
-URL string `json:"url"`
-} `json:"weblink_thumbnails"`
-PublicUpload []struct {
-Count string `json:"count"`
-URL string `json:"url"`
-} `json:"public_upload"`
-Auth []struct {
-Count string `json:"count"`
-URL string `json:"url"`
-} `json:"auth"`
-Web []struct {
-Count string `json:"count"`
-URL string `json:"url"`
-} `json:"web"`
-View []struct {
-Count string `json:"count"`
-URL string `json:"url"`
-} `json:"view"`
-Upload []struct {
-Count string `json:"count"`
-URL string `json:"url"`
-} `json:"upload"`
-Get []struct {
-Count string `json:"count"`
-URL string `json:"url"`
-} `json:"get"`
-Thumbnails []struct {
-Count string `json:"count"`
-URL string `json:"url"`
-} `json:"thumbnails"`
-} `json:"body"`
-Time int64 `json:"time"`
-Status int `json:"status"`
-}
-
 // CleanupResponse ...
 type CleanupResponse struct {
 Email string `json:"email"`

@@ -37,6 +37,7 @@ import (
 "github.com/rclone/rclone/lib/encoder"
 "github.com/rclone/rclone/lib/oauthutil"
 "github.com/rclone/rclone/lib/pacer"
 "github.com/rclone/rclone/lib/readers"
 "github.com/rclone/rclone/lib/rest"

 "github.com/pkg/errors"
@@ -101,6 +102,7 @@ func init() {
 This feature is called "speedup" or "put by hash". It is especially efficient
 in case of generally available files like popular books, video or audio clips,
 because files are searched by hash in all accounts of all mailru users.
+It is meaningless and ineffective if source file is unique or encrypted.
 Please note that rclone may need local memory and disk space to calculate
 content hash in advance and decide whether full upload is required.
 Also, if rclone does not know file size in advance (e.g. in case of
@@ -191,7 +193,7 @@ This option must not be used by an ordinary user. It is intended only to
 facilitate remote troubleshooting of backend issues. Strict meaning of
 flags is not documented and not guaranteed to persist between releases.
 Quirks will be removed when the backend grows stable.
-Supported quirks: atomicmkdir binlist gzip insecure retry400`,
+Supported quirks: atomicmkdir binlist unknowndirs`,
 }, {
 Name: config.ConfigEncoding,
 Help: config.ConfigEncodingHelp,
@@ -237,9 +239,6 @@ func shouldRetry(res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, e
 reAuthErr := f.reAuthorize(opts, err)
 return reAuthErr == nil, err // return an original error
 }
-if res != nil && res.StatusCode == 400 && f.quirks.retry400 {
-return true, err
-}
 return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
 }

@@ -275,7 +274,7 @@ type Fs struct {
 root string // root path
 opt Options // parsed options
 speedupGlobs []string // list of file name patterns eligible for speedup
-speedupAny bool // true if all file names are aligible for speedup
+speedupAny bool // true if all file names are eligible for speedup
 features *fs.Features // optional features
 srv *rest.Client // REST API client
 cli *http.Client // underlying HTTP client (for authorize)
|
||||
if opt.UserAgent != "" {
|
||||
clientConfig.UserAgent = opt.UserAgent
|
||||
}
|
||||
clientConfig.NoGzip = !f.quirks.gzip // Send not "Accept-Encoding: gzip" like official client
|
||||
clientConfig.NoGzip = true // Mimic official client, skip sending "Accept-Encoding: gzip"
|
||||
f.cli = fshttp.NewClient(&clientConfig)
|
||||
|
||||
f.srv = rest.NewClient(f.cli)
|
||||
@@ -349,12 +348,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 f.srv.SetHeader("Accept", "*/*") // Send "Accept: */*" with every request like official client
 f.srv.SetErrorHandler(errorHandler)

-if f.quirks.insecure {
-transport := f.cli.Transport.(*fshttp.Transport).Transport
-transport.TLSClientConfig.InsecureSkipVerify = true
-transport.ProxyConnectHeader = http.Header{"User-Agent": {clientConfig.UserAgent}}
-}
-
 if err = f.authorize(ctx, false); err != nil {
 return nil, err
 }
@@ -387,30 +380,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Internal maintenance flags (to be removed when the backend matures).
// Primarily intended to facilitate remote support and troubleshooting.
type quirks struct {
gzip bool
insecure bool
binlist bool
atomicmkdir bool
retry400 bool
unknowndirs bool
}

func (q *quirks) parseQuirks(option string) {
for _, flag := range strings.Split(option, ",") {
switch strings.ToLower(strings.TrimSpace(flag)) {
case "gzip":
// This backend mimics the official client which never sends the
// "Accept-Encoding: gzip" header. However, enabling compression
// might be good for performance.
// Use this quirk to investigate the performance impact.
// Remove this quirk if performance does not improve.
q.gzip = true
case "insecure":
// The mailru disk-o protocol is not documented. To compare HTTP
// stream against the official client one can use Telerik Fiddler,
// which introduces a self-signed certificate. This quirk forces
// the Go http layer to accept it.
// Remove this quirk when the backend reaches maturity.
q.insecure = true
case "binlist":
// The official client sometimes uses a so called "bin" protocol,
// implemented in the listBin file system method below. This method
@@ -423,18 +400,14 @@ func (q *quirks) parseQuirks(option string) {
case "atomicmkdir":
// At the moment rclone requires Mkdir to return success if the
// directory already exists. However, such programs as borgbackup
// or restic use mkdir as a locking primitive and depend on its
// atomicity. This quirk is a workaround. It can be removed
// when the above issue is investigated.
// use mkdir as a locking primitive and depend on its atomicity.
// Remove this quirk when the above issue is investigated.
q.atomicmkdir = true
case "retry400":
// This quirk will help in troubleshooting a very rare "Error 400"
// issue. It can be removed if the problem does not show up
// for a year or so. See the below issue:
// https://github.com/ivandeex/rclone/issues/14
q.retry400 = true
case "unknowndirs":
// Accepts unknown resource types as folders.
q.unknowndirs = true
default:
// Just ignore all unknown flags
// Ignore unknown flags
}
}
}
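For reference, parseQuirks above takes a comma-separated string and silently skips anything it does not recognize, so a config value can be exercised directly (illustrative call, not part of the diff):

var q quirks
q.parseQuirks("atomicmkdir, BinList, bogus")
// q.atomicmkdir == true, q.binlist == true; "bogus" is ignored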
@@ -550,7 +523,7 @@ func (f *Fs) relPath(absPath string) (string, error) {
return "", fmt.Errorf("path %q should be under %q", absPath, f.root)
}

// metaServer ...
// metaServer returns URL of current meta server
func (f *Fs) metaServer(ctx context.Context) (string, error) {
f.metaMu.Lock()
defer f.metaMu.Unlock()
@@ -655,28 +628,56 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
if err != nil {
return nil, -1, err
}
switch item.Kind {
case "folder":
dir := fs.NewDir(remote, time.Unix(item.Mtime, 0)).SetSize(item.Size)
dirSize := item.Count.Files + item.Count.Folders
return dir, dirSize, nil
case "file":
binHash, err := mrhash.DecodeString(item.Hash)
if err != nil {
return nil, -1, err
}
file := &Object{
fs: f,
remote: remote,
hasMetaData: true,
size: item.Size,
mrHash: binHash,
modTime: time.Unix(item.Mtime, 0),
}
return file, -1, nil
default:
return nil, -1, fmt.Errorf("Unknown resource type %q", item.Kind)

mTime := int64(item.Mtime)
if mTime < 0 {
fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
mTime = 0
}
modTime := time.Unix(mTime, 0)

isDir, err := f.isDir(item.Kind, remote)
if err != nil {
return nil, -1, err
}
if isDir {
dir := fs.NewDir(remote, modTime).SetSize(item.Size)
return dir, item.Count.Files + item.Count.Folders, nil
}

binHash, err := mrhash.DecodeString(item.Hash)
if err != nil {
return nil, -1, err
}
file := &Object{
fs: f,
remote: remote,
hasMetaData: true,
size: item.Size,
mrHash: binHash,
modTime: modTime,
}
return file, -1, nil
}

// isDir returns true for directories, false for files
func (f *Fs) isDir(kind, path string) (bool, error) {
switch kind {
case "":
return false, errors.New("empty resource type")
case "file":
return false, nil
case "folder":
// fall thru
case "camera-upload", "mounted", "shared":
fs.Debugf(f, "[%s]: folder has type %q", path, kind)
default:
if !f.quirks.unknowndirs {
return false, fmt.Errorf("unknown resource type %q", kind)
}
fs.Errorf(f, "[%s]: folder has unknown type %q", path, kind)
}
return true, nil
}

// List the objects and directories in dir into entries.
@@ -744,7 +745,11 @@ func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int)
return nil, err
}

if info.Body.Kind != "folder" {
isDir, err := f.isDir(info.Body.Kind, dirPath)
if err != nil {
return nil, err
}
if !isDir {
return nil, fs.ErrorIsFile
}
@@ -1597,23 +1602,28 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

var (
fileBuf []byte
fileHash []byte
newHash []byte
trySpeedup bool
fileBuf []byte
fileHash []byte
newHash []byte
slowHash bool
localSrc bool
)
if srcObj := fs.UnWrapObjectInfo(src); srcObj != nil {
srcFeatures := srcObj.Fs().Features()
slowHash = srcFeatures.SlowHash
localSrc = srcFeatures.IsLocal
}

// Don't disturb the source if file fits in hash.
// Skip an extra speedup request if file fits in hash.
if size > mrhash.Size {
// Request hash from source.
// Try speedup if it's globally enabled but skip extra post
// request if file is small and fits in the metadata request
trySpeedup := o.fs.opt.SpeedupEnable && size > mrhash.Size

// Try to get the hash if it's instant
if trySpeedup && !slowHash {
if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
fileHash, _ = mrhash.DecodeString(srcHash)
}

// Try speedup if it's globally enabled and source hash is available.
trySpeedup = o.fs.opt.SpeedupEnable
if trySpeedup && fileHash != nil {
if fileHash != nil {
if o.putByHash(ctx, fileHash, src, "source") {
return nil
}
@@ -1622,13 +1632,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

// Need to calculate hash, check whether file is still eligible for speedup
if trySpeedup {
trySpeedup = o.fs.eligibleForSpeedup(o.Remote(), size, options...)
trySpeedup = trySpeedup && o.fs.eligibleForSpeedup(o.Remote(), size, options...)

// Attempt to put by hash if file is local and eligible
if trySpeedup && localSrc {
if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
fileHash, _ = mrhash.DecodeString(srcHash)
}
if fileHash != nil && o.putByHash(ctx, fileHash, src, "localfs") {
return nil
}
// If local file hashing has failed, it's pointless to try anymore
trySpeedup = false
}

// Attempt to put by calculating hash in memory
if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
//fs.Debugf(o, "attempt to put by hash from memory")
fileBuf, err = ioutil.ReadAll(in)
if err != nil {
return err
@@ -1758,6 +1777,7 @@ func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
return nil
}

// putByHash is a thin wrapper around addFileMetaData
func (o *Object) putByHash(ctx context.Context, mrHash []byte, info fs.ObjectInfo, method string) bool {
oNew := new(Object)
*oNew = *o
@@ -1861,30 +1881,30 @@ func (f *Fs) uploadShard(ctx context.Context) (string, error) {
return f.shardURL, nil
}

token, err := f.accessToken()
if err != nil {
return "", err
}

opts := rest.Opts{
Method: "GET",
Path: "/api/m1/dispatcher",
Parameters: url.Values{
"client_id": {api.OAuthClientID},
"access_token": {token},
},
RootURL: api.DispatchServerURL,
Method: "GET",
Path: "/u",
}

var info api.ShardInfoResponse
var (
res *http.Response
url string
err error
)
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(res, err, f, &opts)
res, err = f.srv.Call(ctx, &opts)
if err == nil {
url, err = readBodyWord(res)
}
return fserrors.ShouldRetry(err), err
})
if err != nil {
closeBody(res)
return "", err
}

f.shardURL = info.Body.Upload[0].URL
f.shardURL = url
f.shardExpiry = time.Now().Add(shardExpirySec * time.Second)
fs.Debugf(f, "new upload shard: %s", f.shardURL)

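The rewritten uploadShard reads the shard URL as the first token of a plain-text response via readBodyWord, which is referenced but not defined in this hunk. A sketch of what such a helper could look like (an assumption; the real implementation may differ):

// readBodyWord returns the first whitespace-separated token of the
// response body, e.g. the upload shard URL returned by /u.
func readBodyWord(res *http.Response) (string, error) {
	b, err := ioutil.ReadAll(res.Body)
	_ = res.Body.Close()
	if err != nil {
		return "", err
	}
	words := strings.Fields(string(b)) // split on any whitespace
	if len(words) == 0 {
		return "", errors.New("empty reply from dispatcher")
	}
	return words[0], nil
}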
@@ -2116,7 +2136,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}

start, end, partial := getTransferRange(o.size, options...)
start, end, partialRequest := getTransferRange(o.size, options...)

headers := map[string]string{
"Accept": "*/*",
"Content-Type": "application/octet-stream",
}
if partialRequest {
rangeStr := fmt.Sprintf("bytes=%d-%d", start, end-1)
headers["Range"] = rangeStr
// headers["Content-Range"] = rangeStr
headers["Accept-Ranges"] = "bytes"
}

// TODO: set custom timeouts
opts := rest.Opts{
@@ -2127,10 +2158,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
"client_id": {api.OAuthClientID},
"token": {token},
},
ExtraHeaders: map[string]string{
"Accept": "*/*",
"Range": fmt.Sprintf("bytes=%d-%d", start, end-1),
},
ExtraHeaders: headers,
}

var res *http.Response
@@ -2151,18 +2179,37 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}

var hasher gohash.Hash
if !partial {
// Server should respond with Status 206 and Content-Range header to a range
// request. Status 200 (and no Content-Range) means a full-content response.
partialResponse := res.StatusCode == 206

var (
hasher gohash.Hash
wrapStream io.ReadCloser
)
if !partialResponse {
// Cannot check hash of partial download
hasher = mrhash.New()
}
wrapStream := &endHandler{
wrapStream = &endHandler{
ctx: ctx,
stream: res.Body,
hasher: hasher,
o: o,
server: server,
}
if partialRequest && !partialResponse {
fs.Debugf(o, "Server returned full content instead of range")
if start > 0 {
// Discard the beginning of the data
_, err = io.CopyN(ioutil.Discard, wrapStream, start)
if err != nil {
closeBody(res)
return nil, err
}
}
wrapStream = readers.NewLimitedReadCloser(wrapStream, end-start)
}
return wrapStream, nil
}

@@ -2215,7 +2262,7 @@ func (e *endHandler) handle(err error) error {
return io.EOF
}

// serverPool backs server dispacher
// serverPool backs server dispatcher
type serverPool struct {
pool pendingServerMap
mu sync.Mutex

@@ -221,7 +221,7 @@ func (f *Fs) setRoot(root string) {
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

// NewFs contstructs an Fs from the path, bucket:path
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)

@@ -254,7 +254,7 @@ type MoveItemRequest struct {
//Always Type:view and Scope:anonymous for public sharing
type CreateShareLinkRequest struct {
Type string `json:"type"` //Link type in View, Edit or Embed
Scope string `json:"scope,omitempty"` //Optional. Scope in anonymousi, organization
Scope string `json:"scope,omitempty"` //Optional. Scope in anonymous, organization
}

//CreateShareLinkResponse is the response from CreateShareLinkRequest

@@ -1247,6 +1247,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, errors.Wrap(err, "about failed")
}
q := drive.Quota
// On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
if q.Total == 0 && q.Used == 0 && q.Deleted == 0 && q.Remaining == 0 {
return &fs.Usage{}, nil
}
usage = &fs.Usage{
Total: fs.NewUsageValue(q.Total), // quota of bytes that can be used
Used: fs.NewUsageValue(q.Used), // bytes in use

@@ -646,7 +646,6 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
400, // Bad request (seen in "Next token is expired")
401, // Unauthorized (seen in "Token has expired")
408, // Request Timeout
423, // Locked - get this on folders sometimes

@@ -1125,7 +1125,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Special treatment for a 0 length upload. This doesn't work
// with PUT even with Content-Length set (by setting
// opts.Body=0), so upload it as a multpart form POST with
// opts.Body=0), so upload it as a multipart form POST with
// Content-Length set.
if size == 0 {
formReader, contentType, overhead, err := rest.MultipartUpload(in, opts.Parameters, "content", leaf)

@@ -236,10 +236,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
// defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
exisitingObj, err := f.NewObject(ctx, src.Remote())
existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)

@@ -115,7 +115,7 @@ func (o *Object) MimeType(ctx context.Context) string {

// setMetadataFromEntry sets the fs data from a putio.File
//
// This isn't a complete set of metadata and has an inacurate date
// This isn't a complete set of metadata and has an inaccurate date
func (o *Object) setMetadataFromEntry(info putio.File) error {
o.file = &info
o.modtime = info.UpdatedAt.Time

@@ -104,7 +104,7 @@ enough memory, then increasing this will speed up the transfers.`,
This is the number of chunks of the same file that are uploaded
concurrently.

NB if you set this to > 1 then the checksums of multpart uploads
NB if you set this to > 1 then the checksums of multipart uploads
become corrupted (the uploads themselves are not corrupted though).

If you are uploading small numbers of large file over high speed link

241
backend/s3/s3.go
@@ -5,6 +5,7 @@ import (
"bytes"
"context"
"crypto/md5"
"crypto/tls"
"encoding/base64"
"encoding/hex"
"encoding/xml"
@@ -58,7 +59,7 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
@@ -94,6 +95,9 @@ func init() {
}, {
Value: "StackPath",
Help: "StackPath Object Storage",
}, {
Value: "TencentCOS",
Help: "Tencent Cloud Object Storage (COS)",
}, {
Value: "Wasabi",
Help: "Wasabi Object Storage",
@@ -119,6 +123,9 @@ func init() {
Name: "secret_access_key",
Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
}, {
// References:
// 1. https://docs.aws.amazon.com/general/latest/gr/rande.html
// 2. https://docs.aws.amazon.com/general/latest/gr/s3.html
Name: "region",
Help: "Region to connect to.",
Provider: "AWS",
@@ -128,12 +135,12 @@ func init() {
}, {
Value: "us-east-2",
Help: "US East (Ohio) Region\nNeeds location constraint us-east-2.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
}, {
Value: "ca-central-1",
Help: "Canada (Central) Region\nNeeds location constraint ca-central-1.",
@@ -143,9 +150,15 @@ func init() {
}, {
Value: "eu-west-2",
Help: "EU (London) Region\nNeeds location constraint eu-west-2.",
}, {
Value: "eu-west-3",
Help: "EU (Paris) Region\nNeeds location constraint eu-west-3.",
}, {
Value: "eu-north-1",
Help: "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
}, {
Value: "eu-south-1",
Help: "EU (Milan) Region\nNeeds location constraint eu-south-1.",
}, {
Value: "eu-central-1",
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
@@ -161,15 +174,36 @@ func init() {
}, {
Value: "ap-northeast-2",
Help: "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
}, {
Value: "ap-northeast-3",
Help: "Asia Pacific (Osaka-Local)\nNeeds location constraint ap-northeast-3.",
}, {
Value: "ap-south-1",
Help: "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
}, {
Value: "ap-east-1",
Help: "Asia Patific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
Help: "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
}, {
Value: "me-south-1",
Help: "Middle East (Bahrain) Region\nNeeds location constraint me-south-1.",
}, {
Value: "af-south-1",
Help: "Africa (Cape Town) Region\nNeeds location constraint af-south-1.",
}, {
Value: "cn-north-1",
Help: "China (Beijing) Region\nNeeds location constraint cn-north-1.",
}, {
Value: "cn-northwest-1",
Help: "China (Ningxia) Region\nNeeds location constraint cn-northwest-1.",
}, {
Value: "us-gov-east-1",
Help: "AWS GovCloud (US-East) Region\nNeeds location constraint us-gov-east-1.",
}, {
Value: "us-gov-west-1",
Help: "AWS GovCloud (US) Region\nNeeds location constraint us-gov-west-1.",
}},
}, {
Name: "region",
@@ -185,7 +219,7 @@ func init() {
}, {
Name: "region",
Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,Scaleway",
Provider: "!AWS,Alibaba,Scaleway,TencentCOS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure. Will use v4 signatures and an empty region.",
@@ -476,10 +510,73 @@ func init() {
Value: "s3.eu-central-1.stackpathstorage.com",
Help: "EU Endpoint",
}},
}, {
// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
Name: "endpoint",
Help: "Endpoint for Tencent COS API.",
Provider: "TencentCOS",
Examples: []fs.OptionExample{{
Value: "cos.ap-beijing.myqcloud.com",
Help: "Beijing Region.",
}, {
Value: "cos.ap-nanjing.myqcloud.com",
Help: "Nanjing Region.",
}, {
Value: "cos.ap-shanghai.myqcloud.com",
Help: "Shanghai Region.",
}, {
Value: "cos.ap-guangzhou.myqcloud.com",
Help: "Guangzhou Region.",
}, {
Value: "cos.ap-nanjing.myqcloud.com",
Help: "Nanjing Region.",
}, {
Value: "cos.ap-chengdu.myqcloud.com",
Help: "Chengdu Region.",
}, {
Value: "cos.ap-chongqing.myqcloud.com",
Help: "Chongqing Region.",
}, {
Value: "cos.ap-hongkong.myqcloud.com",
Help: "Hong Kong (China) Region.",
}, {
Value: "cos.ap-singapore.myqcloud.com",
Help: "Singapore Region.",
}, {
Value: "cos.ap-mumbai.myqcloud.com",
Help: "Mumbai Region.",
}, {
Value: "cos.ap-seoul.myqcloud.com",
Help: "Seoul Region.",
}, {
Value: "cos.ap-bangkok.myqcloud.com",
Help: "Bangkok Region.",
}, {
Value: "cos.ap-tokyo.myqcloud.com",
Help: "Tokyo Region.",
}, {
Value: "cos.na-siliconvalley.myqcloud.com",
Help: "Silicon Valley Region.",
}, {
Value: "cos.na-ashburn.myqcloud.com",
Help: "Virginia Region.",
}, {
Value: "cos.na-toronto.myqcloud.com",
Help: "Toronto Region.",
}, {
Value: "cos.eu-frankfurt.myqcloud.com",
Help: "Frankfurt Region.",
}, {
Value: "cos.eu-moscow.myqcloud.com",
Help: "Moscow Region.",
}, {
Value: "cos.accelerate.myqcloud.com",
Help: "Use Tencent COS Accelerate Endpoint.",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
@@ -519,12 +616,12 @@ func init() {
}, {
Value: "us-east-2",
Help: "US East (Ohio) Region.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region.",
}, {
Value: "ca-central-1",
Help: "Canada (Central) Region.",
@@ -534,9 +631,15 @@ func init() {
}, {
Value: "eu-west-2",
Help: "EU (London) Region.",
}, {
Value: "eu-west-3",
Help: "EU (Paris) Region.",
}, {
Value: "eu-north-1",
Help: "EU (Stockholm) Region.",
}, {
Value: "eu-south-1",
Help: "EU (Milan) Region.",
}, {
Value: "EU",
Help: "EU Region.",
@@ -551,16 +654,37 @@ func init() {
Help: "Asia Pacific (Tokyo) Region.",
}, {
Value: "ap-northeast-2",
Help: "Asia Pacific (Seoul)",
Help: "Asia Pacific (Seoul) Region.",
}, {
Value: "ap-northeast-3",
Help: "Asia Pacific (Osaka-Local) Region.",
}, {
Value: "ap-south-1",
Help: "Asia Pacific (Mumbai)",
Help: "Asia Pacific (Mumbai) Region.",
}, {
Value: "ap-east-1",
Help: "Asia Pacific (Hong Kong)",
Help: "Asia Pacific (Hong Kong) Region.",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region.",
}, {
Value: "me-south-1",
Help: "Middle East (Bahrain) Region.",
}, {
Value: "af-south-1",
Help: "Africa (Cape Town) Region.",
}, {
Value: "cn-north-1",
Help: "China (Beijing) Region",
}, {
Value: "cn-northwest-1",
Help: "China (Ningxia) Region.",
}, {
Value: "us-gov-east-1",
Help: "AWS GovCloud (US-East) Region.",
}, {
Value: "us-gov-west-1",
Help: "AWS GovCloud (US) Region.",
}},
}, {
Name: "location_constraint",
@@ -666,7 +790,7 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath,TencentCOS",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -678,9 +802,13 @@ For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview
Note that this ACL is applied when server side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
Examples: []fs.OptionExample{{
Value: "default",
Help: "Owner gets Full_CONTROL. No one else has access rights (default).",
Provider: "TencentCOS",
}, {
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
Provider: "!IBMCOS",
Provider: "!IBMCOS,TencentCOS",
}, {
Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
@@ -842,6 +970,24 @@ isn't set then "acl" is used instead.`,
Value: "STANDARD_IA",
Help: "Infrequent access storage mode.",
}},
}, {
// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
Name: "storage_class",
Help: "The storage class to use when storing new objects in Tencent COS.",
Provider: "TencentCOS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Default",
}, {
Value: "STANDARD",
Help: "Standard storage class",
}, {
Value: "ARCHIVE",
Help: "Archive storage mode.",
}, {
Value: "STANDARD_IA",
Help: "Infrequent access storage mode.",
}},
}, {
// Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes
Name: "storage_class",
@@ -975,7 +1121,7 @@ if false then rclone will use virtual path style. See [the AWS S3
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
for more info.

Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
Some providers (eg AWS, Aliyun OSS, Netease COS or Tencent COS) require this set to
false - rclone will do this automatically based on the provider
setting.`,
Default: true,
@@ -1044,8 +1190,7 @@ rclone does if you know the bucket exists already.
// - trailing / encoding
// so that AWS keys are always valid file names
Default: encoder.EncodeInvalidUtf8 |
encoder.EncodeSlash |
encoder.EncodeDot,
encoder.EncodeSlash,
}, {
Name: "memory_pool_flush_time",
Default: memoryPoolFlushTime,
@@ -1058,6 +1203,19 @@ This option controls how often unused buffers will be removed from the pool.`,
Default: memoryPoolUseMmap,
Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, {
Name: "disable_http2",
Default: false,
Advanced: true,
Help: `Disable usage of http2 for S3 backends

There is currently an unsolved issue with the s3 (specifically minio) backend
and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be
disabled here. When the issue is solved this flag will be removed.

See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631

`,
},
}})
}
@@ -1115,6 +1273,7 @@ type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
DisableHTTP2 bool `config:"disable_http2"`
}

// Fs represents a remote s3 server
@@ -1227,7 +1386,8 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
fs.Debugf(nil, "SPLIT %q %q", f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

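The switch from path.Join to bucket.Join matters because path.Join runs path.Clean on its result, rewriting "." and ".." elements that are perfectly legal inside S3 object keys; together with DisableRestProtocolURICleaning set further down, keys containing such segments survive intact. Illustration (bucket.Join is assumed here to concatenate without cleaning):

path.Join("bucket", "dir/../key") // "bucket/key"     - ".." collapsed by Clean
path.Join("bucket", "dir/./key")  // "bucket/dir/key" - "." dropped by Clean
// bucket.Join("bucket", "dir/../key") keeps "bucket/dir/../key",
// so an object literally named "dir/../key" stays reachable.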
@@ -1236,6 +1396,19 @@ func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}

// getClient makes an http client according to the options
func getClient(opt *Options) *http.Client {
// TODO: Do we need cookies too?
t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
if opt.DisableHTTP2 {
t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
}
})
return &http.Client{
Transport: t,
}
}

// s3Connection makes a connection to s3
func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
// Make the auth
@@ -1246,6 +1419,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
}

lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service

def := defaults.Get()
def.Config.HTTPClient = lowTimeoutClient

@@ -1305,7 +1479,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
if opt.Region == "" {
opt.Region = "us-east-1"
}
if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.UseAccelerateEndpoint {
if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.Provider == "TencentCOS" || opt.UseAccelerateEndpoint {
opt.ForcePathStyle = false
}
if opt.Provider == "Scaleway" && opt.MaxUploadParts > 1000 {
@@ -1314,7 +1488,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
awsConfig := aws.NewConfig().
WithMaxRetries(0). // Rely on rclone's retry logic
WithCredentials(cred).
WithHTTPClient(fshttp.NewClient(fs.Config)).
WithHTTPClient(getClient(opt)).
WithS3ForcePathStyle(opt.ForcePathStyle).
WithS3UseAccelerate(opt.UseAccelerateEndpoint).
WithS3UsEast1RegionalEndpoint(endpoints.RegionalS3UsEast1Endpoint)
@@ -1326,6 +1500,9 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
awsConfig.WithEndpoint(opt.Endpoint)
}

// Allow URI with "." etc
awsConfig.DisableRestProtocolURICleaning = aws.Bool(true)

// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
awsSessionOpts := session.Options{
Config: *awsConfig,
@@ -1428,7 +1605,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ses: ses,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
srv: fshttp.NewClient(fs.Config),
srv: getClient(opt),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
@@ -1560,7 +1737,7 @@ type listFn func(remote string, object *s3.Object, isDirectory bool) error
// bucket to the start.
//
// Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
func (f *Fs) list(ctx context.Context, bucketName, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
if prefix != "" {
prefix += "/"
}
@@ -1587,11 +1764,11 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
//
// So we enable only on providers we know supports it properly, all others can retry when a
// XML Syntax error is detected.
var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio")
var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio" || f.opt.Provider == "TencentCOS")
for {
// FIXME need to implement ALL loop
req := s3.ListObjectsInput{
Bucket: &bucket,
Bucket: &bucketName,
Delimiter: &delimiter,
Prefix: &directory,
MaxKeys: &f.opt.ListChunk,
@@ -1631,7 +1808,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
if reqErr, ok := err.(awserr.RequestFailure); ok {
// 301 if wrong region for bucket
if reqErr.StatusCode() == http.StatusMovedPermanently {
fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucketName)
return nil
}
}
@@ -1659,7 +1836,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
remote = remote[len(prefix):]
if addBucket {
remote = path.Join(bucket, remote)
remote = bucket.Join(bucketName, remote)
}
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
@@ -1687,7 +1864,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
remote = remote[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
remote = bucket.Join(bucketName, remote)
}
// is this a directory marker?
if isDirectory && object.Size != nil && *object.Size == 0 {
@@ -1973,7 +2150,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
req.Bucket = &dstBucket
req.ACL = &f.opt.ACL
req.Key = &dstPath
source := pathEscape(path.Join(srcBucket, srcPath))
source := pathEscape(bucket.Join(srcBucket, srcPath))
req.CopySource = &source
if f.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &f.opt.ServerSideEncryption
@@ -2190,7 +2367,7 @@ All the objects shown will be marked for restore, then
rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard

It returns a list of status dictionaries with Remote and Status
keys. The Status will be OK if it was successfull or an error message
keys. The Status will be OK if it was successful or an error message
if not.

[
@@ -2355,7 +2532,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
// listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
//
// Note that rather lazily we treat key as a prefix so it matches
// directories and objects. This could suprise the user if they ask
// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uploads []*s3.MultipartUpload, err error) {
var (
@@ -2888,7 +3065,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

// read the md5sum if available
// - for non multpart
// - for non multipart
// - so we can add a ContentMD5
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash

@@ -46,7 +46,7 @@ type Library struct {
Encrypted bool `json:"encrypted"`
Owner string `json:"owner"`
ID string `json:"id"`
Size int `json:"size"`
Size int64 `json:"size"`
Name string `json:"name"`
Modified int64 `json:"mtime"`
}

@@ -1004,7 +1004,7 @@ func (f *Fs) listLibraries(ctx context.Context) (entries fs.DirEntries, err erro

for _, library := range libraries {
d := fs.NewDir(library.Name, time.Unix(library.Modified, 0))
d.SetSize(int64(library.Size))
d.SetSize(library.Size)
entries = append(entries, d)
}

@@ -11,7 +11,6 @@ import (
"io"
"io/ioutil"
"os"
"os/user"
"path"
"regexp"
"strconv"
@@ -33,6 +32,7 @@ import (
"github.com/rclone/rclone/lib/readers"
sshagent "github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/knownhosts"
)

const (
@@ -43,7 +43,7 @@ const (
)

var (
currentUser = readCurrentUser()
currentUser = env.CurrentUser()
)

func init() {
@@ -82,6 +82,21 @@ func init() {
Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
in the new OpenSSH format can't be used.`,
IsPassword: true,
}, {
Name: "pubkey_file",
Help: `Optional path to public key file.

Set this if you have a signed certificate you want to use for authentication.` + env.ShellExpandHelp,
}, {
Name: "known_hosts_file",
Help: `Optional path to known_hosts file.

Set this value to enable server host key validation.` + env.ShellExpandHelp,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "~/.ssh/known_hosts",
Help: "Use OpenSSH's known_hosts file",
}},
}, {
Name: "key_use_agent",
Help: `When set forces the usage of the ssh-agent.
@@ -190,6 +205,8 @@ type Options struct {
KeyPem string `config:"key_pem"`
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
PubKeyFile string `config:"pubkey_file"`
KnownHostsFile string `config:"known_hosts_file"`
KeyUseAgent bool `config:"key_use_agent"`
UseInsecureCipher bool `config:"use_insecure_cipher"`
DisableHashCheck bool `config:"disable_hashcheck"`
@@ -218,6 +235,7 @@ type Fs struct {
poolMu sync.Mutex
pool []*conn
pacer *fs.Pacer // pacer for operations
savedpswd string
}

// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -231,20 +249,6 @@ type Object struct {
sha1sum *string // Cached SHA1 checksum
}

// readCurrentUser finds the current user name or "" if not found
func readCurrentUser() (userName string) {
usr, err := user.Current()
if err == nil {
return usr.Username
}
// Fall back to reading $USER then $LOGNAME
userName = os.Getenv("USER")
if userName != "" {
return userName
}
return os.Getenv("LOGNAME")
}

// dial starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client.
@@ -410,6 +414,10 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// This will hold the Fs object. We need to create it here
// so we can refer to it in the SSH callback, but it's populated
// in NewFsWithConnection
f := &Fs{}
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
@@ -423,6 +431,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.Port == "" {
opt.Port = "22"
}

sshConfig := &ssh.ClientConfig{
User: opt.User,
Auth: []ssh.AuthMethod{},
@@ -431,6 +440,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ClientVersion: "SSH-2.0-" + fs.Config.UserAgent,
}

if opt.KnownHostsFile != "" {
hostcallback, err := knownhosts.New(opt.KnownHostsFile)
if err != nil {
return nil, errors.Wrap(err, "couldn't parse known_hosts_file")
}
sshConfig.HostKeyCallback = hostcallback
}

if opt.UseInsecureCipher {
sshConfig.Config.SetDefaults()
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc", "aes192-cbc", "aes256-cbc", "3des-cbc")
@@ -438,6 +455,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

keyFile := env.ShellExpand(opt.KeyFile)
pubkeyFile := env.ShellExpand(opt.PubKeyFile)
//keyPem := env.ShellExpand(opt.KeyPem)
// Add ssh agent-auth if no password or file or key PEM specified
if (opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {
@@ -507,7 +525,38 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "failed to parse private key file")
}
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))

// If a public key has been specified then use that
if pubkeyFile != "" {
certfile, err := ioutil.ReadFile(pubkeyFile)
if err != nil {
return nil, errors.Wrap(err, "unable to read cert file")
}

pk, _, _, _, err := ssh.ParseAuthorizedKey(certfile)
if err != nil {
return nil, errors.Wrap(err, "unable to parse cert file")
}

// And the signer for this, which includes the private key signer
// This is what we'll pass to the ssh client.
// Normally the ssh client will use the public key built
// into the private key, but we need to tell it to use the user
// specified public key cert. This signer is specific to the
// cert and will include the private key signer. Now ssh
// knows everything it needs.
cert, ok := pk.(*ssh.Certificate)
if !ok {
return nil, errors.New("public key file is not a certificate file: " + pubkeyFile)
}
pubsigner, err := ssh.NewCertSigner(cert, signer)
if err != nil {
return nil, errors.Wrap(err, "error generating cert signer")
}
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(pubsigner))
} else {
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))
}
}

// Auth from password if specified
@@ -519,30 +568,45 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
}

// Ask for password if none was defined and we're allowed to
// Config for password if none was defined and we're allowed to
// We don't ask now; we ask if the ssh connection succeeds
if opt.Pass == "" && opt.AskPassword {
_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
clearpass := config.ReadPassword()
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
sshConfig.Auth = append(sshConfig.Auth, ssh.PasswordCallback(f.getPass))
}

return NewFsWithConnection(ctx, name, root, m, opt, sshConfig)
return NewFsWithConnection(ctx, f, name, root, m, opt, sshConfig)
}

// If we're in password mode and ssh connection succeeds then this
// callback is called. First time around we ask the user, and then
// save it so on reconnection we give back the previous string.
// This removes the ability to let the user correct a mistaken entry,
// but means that reconnects are transparent.
// We'll re-use config.Pass for this, 'cos we know it's not been
// specified.
func (f *Fs) getPass() (string, error) {
for f.savedpswd == "" {
_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
f.savedpswd = config.ReadPassword()
}
return f.savedpswd, nil
}

// NewFsWithConnection creates a new Fs object from the name and root and an ssh.ClientConfig. It connects to
// the host specified in the ssh.ClientConfig
func NewFsWithConnection(ctx context.Context, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
f := &Fs{
name: name,
root: root,
absRoot: root,
opt: *opt,
m: m,
config: sshConfig,
url: "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
mkdirLock: newStringLock(),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
// Populate the Filesystem Object
f.name = name
f.root = root
f.absRoot = root
f.opt = *opt
f.m = m
f.config = sshConfig
f.url = "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root
f.mkdirLock = newStringLock()
f.pacer = fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
f.savedpswd = ""

f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
SlowHash: true,
@@ -888,7 +952,7 @@ func (f *Fs) run(cmd string) ([]byte, error) {

session, err := c.sshClient.NewSession()
if err != nil {
return nil, errors.Wrap(err, "run: get SFTP sessiion")
return nil, errors.Wrap(err, "run: get SFTP session")
}
defer func() {
_ = session.Close()
@@ -1087,7 +1151,7 @@ func shellEscape(str string) string {
func parseHash(bytes []byte) string {
// For strings with backslash *sum writes a leading \
// https://unix.stackexchange.com/q/313733/94054
return strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0] // Split at hash / filename separator
return strings.ToLower(strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0]) // Split at hash / filename separator / all convert to lowercase
}
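Besides splitting, the new version also normalizes the digest to lower case, so remotes whose *sum tools print upper-case hex now compare equal to locally computed hashes. For example (illustrative input):

parseHash([]byte(`\D41D8CD98F00B204E9800998ECF8427E  file`))
// before: "D41D8CD98F00B204E9800998ECF8427E"
// after:  "d41d8cd98f00b204e9800998ecf8427e"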
|
||||
// Parses the byte array output from the SSH session
|
||||
|
||||
@@ -95,7 +95,7 @@ type UploadSpecification struct {
|
||||
ChunkURI string `json:"ChunkUri"` // Specifies the URI the client must send the file data to
|
||||
FinishURI string `json:"FinishUri"` // If provided, specifies the final call the client must perform to finish the upload process
|
||||
ProgressData string `json:"ProgressData"` // Allows the client to check progress of standard uploads
|
||||
IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supproted.
|
||||
IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supported.
|
||||
ResumeIndex int64 `json:"ResumeIndex"` // Specifies the initial index for resuming, if IsResume is true.
|
||||
ResumeOffset int64 `json:"ResumeOffset"` // Specifies the initial file offset by bytes, if IsResume is true
|
||||
ResumeFileHash string `json:"ResumeFileHash"` // Specifies the MD5 hash of the first ResumeOffset bytes of the partial file found at the server
|
||||
|
||||
@@ -1090,7 +1090,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
} else if err != nil {
return nil, errors.Wrap(err, "copy: failed to examine destination dir")
} else {
// otherwise need to copy via a temporary directlry
// otherwise need to copy via a temporary directory
}
}

@@ -350,7 +350,7 @@ func (f *Fs) getAuth(req *http.Request) (err error) {
// if have auth, check it is in date
if f.opt.Authorization == "" || f.opt.User == "" || f.authExpiry.IsZero() || time.Until(f.authExpiry) < expiryLeeway {
// Get the auth token
f.srv.SetSigner(nil) // temporariliy remove the signer so we don't infinitely recurse
f.srv.SetSigner(nil) // temporarily remove the signer so we don't infinitely recurse
err = f.getAuthToken(ctx)
f.srv.SetSigner(f.getAuth) // replace signer
if err != nil {

@@ -67,12 +67,12 @@ func init() {
log.Fatalf("Couldn't create access grant: %v", err)
}

serialziedAccess, err := access.Serialize()
serializedAccess, err := access.Serialize()
if err != nil {
log.Fatalf("Couldn't serialize access grant: %v", err)
}
configMapper.Set("satellite_address", satellite)
configMapper.Set("access_grant", serialziedAccess)
configMapper.Set("access_grant", serializedAccess)
} else if provider == existingProvider {
config.FileDeleteKey(name, "satellite_address")
config.FileDeleteKey(name, "api_key")

@@ -61,7 +61,7 @@ func (p *EpAll) Action(ctx context.Context, upstreams []*upstream.Fs, path strin
return p.epall(ctx, upstreams, path)
}

// ActionEntries is ACTION category policy but receivng a set of candidate entries
// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpAll) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound

@@ -106,7 +106,7 @@ func (p *EpMfs) Search(ctx context.Context, upstreams []*upstream.Fs, path strin
return p.mfs(upstreams)
}

// SearchEntries is SEARCH category policy but receivng a set of candidate entries
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
func (p *EpMfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound

@@ -14,7 +14,7 @@ func init() {
// FF stands for first found
// Search category: same as epff.
// Action category: same as epff.
// Create category: Given the order of the candiates, act on the first one found.
// Create category: Given the order of the candidates, act on the first one found.
type FF struct {
EpFF
}

@@ -60,7 +60,7 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
Upstreams fs.SpaceSepList `config:"upstreams"`
Remotes fs.SpaceSepList `config:"remotes"` // Depreated
Remotes fs.SpaceSepList `config:"remotes"` // Deprecated
ActionPolicy string `config:"action_policy"`
CreatePolicy string `config:"create_policy"`
SearchPolicy string `config:"search_policy"`
@@ -145,11 +145,16 @@ func (f *Fs) Hashes() hash.Set {
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
upstreams, err := f.create(ctx, dir)
if err == fs.ErrorObjectNotFound && dir != parentDir(dir) {
if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
return err
if err == fs.ErrorObjectNotFound {
if dir != parentDir(dir) {
if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
return err
}
upstreams, err = f.create(ctx, dir)
} else if dir == "" {
// If root dirs not created then create them
upstreams, err = f.upstreams, nil
}
upstreams, err = f.create(ctx, dir)
}
if err != nil {
return err
@@ -567,7 +572,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entriess := make([][]upstream.Entry, len(f.upstreams))
entriesList := make([][]upstream.Entry, len(f.upstreams))
errs := Errors(make([]error, len(f.upstreams)))
multithread(len(f.upstreams), func(i int) {
u := f.upstreams[i]
@@ -580,7 +585,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for j, e := range entries {
uEntries[j], _ = u.WrapEntry(e)
}
entriess[i] = uEntries
entriesList[i] = uEntries
})
if len(errs) == len(errs.FilterNil()) {
errs = errs.Map(func(e error) error {
@@ -594,7 +599,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
return nil, errs.Err()
}
return f.mergeDirEntries(entriess)
return f.mergeDirEntries(entriesList)
}

// ListR lists the objects and directories of the Fs starting
@@ -614,7 +619,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
var entriess [][]upstream.Entry
var entriesList [][]upstream.Entry
errs := Errors(make([]error, len(f.upstreams)))
var mutex sync.Mutex
multithread(len(f.upstreams), func(i int) {
@@ -626,7 +631,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
uEntries[j], _ = u.WrapEntry(e)
}
mutex.Lock()
entriess = append(entriess, uEntries)
entriesList = append(entriesList, uEntries)
mutex.Unlock()
return nil
}
@@ -653,7 +658,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
return errs.Err()
}
entries, err := f.mergeDirEntries(entriess)
entries, err := f.mergeDirEntries(entriesList)
if err != nil {
return err
}
@@ -724,9 +729,9 @@ func (f *Fs) searchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
|
||||
return f.searchPolicy.SearchEntries(entries...)
|
||||
}
|
||||
|
||||
func (f *Fs) mergeDirEntries(entriess [][]upstream.Entry) (fs.DirEntries, error) {
|
||||
func (f *Fs) mergeDirEntries(entriesList [][]upstream.Entry) (fs.DirEntries, error) {
|
||||
entryMap := make(map[string]([]upstream.Entry))
|
||||
for _, en := range entriess {
|
||||
for _, en := range entriesList {
|
||||
if en == nil {
|
||||
continue
|
||||
}
|
||||
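The rename from entriess to entriesList is purely cosmetic; the merge itself groups the wrapped entries from every upstream by remote path. A rough sketch of that grouping step, assuming upstream.Entry exposes the usual Remote() method from fs.DirEntry:

```go
// Sketch only: collect entries from all upstreams keyed by remote path, so
// the same path appearing on several upstreams can be merged into one entry.
entryMap := make(map[string][]upstream.Entry)
for _, entries := range entriesList {
    for _, e := range entries {
        remote := e.Remote()
        entryMap[remote] = append(entryMap[remote], e)
    }
}
```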
@@ -818,6 +823,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    if err != nil {
        return nil, err
    }
    fs.Debugf(f, "actionPolicy = %T, createPolicy = %T, searchPolicy = %T", f.actionPolicy, f.createPolicy, f.searchPolicy)
    var features = (&fs.Features{
        CaseInsensitive: true,
        DuplicateFiles:  false,

@@ -52,7 +52,7 @@ type Object struct {
    f *Fs
}

// Entry describe a warpped fs.DirEntry interface with the
// Entry describe a wrapped fs.DirEntry interface with the
// information of upstream Fs
type Entry interface {
    fs.DirEntry

@@ -12,7 +12,7 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestWebdavNexcloud:",
        RemoteName: "TestWebdavNextcloud:",
        NilObject:  (*webdav.Object)(nil),
    })
}

@@ -20,7 +20,7 @@ type ResourceInfoRequestOptions struct {
    Fields []string
}

//ResourceInfoResponse struct is returned by the API for metedata requests.
//ResourceInfoResponse struct is returned by the API for metadata requests.
type ResourceInfoResponse struct {
    PublicKey string `json:"public_key"`
    Name      string `json:"name"`

@@ -324,7 +324,7 @@ func compileArch(version, goos, goarch, dir string) bool {
    artifacts := []string{buildZip(dir)}
    // build a .deb and .rpm if appropriate
    if goos == "linux" {
        artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
        artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
    }
    if *copyAs != "" {
        for _, artifact := range artifacts {
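The .deb/.rpm fix passes the architecture through stripVersion first. That helper is not shown in this hunk; a hypothetical sketch, assuming it simply trims a trailing variant suffix from names like arm-v7 so the package tools see a plain arm:

```go
// Hypothetical sketch of stripVersion - the real helper lives elsewhere in
// rclone's build tooling. It trims a trailing "-vN", e.g. "arm-v7" -> "arm".
var versionSuffix = regexp.MustCompile(`-v\d+$`)

func stripVersion(goarch string) string {
    return versionSuffix.ReplaceAllString(goarch, "")
}
```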
@@ -141,7 +141,7 @@ def main():
    for name in sorted(bugfixes.keys()):
        out(name)

    # Read old changlog and split
    # Read old changelog and split
    with open("docs/content/changelog.md") as fd:
        old_changelog = fd.read()
    heading = "# Changelog"

@@ -19,7 +19,7 @@ var (
    // Flags
    numberOfFiles            = flag.Int("n", 1000, "Number of files to create")
    averageFilesPerDirectory = flag.Int("files-per-directory", 10, "Average number of files per directory")
    maxDepth                 = flag.Int("max-depth", 10, "Maximum depth of directory heirachy")
    maxDepth                 = flag.Int("max-depth", 10, "Maximum depth of directory hierarchy")
    minFileSize              = flag.Int64("min-size", 0, "Minimum size of file to create")
    maxFileSize              = flag.Int64("max-size", 100, "Maximum size of files to create")
    minFileNameLength        = flag.Int("min-name-length", 4, "Minimum size of file to create")
@@ -61,7 +61,7 @@ func fileName() (name string) {
    return name
}

// dir is a directory in the directory heirachy being built up
// dir is a directory in the directory hierarchy being built up
type dir struct {
    name  string
    depth int
@@ -69,7 +69,7 @@ type dir struct {
    parent *dir
}

// Create a random directory heirachy under d
// Create a random directory hierarchy under d
func (d *dir) createDirectories() {
    for totalDirectories < directoriesToCreate {
        newDir := &dir{
@@ -91,7 +91,7 @@ func (d *dir) createDirectories() {
    return
}

// list the directory heirachy
// list the directory hierarchy
func (d *dir) list(path string, output []string) []string {
    dirPath := filepath.Join(path, d.name)
    output = append(output, dirPath)

@@ -10,6 +10,8 @@ import (
    "os"
    "os/exec"
    "regexp"

    "github.com/coreos/go-semver/semver"
)

// version=$(sed <VERSION -e 's/\.[0-9]+*$//g')
@@ -28,7 +30,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
    cmd := exec.Command("git", "log", "--oneline", from+".."+to)
    out, err := cmd.Output()
    if err != nil {
        log.Fatalf("failed to run git log: %v", err)
        log.Fatalf("failed to run git log %s: %v", from+".."+to, err)
    }
    logMap = map[string]string{}
    logs = []string{}
@@ -53,15 +55,20 @@ func main() {
    if len(args) != 0 {
        log.Fatalf("Syntax: %s", os.Args[0])
    }
    // v1.54.0
    versionBytes, err := ioutil.ReadFile("VERSION")
    if err != nil {
        log.Fatalf("Failed to read version: %v", err)
    }
    i := bytes.LastIndexByte(versionBytes, '.')
    version := string(versionBytes[:i])
    log.Printf("Finding commits not in stable %s", version)
    masterMap, masterLogs := readCommits(version+".0", "master")
    stableMap, _ := readCommits(version+".0", version+"-stable")
    if versionBytes[0] == 'v' {
        versionBytes = versionBytes[1:]
    }
    versionBytes = bytes.TrimSpace(versionBytes)
    semver := semver.New(string(versionBytes))
    stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
    log.Printf("Finding commits in %v not in stable %s", semver, stable)
    masterMap, masterLogs := readCommits(stable+".0", "master")
    stableMap, _ := readCommits(stable+".0", stable+"-stable")
    for _, logMessage := range masterLogs {
        // Commit found in stable already
        if _, found := stableMap[logMessage]; found {
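The new logic derives the previous stable branch from the VERSION file rather than chopping the string by hand. A small self-contained illustration of the coreos/go-semver calculation used above:

```go
package main

import (
    "fmt"

    "github.com/coreos/go-semver/semver"
)

func main() {
    // VERSION typically contains something like "v1.54.0"; after trimming
    // the leading 'v' and whitespace it parses as a semantic version.
    v := semver.New("1.54.0")
    stable := fmt.Sprintf("v%d.%d", v.Major, v.Minor-1)
    fmt.Println(stable) // v1.53 - the branch the commits are compared against
}
```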
@@ -14,7 +14,7 @@ func init() {

var commandDefinition = &cobra.Command{
    Use:   "cleanup remote:path",
    Short: `Clean up the remote if possible`,
    Short: `Clean up the remote if possible.`,
    Long: `
Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.

24
cmd/cmd.go
@@ -1,4 +1,4 @@
// Package cmd implemnts the rclone command
// Package cmd implements the rclone command
//
// It is in a sub package so its internals can be re-used elsewhere
package cmd
@@ -21,6 +21,7 @@ import (
    "sync"
    "time"

    systemd "github.com/iguanesolutions/go-systemd/v5"
    "github.com/pkg/errors"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
@@ -35,6 +36,7 @@ import (
    "github.com/rclone/rclone/fs/rc/rcflags"
    "github.com/rclone/rclone/fs/rc/rcserver"
    "github.com/rclone/rclone/lib/atexit"
    "github.com/rclone/rclone/lib/terminal"
    "github.com/spf13/cobra"
    "github.com/spf13/pflag"
)
@@ -288,6 +290,11 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
    }
    fs.Debugf(nil, "%d go routines active\n", runtime.NumGoroutine())

    if fs.Config.Progress && fs.Config.ProgressTerminalTitle {
        // Clear terminal title
        terminal.WriteTerminalTitle("")
    }

    // dump all running go-routines
    if fs.Config.Dump&fs.DumpGoRoutines != 0 {
        err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
@@ -364,6 +371,12 @@ func StartStats() func() {

// initConfig is run by cobra after initialising the flags
func initConfig() {
    // Activate logger systemd support if systemd invocation ID is detected
    _, sysdLaunch := systemd.GetInvocationID()
    if sysdLaunch {
        fs.Config.LogSystemdSupport = true // used during fslog.InitLogging()
    }

    // Start the logger
    fslog.InitLogging()
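systemd.GetInvocationID comes from iguanesolutions/go-systemd; under the hood, systemd exports the INVOCATION_ID environment variable to processes it launches. A minimal sketch of the same detection using only the standard library (an assumption about the mechanism, not rclone's code):

```go
// launchedBySystemd sketches the detection used above: systemd sets
// INVOCATION_ID for units it starts, so its presence implies a systemd launch.
func launchedBySystemd() bool {
    _, ok := os.LookupEnv("INVOCATION_ID")
    return ok
}
```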
@@ -379,6 +392,13 @@ func initConfig() {
    // Write the args for debug purposes
    fs.Debugf("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)

    // Inform user about systemd log support now that we have a logger
    if sysdLaunch {
        fs.Debugf("rclone", "systemd logging support automatically activated")
    } else if fs.Config.LogSystemdSupport {
        fs.Debugf("rclone", "systemd logging support manually activated")
    }

    // Start the remote control server if configured
    _, err = rcserver.Start(&rcflags.Opt)
    if err != nil {
@@ -493,7 +513,7 @@ func AddBackendFlags() {
        if opt.IsPassword {
            help += " (obscured)"
        }
        flag := pflag.CommandLine.VarPF(opt, name, opt.ShortOpt, help)
        flag := flags.VarPF(pflag.CommandLine, opt, name, opt.ShortOpt, help)
        if _, isBool := opt.Default.(bool); isBool {
            flag.NoOptDefVal = "true"
        }

@@ -270,7 +270,7 @@ func (fsys *FS) Releasedir(path string, fh uint64) (errc int) {
    return fsys.closeHandle(fh)
}

// Statfs reads overall stats on the filessystem
// Statfs reads overall stats on the filesystem
func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
    defer log.Trace(path, "")("stat=%+v, errc=%d", stat, &errc)
    const blockSize = 4096

@@ -111,7 +111,7 @@ whether the password is already obscured or not and put unobscured
passwords into the config file. If you want to be 100% certain that
the passwords get obscured then use the "--obscure" flag, or if you
are 100% certain you are already passing obscured passwords then use
"--no-obscure". You can also set osbscured passwords using the
"--no-obscure". You can also set obscured passwords using the
"rclone config password" command.
`

@@ -22,7 +22,7 @@ func init() {

var commandDefinition = &cobra.Command{
    Use:   "copy source:path dest:path",
    Short: `Copy files from source to dest, skipping already copied`,
    Short: `Copy files from source to dest, skipping already copied.`,
    Long: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or

@@ -15,7 +15,7 @@ func init() {

var commandDefinition = &cobra.Command{
    Use:   "copyto source:path dest:path",
    Short: `Copy files from source to dest, skipping already copied`,
    Short: `Copy files from source to dest, skipping already copied.`,
    Long: `
If source:path is a file or directory then it copies it to a file or
directory named dest:path.

@@ -43,7 +43,7 @@ This means that for most duplicated files the ` + "`dedupe`" + `
command will not be interactive.

` + "`dedupe`" + ` considers files to be identical if they have the
same hash. If the backend does not support hashes (eg crypt wrapping
same file path and the same hash. If the backend does not support hashes (eg crypt wrapping
Google Drive) then they will never be found to be identical. If you
use the ` + "`--size-only`" + ` flag then files will be considered
identical if they have the same size (any hash will be ignored). This

@@ -2,6 +2,7 @@ package genautocomplete

import (
    "log"
    "os"

    "github.com/rclone/rclone/cmd"
    "github.com/spf13/cobra"
@@ -29,11 +30,20 @@ them directly

If you supply a command line argument the script will be written
there.

If output_file is "-", then the output will be written to stdout.
`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(0, 1, command, args)
        out := "/etc/bash_completion.d/rclone"
        if len(args) > 0 {
            if args[0] == "-" {
                err := cmd.Root.GenBashCompletion(os.Stdout)
                if err != nil {
                    log.Fatal(err)
                }
                return
            }
            out = args[0]
        }
        err := cmd.Root.GenBashCompletionFile(out)

@@ -2,6 +2,7 @@ package genautocomplete

import (
    "log"
    "os"

    "github.com/rclone/rclone/cmd"
    "github.com/spf13/cobra"
@@ -29,11 +30,20 @@ them directly

If you supply a command line argument the script will be written
there.

If output_file is "-", then the output will be written to stdout.
`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(0, 1, command, args)
        out := "/etc/fish/completions/rclone.fish"
        if len(args) > 0 {
            if args[0] == "-" {
                err := cmd.Root.GenFishCompletion(os.Stdout, true)
                if err != nil {
                    log.Fatal(err)
                }
                return
            }
            out = args[0]
        }
        err := cmd.Root.GenFishCompletionFile(out, true)

@@ -11,8 +11,10 @@ import (
func TestCompletionBash(t *testing.T) {
    tempFile, err := ioutil.TempFile("", "completion_bash")
    assert.NoError(t, err)
    defer func() { _ = tempFile.Close() }()
    defer func() { _ = os.Remove(tempFile.Name()) }()
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name())
    }()

    bashCommandDefinition.Run(bashCommandDefinition, []string{tempFile.Name()})

@@ -21,11 +23,32 @@ func TestCompletionBash(t *testing.T) {
    assert.NotEmpty(t, string(bs))
}

func TestCompletionBashStdout(t *testing.T) {
    originalStdout := os.Stdout
    tempFile, err := ioutil.TempFile("", "completion_zsh")
    assert.NoError(t, err)
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name())
    }()

    os.Stdout = tempFile
    defer func() { os.Stdout = originalStdout }()

    bashCommandDefinition.Run(bashCommandDefinition, []string{"-"})

    output, err := ioutil.ReadFile(tempFile.Name())
    assert.NoError(t, err)
    assert.NotEmpty(t, string(output))
}

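These stdout tests all repeat the same capture idiom: swap os.Stdout for a temp file, run the command, then read the file back. A hedged sketch of how that idiom could be factored into a single helper (not something this diff itself does), assuming the code under test writes via os.Stdout directly:

```go
// captureStdout runs fn with os.Stdout redirected to a temp file and returns
// whatever fn wrote. Only output sent through os.Stdout is captured.
func captureStdout(t *testing.T, fn func()) string {
    t.Helper()
    tempFile, err := ioutil.TempFile("", "captured_stdout")
    assert.NoError(t, err)
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name())
    }()

    originalStdout := os.Stdout
    os.Stdout = tempFile
    defer func() { os.Stdout = originalStdout }()

    fn()

    output, err := ioutil.ReadFile(tempFile.Name())
    assert.NoError(t, err)
    return string(output)
}
```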
func TestCompletionZsh(t *testing.T) {
    tempFile, err := ioutil.TempFile("", "completion_zsh")
    assert.NoError(t, err)
    defer func() { _ = tempFile.Close() }()
    defer func() { _ = os.Remove(tempFile.Name()) }()
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name())
    }()

    zshCommandDefinition.Run(zshCommandDefinition, []string{tempFile.Name()})

@@ -34,11 +57,31 @@ func TestCompletionZsh(t *testing.T) {
    assert.NotEmpty(t, string(bs))
}

func TestCompletionZshStdout(t *testing.T) {
    originalStdout := os.Stdout
    tempFile, err := ioutil.TempFile("", "completion_zsh")
    assert.NoError(t, err)
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name())
    }()

    os.Stdout = tempFile
    defer func() { os.Stdout = originalStdout }()

    zshCommandDefinition.Run(zshCommandDefinition, []string{"-"})
    output, err := ioutil.ReadFile(tempFile.Name())
    assert.NoError(t, err)
    assert.NotEmpty(t, string(output))
}

func TestCompletionFish(t *testing.T) {
    tempFile, err := ioutil.TempFile("", "completion_fish")
    assert.NoError(t, err)
    defer func() { _ = tempFile.Close() }()
    defer func() { _ = os.Remove(tempFile.Name()) }()
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name())
    }()

    fishCommandDefinition.Run(fishCommandDefinition, []string{tempFile.Name()})

@@ -46,3 +89,22 @@ func TestCompletionFish(t *testing.T) {
    assert.NoError(t, err)
    assert.NotEmpty(t, string(bs))
}

func TestCompletionFishStdout(t *testing.T) {
    originalStdout := os.Stdout
    tempFile, err := ioutil.TempFile("", "completion_zsh")
    assert.NoError(t, err)
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name())
    }()

    os.Stdout = tempFile
    defer func() { os.Stdout = originalStdout }()

    fishCommandDefinition.Run(fishCommandDefinition, []string{"-"})

    output, err := ioutil.ReadFile(tempFile.Name())
    assert.NoError(t, err)
    assert.NotEmpty(t, string(output))
}

@@ -30,11 +30,20 @@ them directly

If you supply a command line argument the script will be written
there.

If output_file is "-", then the output will be written to stdout.
`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(0, 1, command, args)
        out := "/usr/share/zsh/vendor-completions/_rclone"
        if len(args) > 0 {
            if args[0] == "-" {
                err := cmd.Root.GenZshCompletion(os.Stdout)
                if err != nil {
                    log.Fatal(err)
                }
                return
            }
            out = args[0]
        }
        outFile, err := os.Create(out)

@@ -49,7 +49,7 @@ func init() {
    flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", true, "Check control characters.")
    flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file.")
    flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", true, "Check max filename length.")
    flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploadxs with indeterminate file size.")
    flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.")
}

var commandDefinition = &cobra.Command{

@@ -44,7 +44,7 @@ func init() {

var commandDefinition = &cobra.Command{
    Use:   "lsf remote:path",
    Short: `List directories and objects in remote:path formatted for parsing`,
    Short: `List directories and objects in remote:path formatted for parsing.`,
    Long: `
List the contents of the source path (directories and objects) to
standard output in a form which is easy to parse by scripts. By

@@ -45,7 +45,7 @@ func newFileHandle(h vfs.Handle, fsys *FS) *FileHandle {
        }
    }

// Check interface satistfied
// Check interface satisfied
var _ fusefs.FileHandle = (*FileHandle)(nil)

// The String method is for debug printing.

@@ -67,8 +67,8 @@ func setAttr(node vfs.Node, attr *fuse.Attr) {
    modTime := node.ModTime()
    // set attributes
    vfs := node.VFS()
    attr.Owner.Gid = vfs.Opt.UID
    attr.Owner.Uid = vfs.Opt.GID
    attr.Owner.Gid = vfs.Opt.GID
    attr.Owner.Uid = vfs.Opt.UID
    attr.Mode = getMode(node)
    attr.Size = Size
    attr.Nlink = 1

@@ -258,7 +258,7 @@ var _ fusefs.DirStream = (*dirStream)(nil)

// Readdir opens a stream of directory entries.
//
// Readdir essentiallly returns a list of strings, and it is allowed
// Readdir essentially returns a list of strings, and it is allowed
// for Readdir to return different results from Lookup. For example,
// you can return nothing for Readdir ("ls my-fuse-mount" is empty),
// while still implementing Lookup ("ls my-fuse-mount/a-specific-file"

@@ -11,7 +11,7 @@ import (
    "syscall"
    "time"

    "github.com/okzk/sdnotify"
    sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"
    "github.com/pkg/errors"
    "github.com/rclone/rclone/cmd"
    "github.com/rclone/rclone/fs"
@@ -83,8 +83,8 @@ func AddFlags(flagSet *pflag.FlagSet) {
    rc.AddOption("mount", &Opt)
    flags.BoolVarP(flagSet, &Opt.DebugFUSE, "debug-fuse", "", Opt.DebugFUSE, "Debug the FUSE internals - needs -v.")
    flags.BoolVarP(flagSet, &Opt.AllowNonEmpty, "allow-non-empty", "", Opt.AllowNonEmpty, "Allow mounting over a non-empty directory (not Windows).")
    flags.BoolVarP(flagSet, &Opt.AllowRoot, "allow-root", "", Opt.AllowRoot, "Allow access to root user.")
    flags.BoolVarP(flagSet, &Opt.AllowOther, "allow-other", "", Opt.AllowOther, "Allow access to other users.")
    flags.BoolVarP(flagSet, &Opt.AllowRoot, "allow-root", "", Opt.AllowRoot, "Allow access to root user (not Windows).")
    flags.BoolVarP(flagSet, &Opt.AllowOther, "allow-other", "", Opt.AllowOther, "Allow access to other users (not Windows).")
    flags.BoolVarP(flagSet, &Opt.DefaultPermissions, "default-permissions", "", Opt.DefaultPermissions, "Makes kernel enforce access control based on the file mode.")
    flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used.")
    flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads.")
@@ -101,7 +101,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
    }
}

// Check is folder is empty
// Check if folder is empty
func checkMountEmpty(mountpoint string) error {
    fp, fpErr := os.Open(mountpoint)
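checkMountEmpty is only shown down to the os.Open call here. For reference, a minimal sketch of the standard emptiness check using the Readdirnames idiom (request one name; io.EOF means the directory is empty) - assumed shape, not rclone's exact implementation:

```go
// isDirEmpty reads a single directory entry to distinguish "empty" (io.EOF)
// from "has contents". Requires the os and io packages.
func isDirEmpty(dir string) (bool, error) {
    fp, err := os.Open(dir)
    if err != nil {
        return false, err
    }
    defer func() { _ = fp.Close() }()
    _, err = fp.Readdirnames(1)
    if err == io.EOF {
        return true, nil
    }
    return false, err
}
```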
@@ -162,7 +162,7 @@ FUSE.
First set up your remote using `rclone config`. Check it works with `rclone ls` etc.

You can either run mount in foreground mode or background (daemon) mode. Mount runs in
foreground mode by default, use the --daemon flag to specify background mode mode.
foreground mode by default, use the --daemon flag to specify background mode.
Background mode is only supported on Linux and OSX, you can only run mount in
foreground mode on Windows.

@@ -192,6 +192,9 @@ Stopping the mount manually:

    # OS X
    umount /path/to/local/mount

**Note**: As of ` + "`rclone` 1.52.2, `rclone mount`" + ` now requires Go version 1.13
or newer on some platforms depending on the underlying FUSE library in use.

### Installing on Windows

To run rclone ` + commandName + ` on Windows, you will need to
@@ -333,9 +336,6 @@ With --vfs-read-chunk-size 100M and --vfs-read-chunk-size-limit 0 the following
parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When --vfs-read-chunk-size-limit 500M is specified, the result would be
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.

Chunked reading will only work with --vfs-cache-mode < full, as the file will always
be copied to the vfs cache before opening with --vfs-cache-mode full.
` + vfs.Help,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(2, 2, command, args)
@@ -359,15 +359,24 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
            defer cmd.StartStats()()
        }

        // Skip checkMountEmpty if --allow-non-empty flag is used or if
        // the Operating System is Windows
        if !opt.AllowNonEmpty && runtime.GOOS != "windows" {
        // Inform about ignored flags on Windows,
        // and if not on Windows and not --allow-non-empty flag is used
        // verify that mountpoint is empty.
        if runtime.GOOS == "windows" {
            if opt.AllowNonEmpty {
                fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
            }
            if opt.AllowRoot {
                fs.Logf(nil, "--allow-root flag does nothing on Windows")
            }
            if opt.AllowOther {
                fs.Logf(nil, "--allow-other flag does nothing on Windows")
            }
        } else if !opt.AllowNonEmpty {
            err := checkMountEmpty(mountpoint)
            if err != nil {
                log.Fatalf("Fatal error: %v", err)
            }
        } else if opt.AllowNonEmpty && runtime.GOOS == "windows" {
            fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
        }

        // Work out the volume name, removing special
@@ -448,13 +457,13 @@ func Mount(VFS *vfs.VFS, mountpoint string, mount MountFn, opt *Options) error {

    // Unmount on exit
    fnHandle := atexit.Register(func() {
        _ = sysdnotify.Stopping()
        _ = unmount()
        _ = sdnotify.Stopping()
    })
    defer atexit.Unregister(fnHandle)

    // Notify systemd
    if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
    if err := sysdnotify.Ready(); err != nil {
        return errors.Wrap(err, "failed to notify systemd")
    }

@@ -479,8 +488,8 @@ waitloop:
        }
    }

    _ = sysdnotify.Stopping()
    _ = unmount()
    _ = sdnotify.Stopping()

    if err != nil {
        return errors.Wrap(err, "failed to umount FUSE fs")

@@ -71,11 +71,11 @@ func helpText() (tr []string) {
        " ←,h to return",
        " c toggle counts",
        " g toggle graph",
        " n,s,C sort by name,size,count",
        " n,s,C,A sort by name,size,count,average size",
        " d delete file/directory",
    }
    if !clipboard.Unsupported {
        tr = append(tr, " y copy current path to clipbard")
        tr = append(tr, " y copy current path to clipboard")
    }
    tr = append(tr, []string{
        " Y display current path",
@@ -88,27 +88,28 @@ func helpText() (tr []string) {

// UI contains the state of the user interface
type UI struct {
    f              fs.Fs     // fs being displayed
    fsName         string    // human name of Fs
    root           *scan.Dir // root directory
    d              *scan.Dir // current directory being displayed
    path           string    // path of current directory
    showBox        bool      // whether to show a box
    boxText        []string  // text to show in box
    boxMenu        []string  // box menu options
    boxMenuButton  int
    boxMenuHandler func(fs fs.Fs, path string, option int) (string, error)
    entries        fs.DirEntries // entries of current directory
    sortPerm       []int         // order to display entries in after sorting
    invSortPerm    []int         // inverse order
    dirListHeight  int           // height of listing
    listing        bool          // whether listing is in progress
    showGraph      bool          // toggle showing graph
    showCounts     bool          // toggle showing counts
    sortByName     int8          // +1 for normal, 0 for off, -1 for reverse
    sortBySize     int8
    sortByCount    int8
    dirPosMap      map[string]dirPos // store for directory positions
    f                 fs.Fs     // fs being displayed
    fsName            string    // human name of Fs
    root              *scan.Dir // root directory
    d                 *scan.Dir // current directory being displayed
    path              string    // path of current directory
    showBox           bool      // whether to show a box
    boxText           []string  // text to show in box
    boxMenu           []string  // box menu options
    boxMenuButton     int
    boxMenuHandler    func(fs fs.Fs, path string, option int) (string, error)
    entries           fs.DirEntries // entries of current directory
    sortPerm          []int         // order to display entries in after sorting
    invSortPerm       []int         // inverse order
    dirListHeight     int           // height of listing
    listing           bool          // whether listing is in progress
    showGraph         bool          // toggle showing graph
    showCounts        bool          // toggle showing counts
    sortByName        int8          // +1 for normal, 0 for off, -1 for reverse
    sortBySize        int8
    sortByCount       int8
    sortByAverageSize int8
    dirPosMap         map[string]dirPos // store for directory positions
}

// Where we have got to in the directory listing
@@ -496,9 +497,17 @@ type ncduSort struct {

// Less is part of sort.Interface.
func (ds *ncduSort) Less(i, j int) bool {
    var iAvgSize, jAvgSize float64
    isize, icount, _, _ := ds.d.AttrI(ds.sortPerm[i])
    jsize, jcount, _, _ := ds.d.AttrI(ds.sortPerm[j])
    iname, jname := ds.entries[ds.sortPerm[i]].Remote(), ds.entries[ds.sortPerm[j]].Remote()
    if icount > 0 {
        iAvgSize = float64(isize / icount)
    }
    if jcount > 0 {
        jAvgSize = float64(jsize / jcount)
    }

    switch {
    case ds.u.sortByName < 0:
        return iname > jname
@@ -520,6 +529,18 @@ func (ds *ncduSort) Less(i, j int) bool {
        if icount != jcount {
            return icount > jcount
        }
    case ds.u.sortByAverageSize < 0:
        if iAvgSize != jAvgSize {
            return iAvgSize < jAvgSize
        }
        // if avgSize is equal, sort by size
        return isize < jsize
    case ds.u.sortByAverageSize > 0:
        if iAvgSize != jAvgSize {
            return iAvgSize > jAvgSize
        }
        // if avgSize is equal, sort by size
        return isize > jsize
    }
    // if everything equal, sort by name
    return iname < jname
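One subtlety in the average-size computation above: `float64(isize / icount)` divides the integers first and only then converts, so averages are truncated to whole bytes before being compared. A tiny illustration of the difference (the truncated form is what the patch uses, which is adequate for ordering but drops the fractional part):

```go
package main

import "fmt"

func main() {
    var size, count int64 = 7, 2
    fmt.Println(float64(size / count))          // 3 - integer division, then convert
    fmt.Println(float64(size) / float64(count)) // 3.5 - convert, then divide
}
```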
@@ -628,6 +649,7 @@ func (u *UI) toggleSort(sortType *int8) {
    u.sortBySize = 0
    u.sortByCount = 0
    u.sortByName = 0
    u.sortByAverageSize = 0
    if old == 0 {
        *sortType = 1
    } else {
@@ -742,6 +764,8 @@ outer:
            u.toggleSort(&u.sortBySize)
        case 'C':
            u.toggleSort(&u.sortByCount)
        case 'A':
            u.toggleSort(&u.sortByAverageSize)
        case 'y':
            u.copyPath()
        case 'Y':

@@ -17,7 +17,7 @@ func init() {

var commandDefinition = &cobra.Command{
    Use:   "obscure password",
    Short: `Obscure password for use in the rclone config file`,
    Short: `Obscure password for use in the rclone config file.`,
    Long: `In the rclone config file, human readable passwords are
obscured. Obscuring them is done by encrypting them and writing them
out in base64. This is **not** a secure way of encrypting these

@@ -208,7 +208,7 @@ func TestGET(t *testing.T) {
    body, err := ioutil.ReadAll(resp.Body)
    require.NoError(t, err)

    // Check we got a Last-Modifed header and that it is a valid date
    // Check we got a Last-Modified header and that it is a valid date
    if test.Status == http.StatusOK || test.Status == http.StatusPartialContent {
        lastModified := resp.Header.Get("Last-Modified")
        assert.NotEqual(t, "", lastModified, test.Golden)

@@ -61,7 +61,7 @@ to be used within the template to serve pages:
| .Name | The full path of a file/directory. |
| .Title | Directory listing of .Name |
| .Sort | The current sort used. This is changeable via ?sort= parameter |
| | Sort Options: namedirfist,name,size,time (default namedirfirst) |
| | Sort Options: namedirfirst,name,size,time (default namedirfirst) |
| .Order | The current ordering used. This is changeable via ?order= parameter |
| | Order Options: asc,desc (default asc) |
| .Query | Currently unused. |

@@ -132,7 +132,7 @@ func Error(what interface{}, w http.ResponseWriter, text string, err error) {
    }
}

// ProcessQueryParams takes and sorts/orders based on the request sort/order parameters and default is namedirfist/asc
// ProcessQueryParams takes and sorts/orders based on the request sort/order parameters and default is namedirfirst/asc
func (d *Directory) ProcessQueryParams(sortParm string, orderParm string) *Directory {
    d.Sort = sortParm
    d.Order = orderParm

@@ -27,7 +27,7 @@ var Help = strings.Replace(`
If you supply the parameter |--auth-proxy /path/to/program| then
rclone will use that program to generate backends on the fly which
then are used to authenticate incoming requests. This uses a simple
JSON based protocl with input on STDIN and output on STDOUT.
JSON based protocol with input on STDIN and output on STDOUT.

**PLEASE NOTE:** |--auth-proxy| and |--authorized-keys| cannot be used
together, if |--auth-proxy| is set the authorized keys option will be

@@ -64,7 +64,7 @@ func TestTouchWithTimestamp(t *testing.T) {
    checkFile(t, r.Fremote, srcFileName, "")
}

func TestTouchWithLognerTimestamp(t *testing.T) {
func TestTouchWithLongerTimestamp(t *testing.T) {
    r := fstest.NewRun(t)
    defer r.Finalise()

@@ -148,6 +148,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
{{< provider name="Tardigrade" home="https://tardigrade.io/" config="/tardigrade/" >}}
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
{{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}}
{{< provider name="Yandex Disk" home="https://disk.yandex.com/" config="/yandex/" >}}

@@ -409,3 +409,16 @@ put them back in again.` >}}
* Lucas Kanashiro <lucas.kanashiro@canonical.com>
* WarpedPixel <WarpedPixel@users.noreply.github.com>
* Sam Edwards <sam@samedwards.ca>
* wjielai <gouki0123@gmail.com>
* Muffin King <jinxz_k@live.com>
* Christopher Stewart <6573710+1f47a@users.noreply.github.com>
* Russell Cattelan <cattelan@digitalelves.com>
* gyutw <30371241+gyutw@users.noreply.github.com>
* Hekmon <edouardhur@gmail.com>
* LaSombra <lasombra@users.noreply.github.com>
* Dov Murik <dov.murik@gmail.com>
* Ameer Dawood <ameer1234567890@gmail.com>
* Dan Hipschman <dan.hipschman@opendoor.com>
* Josh Soref <jsoref@users.noreply.github.com>
* David <david@staron.nl>
* Ingo <ingo@hoffmann.cx>

@@ -404,6 +404,7 @@ Note that Box is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".

Box file names can't have the `\` character in. rclone maps this to
and from an identical looking unicode equivalent `＼`.
and from an identical looking unicode equivalent `＼` (U+FF3C Fullwidth
Reverse Solidus).

Box only supports filenames up to 255 characters in length.
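For illustration, here is roughly what that character mapping amounts to; rclone's lib/encoder handles this internally, so this sketch is not the real implementation:

```go
package main

import (
    "fmt"
    "strings"
)

// Sketch of the Box name mapping described above: swap '\' for its
// fullwidth look-alike (U+FF3C) on upload and reverse it on download.
var (
    encodeBoxName = strings.NewReplacer(`\`, "\uff3c")
    decodeBoxName = strings.NewReplacer("\uff3c", `\`)
)

func main() {
    name := `dir\file.txt`
    enc := encodeBoxName.Replace(name)
    fmt.Println(enc)                                // dir＼file.txt (U+FF3C)
    fmt.Println(decodeBoxName.Replace(enc) == name) // true - round trips
}
```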
@@ -5,6 +5,86 @@ description: "Rclone Changelog"

# Changelog

## v1.53.2 - 2020-10-26

[See commits](https://github.com/rclone/rclone/compare/v1.53.1...v1.53.2)

* Bug Fixes
    * accounting
        * Fix incorrect speed and transferTime in core/stats (Nick Craig-Wood)
        * Stabilize display order of transfers on Windows (Nick Craig-Wood)
    * operations
        * Fix use of --suffix without --backup-dir (Nick Craig-Wood)
        * Fix spurious "--checksum is in use but the source and destination have no hashes in common" (Nick Craig-Wood)
    * build
        * Work around GitHub actions brew problem (Nick Craig-Wood)
        * Stop using set-env and set-path in the GitHub actions (Nick Craig-Wood)
* Mount
    * mount2: Fix the swapped UID / GID values (Russell Cattelan)
* VFS
    * Detect and recover from a file being removed externally from the cache (Nick Craig-Wood)
    * Fix a deadlock vulnerability in downloaders.Close (Leo Luan)
    * Fix a race condition in retryFailedResets (Leo Luan)
    * Fix missed concurrency control between some item operations and reset (Leo Luan)
    * Add exponential backoff during ENOSPC retries (Leo Luan)
    * Add a missed update of used cache space (Leo Luan)
    * Fix --no-modtime to not attempt to set modtimes (as documented) (Nick Craig-Wood)
* Local
    * Fix sizes and syncing with --links option on Windows (Nick Craig-Wood)
* Chunker
    * Disable ListR to fix missing files on GDrive (workaround) (Ivan Andreev)
    * Fix upload over crypt (Ivan Andreev)
* Fichier
    * Increase maximum file size from 100GB to 300GB (gyutw)
* Jottacloud
    * Remove clientSecret from config when upgrading to token based authentication (buengese)
    * Avoid double url escaping of device/mountpoint (albertony)
    * Remove DirMove workaround as it's not required anymore - also (buengese)
* Mailru
    * Fix uploads after recent changes on server (Ivan Andreev)
    * Fix range requests after june changes on server (Ivan Andreev)
    * Fix invalid timestamp on corrupted files (fixes) (Ivan Andreev)
* Onedrive
    * Fix disk usage for sharepoint (Nick Craig-Wood)
* S3
    * Add missing regions for AWS (Anagh Kumar Baranwal)
* Seafile
    * Fix accessing libraries > 2GB on 32 bit systems (Muffin King)
* SFTP
    * Always convert the checksum to lower case (buengese)
* Union
    * Create root directories if none exist (Nick Craig-Wood)

## v1.53.1 - 2020-09-13

[See commits](https://github.com/rclone/rclone/compare/v1.53.0...v1.53.1)

* Bug Fixes
    * accounting: Remove new line from end of --stats-one-line display (Nick Craig-Wood)
    * check
        * Add back missing --download flag (Nick Craig-Wood)
        * Fix docs (Nick Craig-Wood)
    * docs
        * Note --log-file does append (Nick Craig-Wood)
        * Add full stops for consistency in rclone --help (edwardxml)
        * Add Tencent COS to s3 provider list (wjielai)
        * Updated mount command to reflect that it requires Go 1.13 or newer (Evan Harris)
        * jottacloud: Mention that uploads from local disk will not need to cache files to disk for md5 calculation (albertony)
        * Fix formatting of rc docs page (Nick Craig-Wood)
    * build
        * Include vendor tar ball in release and fix startdev (Nick Craig-Wood)
        * Fix "Illegal instruction" error for ARMv6 builds (Nick Craig-Wood)
        * Fix architecture name in ARMv7 build (Nick Craig-Wood)
* VFS
    * Fix spurious error "vfs cache: failed to _ensure cache EOF" (Nick Craig-Wood)
    * Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
* Local
    * Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
* Drive
    * Re-adds special oauth help text (Tim Gallant)
* Opendrive
    * Do not retry 400 errors (Evan Harris)

## v1.53.0 - 2020-09-02

[See commits](https://github.com/rclone/rclone/compare/v1.52.0...v1.53.0)
@@ -33,7 +113,7 @@ description: "Rclone Changelog"
    * Add reverse proxy pluginsHandler for serving plugins (Chaitanya Bankanhal)
    * Add `mount/listmounts` option for listing current mounts (Chaitanya Bankanhal)
    * Add `operations/uploadfile` to upload a file through rc using encoding multipart/form-data (Chaitanya Bankanhal)
    * Add `core/copmmand` to execute rclone terminal commands. (Chaitanya Bankanhal)
    * Add `core/command` to execute rclone terminal commands. (Chaitanya Bankanhal)
* `rclone check`
    * Add reporting of filenames for same/missing/changed (Nick Craig-Wood)
    * Make check command obey `--dry-run`/`-i`/`--interactive` (Nick Craig-Wood)
@@ -142,7 +222,7 @@ description: "Rclone Changelog"
* Google Cloud Storage
    * Add support for anonymous access (Kai Lüke)
* Jottacloud
    * Bring back legacy authentification for use with whitelabel versions (buengese)
    * Bring back legacy authentication for use with whitelabel versions (buengese)
    * Switch to new api root - also implement a very ugly workaround for the DirMove failures (buengese)
* Onedrive
    * Rework cancel of multipart uploads on rclone exit (Nick Craig-Wood)
@@ -292,7 +372,7 @@ all the docs and Edward Barker for helping re-write the front page.
    * Add `--header` flag to add HTTP headers to every HTTP transaction (Nick Craig-Wood)
    * Add `--check-first` to do all checking before starting transfers (Nick Craig-Wood)
    * Add `--track-renames-strategy` for configurable matching criteria for `--track-renames` (Bernd Schoolmann)
    * Add `--cutoff-mode` hard,soft,catious (Shing Kit Chan & Franklyn Tackitt)
    * Add `--cutoff-mode` hard,soft,cautious (Shing Kit Chan & Franklyn Tackitt)
    * Filter flags (eg `--files-from -`) can read from stdin (fishbullet)
    * Add `--error-on-no-transfer` option (Jon Fautley)
    * Implement `--order-by xxx,mixed` for copying some small and some big files (Nick Craig-Wood)
@@ -575,7 +655,7 @@ all the docs and Edward Barker for helping re-write the front page.
    * dbhashsum: Stop it returning UNSUPPORTED on dropbox (Nick Craig-Wood)
    * dedupe: Add missing modes to help string (Nick Craig-Wood)
    * operations
        * Fix dedupe continuing on errors like insufficientFilePermisson (SezalAgrawal)
        * Fix dedupe continuing on errors like insufficientFilePersimmon (SezalAgrawal)
        * Clear accounting before low level retry (Maciej Zimnoch)
        * Write debug message when hashes could not be checked (Ole Schütt)
        * Move interface assertion to tests to remove pflag dependency (Nick Craig-Wood)
@@ -639,7 +719,7 @@ all the docs and Edward Barker for helping re-write the front page.
* S3
    * Re-implement multipart upload to fix memory issues (Nick Craig-Wood)
    * Add `--s3-copy-cutoff` for size to switch to multipart copy (Nick Craig-Wood)
    * Add new region Asia Patific (Hong Kong) (Outvi V)
    * Add new region Asia Pacific (Hong Kong) (Outvi V)
    * Reduce memory usage streaming files by reducing max stream upload size (Nick Craig-Wood)
    * Add `--s3-list-chunk` option for bucket listing (Thomas Kriechbaumer)
    * Force path style bucket access to off for AWS deprecation (Nick Craig-Wood)
@@ -900,7 +980,7 @@ all the docs and Edward Barker for helping re-write the front page.
    * rcat: Fix slowdown on systems with multiple hashes (Nick Craig-Wood)
    * rcd: Fix permissions problems on cache directory with web gui download (Nick Craig-Wood)
* Mount
    * Default `--deamon-timout` to 15 minutes on macOS and FreeBSD (Nick Craig-Wood)
    * Default `--daemon-timout` to 15 minutes on macOS and FreeBSD (Nick Craig-Wood)
    * Update docs to show mounting from root OK for bucket based (Nick Craig-Wood)
    * Remove nonseekable flag from write files (Nick Craig-Wood)
* VFS
@@ -1063,7 +1143,7 @@ all the docs and Edward Barker for helping re-write the front page.
    * Add client side TLS/SSL flags `--ca-cert`/`--client-cert`/`--client-key` (Nick Craig-Wood)
    * Implement `--suffix-keep-extension` for use with `--suffix` (Nick Craig-Wood)
    * build:
        * Switch to semvar compliant version tags to be go modules compliant (Nick Craig-Wood)
        * Switch to semver compliant version tags to be go modules compliant (Nick Craig-Wood)
        * Update to use go1.12.x for the build (Nick Craig-Wood)
    * serve dlna: Add connection manager service description to improve compatibility (Dan Walters)
    * lsf: Add 'e' format to show encrypted names and 'o' for original IDs (Nick Craig-Wood)

@@ -27,6 +27,7 @@ them directly
If you supply a command line argument the script will be written
there.

If output_file is `-`, then the output will be written to stdout.

```
rclone genautocomplete bash [output_file] [flags]

@@ -27,6 +27,7 @@ them directly
If you supply a command line argument the script will be written
there.

If output_file is `-`, then the output will be written to stdout.

```
rclone genautocomplete fish [output_file] [flags]

@@ -27,6 +27,7 @@ them directly
If you supply a command line argument the script will be written
there.

If output_file is `-`, then the output will be written to stdout.

```
rclone genautocomplete zsh [output_file] [flags]

@@ -6,23 +6,26 @@ description: "Encryption overlay remote"
{{< icon "fa fa-lock" >}}Crypt
----------------------------------------

The `crypt` remote encrypts and decrypts another remote.
Rclone `crypt` remotes encrypt and decrypt other remotes.

To use it first set up the underlying remote following the config
instructions for that remote. You can also use a local pathname
instead of a remote which will encrypt and decrypt from that directory
which might be useful for encrypting onto a USB stick for example.
To use `crypt`, first set up the underlying remote. Follow the `rclone
config` instructions for that remote.

First check your chosen remote is working - we'll call it
`remote:path` in these docs. Note that anything inside `remote:path`
will be encrypted and anything outside won't. This means that if you
are using a bucket based remote (eg S3, B2, swift) then you should
probably put the bucket in the remote `s3:bucket`. If you just use
`s3:` then rclone will make encrypted bucket names too (if using file
name encryption) which may or may not be what you want.
`crypt` applied to a local pathname instead of a remote will
encrypt and decrypt that directory, and can be used to encrypt USB
removable drives.

Now configure `crypt` using `rclone config`. We will call this one
`secret` to differentiate it from the `remote`.
Before configuring the crypt remote, check the underlying remote is
working. In this example the underlying remote is called `remote:path`.
Anything inside `remote:path` will be encrypted and anything outside
will not. In the case of an S3 based underlying remote (eg Amazon S3,
B2, Swift) it is generally advisable to define a crypt remote in the
underlying remote `s3:bucket`. If `s3:` alone is specified alongside
file name encryption, rclone will encrypt the bucket name.

Configure `crypt` using `rclone config`. In this example the `crypt`
remote is called `secret`, to differentiate it from the underlying
`remote`.

```
No remotes found - make a new one
@@ -96,49 +99,42 @@ d) Delete this remote
y/e/d> y
```

**Important** The password is stored in the config file is lightly
obscured so it isn't immediately obvious what it is. It is in no way
secure unless you use config file encryption.
**Important** The crypt password stored in `rclone.conf` is lightly
obscured. That only protects it from cursory inspection. It is not
secure unless encryption of `rclone.conf` is specified.

A long passphrase is recommended, or you can use a random one.
A long passphrase is recommended, or `rclone config` can generate a
random one.

The obscured password is created by using AES-CTR with a static key, with
the salt stored verbatim at the beginning of the obscured password. This
static key is shared by between all versions of rclone.
The obscured password is created using AES-CTR with a static key. The
salt is stored verbatim at the beginning of the obscured password. This
static key is shared between all versions of rclone.

If you reconfigure rclone with the same passwords/passphrases
elsewhere it will be compatible, but the obscured version will be different
due to the different salt.
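A rough sketch of that obscuring scheme in Go, assuming the shape described above (random IV stored in front, AES-CTR with a fixed key, base64 output) - the real key and helpers live in rclone's obscure package:

```go
import (
    "crypto/aes"
    "crypto/cipher"
    "crypto/rand"
    "encoding/base64"
    "io"
)

// obscure sketches the scheme described above - NOT rclone's actual code, and
// not a secure construction: the AES key is static, so this only hides the
// password from casual inspection.
func obscure(password string, staticKey []byte) (string, error) {
    block, err := aes.NewCipher(staticKey) // staticKey must be 16/24/32 bytes
    if err != nil {
        return "", err
    }
    buf := make([]byte, aes.BlockSize+len(password))
    iv := buf[:aes.BlockSize] // the "salt": a random IV stored verbatim up front
    if _, err := io.ReadFull(rand.Reader, iv); err != nil {
        return "", err
    }
    cipher.NewCTR(block, iv).XORKeyStream(buf[aes.BlockSize:], []byte(password))
    return base64.RawURLEncoding.EncodeToString(buf), nil
}
```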

Note that rclone does not encrypt
Rclone does not encrypt

* file length - this can be calculated within 16 bytes
* modification time - used for syncing

## Specifying the remote ##

In normal use, make sure the remote has a `:` in. If you specify the
remote without a `:` then rclone will use a local directory of that
name. So if you use a remote of `/path/to/secret/files` then rclone
will encrypt stuff to that directory. If you use a remote of `name`
then rclone will put files in a directory called `name` in the current
directory.
In normal use, ensure the remote has a `:` in. If specified without,
rclone uses a local directory of that name. For example if a remote
`/path/to/secret/files` is specified, rclone encrypts content to that
directory. If a remote `name` is specified, rclone targets a directory
`name` in the current directory.

If you specify the remote as `remote:path/to/dir` then rclone will
store encrypted files in `path/to/dir` on the remote. If you are using
file name encryption, then when you save files to
`secret:subdir/subfile` this will store them in the unencrypted path
`path/to/dir` but the `subdir/subpath` bit will be encrypted.

Note that unless you want encrypted bucket names (which are difficult
to manage because you won't know what directory they represent in web
interfaces etc), you should probably specify a bucket, eg
`remote:secretbucket` when using bucket based remotes such as S3,
Swift, Hubic, B2, GCS.
If remote `remote:path/to/dir` is specified, rclone stores encrypted
files in `path/to/dir` on the remote. With file name encryption, files
saved to `secret:subdir/subfile` are stored in the unencrypted path
`path/to/dir` but the `subdir/subpath` element is encrypted.

## Example ##

To test I made a little directory of files using "standard" file name
Create the following file structure using "standard" file name
encryption.

```
@@ -152,7 +148,7 @@ plaintext/
    └── file4.txt
```

Copy these to the remote and list them back
Copy these to the remote, and list them

```
$ rclone -q copy plaintext secret:
@@ -164,7 +160,7 @@ $ rclone -q ls secret:
        9 subdir/file3.txt
```

Now see what that looked like when encrypted
The crypt remote looks like

```
$ rclone -q ls remote:path
@@ -175,7 +171,7 @@ $ rclone -q ls remote:path
       56 86vhrsv86mpbtd3a0akjuqslj8/8njh1sk437gttmep3p70g81aps
```

Note that this retains the directory structure which means you can do this
The directory structure is preserved

```
$ rclone -q ls secret:subdir
@@ -184,9 +180,9 @@ $ rclone -q ls secret:subdir
       10 subsubdir/file4.txt
```

If don't use file name encryption then the remote will look like this
- note the `.bin` extensions added to prevent the cloud provider
attempting to interpret the data.
Without file name encryption `.bin` extensions are added to underlying
names. This prevents the cloud provider attempting to interpret file
content.

```
$ rclone -q ls remote:path
@@ -199,8 +195,6 @@ $ rclone -q ls remote:path

### File name encryption modes ###

Here are some of the features of the file name encryption modes

Off

* doesn't hide file names or directory structure
@@ -219,17 +213,19 @@ Standard
Obfuscation

This is a simple "rotate" of the filename, with each file having a rot
distance based on the filename. We store the distance at the beginning
of the filename. So a file called "hello" may become "53.jgnnq".
distance based on the filename. Rclone stores the distance at the
beginning of the filename. A file called "hello" may become "53.jgnnq".

This is not a strong encryption of filenames, but it may stop automated
scanning tools from picking up on filename patterns. As such it's an
intermediate between "off" and "standard". The advantage is that it
allows for longer path segment names.
Obfuscation is not a strong encryption of filenames, but hinders
automated scanning tools picking up on filename patterns. It is an
intermediate between "off" and "standard" which allows for longer path
segment names.

There is a possibility with some unicode based filenames that the
obfuscation is weak and may map lower case characters to upper case
equivalents. You can not rely on this for strong protection.
equivalents.

Obfuscation cannot be relied upon for strong protection.

* file names very lightly obfuscated
* file names can be longer than standard encryption
@@ -237,13 +233,14 @@ equivalents. You can not rely on this for strong protection.
* directory structure visible
* identical files names will have identical uploaded names

Cloud storage systems have various limits on file name length and
total path length which you are more likely to hit using "Standard"
file name encryption. If you keep your file names to below 156
characters in length then you should be OK on all providers.
Cloud storage systems have limits on file name length and
total path length which rclone is more likely to breach using
"Standard" file name encryption. Where file names are less than 156
characters in length issues should not be encountered, irrespective of
cloud storage provider.

There may be an even more secure file name encryption mode in the
future which will address the long file name problem.
An alternative, future rclone file name encryption mode may tolerate
backend provider path length limits.

### Directory name encryption ###
Crypt offers the option of encrypting dir names or leaving them intact.
@@ -269,10 +266,10 @@ Example:
Crypt stores modification times using the underlying remote so support
depends on that.

Hashes are not stored for crypt. However the data integrity is
Hashes are not stored for crypt. However the data integrity is
protected by an extremely strong crypto authenticator.

Note that you should use the `rclone cryptcheck` command to check the
Use the `rclone cryptcheck` command to check the
integrity of a crypted remote instead of `rclone check` which can't
check the checksums properly.

@@ -757,6 +757,8 @@ This can be useful for tracking down problems with syncs in
|
||||
combination with the `-v` flag. See the [Logging section](#logging)
|
||||
for more info.
|
||||
|
||||
If FILE exists then rclone will append to it.
|
||||
|
||||
Note that if you are using the `logrotate` program to manage rclone's
|
||||
logs, then you should use the `copytruncate` option as rclone doesn't
|
||||
have a signal to rotate logs.
|
||||
@@ -1106,6 +1108,11 @@ Note: On Windows until [this bug](https://github.com/Azure/go-ansiterm/issues/26
|
||||
is fixed all non-ASCII characters will be replaced with `.` when
|
||||
`--progress` is in use.
|
||||
|
||||
### --progress-terminal-title ###
|
||||
|
||||
This flag, when used with `-P/--progress`, will print the string `ETA: %s`
|
||||
to the terminal title.
|
||||
|
||||
### -q, --quiet ###

This flag will limit rclone's output to error messages only.
@@ -1251,11 +1258,17 @@ or with `--backup-dir`. See `--backup-dir` for more info.

For example

    rclone sync -i /path/to/local/file remote:current --suffix .bak
    rclone copy -i /path/to/local/file remote:current --suffix .bak

will sync `/path/to/local` to `remote:current`, but for any files
will copy `/path/to/local` to `remote:current`, but for any files
which would have been updated or deleted will have .bak added.

If using `rclone sync` with `--suffix` and without `--backup-dir` then
it is recommended to put a filter rule in excluding the suffix
otherwise the `sync` will delete the backup files.

    rclone sync -i /path/to/local/file remote:current --suffix .bak --exclude "*.bak"

### --suffix-keep-extension ###

When using `--suffix`, setting this causes rclone to put the SUFFIX

@@ -202,6 +202,39 @@ Impersonate this user when using a business account.
- Type: string
- Default: ""

#### --dropbox-shared-files

Instructs rclone to work on individual shared files.

In this mode rclone's features are extremely limited - only list (ls, lsl, etc.)
operations and read operations (e.g. downloading) are supported.
All other operations will be disabled.

- Config: shared_files
- Env Var: RCLONE_DROPBOX_SHARED_FILES
- Type: bool
- Default: false

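For example, to list the files shared with you and then download one of
them (the file name is illustrative):

    rclone --dropbox-shared-files lsl dropbox:
    rclone --dropbox-shared-files copy dropbox:file.txt /tmp
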
#### --dropbox-shared-folders

Instructs rclone to work on shared folders.

When this flag is used with no path only the List operation is supported and
all available shared folders will be listed. If you specify a path the first part
will be interpreted as the name of the shared folder. Rclone will then try to mount
this shared folder to the root namespace. On success rclone proceeds normally.
The shared folder is then pretty much a normal folder and all normal operations
are supported.

Note that we don't unmount the shared folder afterwards so the
--dropbox-shared-folders can be omitted after the first use of a particular
shared folder.

- Config: shared_folders
- Env Var: RCLONE_DROPBOX_SHARED_FOLDERS
- Type: bool
- Default: false

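For example, to list the available shared folders and then the contents
of one of them (the folder name is illustrative):

    rclone --dropbox-shared-folders lsd dropbox:
    rclone --dropbox-shared-folders ls dropbox:shared-folder-name
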
#### --dropbox-encoding

This sets the encoding for the backend.

@@ -48,7 +48,7 @@ Choose a number from below, or type in your own value
 1 / Connect to ftp.example.com
   \ "ftp.example.com"
host> ftp.example.com
FTP username, leave blank for current username, ncw
FTP username, leave blank for current username, $USER
Enter a string value. Press Enter for the default ("").
user>
FTP port, leave blank to use default (21)

@@ -205,7 +205,7 @@ or the latest version (equivalent to the beta) with
These will build the binary in `$(go env GOPATH)/bin`
(`~/go/bin/rclone` by default) after downloading the source to the go
module cache. Note - do **not** use the `-u` flag here. This causes go
to try to update the depencencies that rclone uses and sometimes these
to try to update the dependencies that rclone uses and sometimes these
don't work with the current version of rclone.

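For reference, the commands referred to above take this form (the
module path is the one published by the rclone project):

    go get github.com/rclone/rclone
    go get github.com/rclone/rclone@master
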
## Installation with Ansible ##

@@ -27,8 +27,8 @@ Note that the web interface may refer to this token as a JottaCli token.
### Legacy Setup

If you are using one of the whitelabel versions (Elgiganten, Com Hem Cloud) you may not have the option
to generate a CLI token. In this case you'll have to use the legacy authentification. To to this select
yes when the setup asks for legacy authentification and enter your username and password.
to generate a CLI token. In this case you'll have to use the legacy authentication. To do this select
yes when the setup asks for legacy authentication and enter your username and password.
The rest of the setup is identical to the default setup.

Here is an example of how to make a remote called `remote` with the default setup. First run:
@@ -59,7 +59,7 @@ y) Yes
n) No
y/n> n
Remote config
Use legacy authentification?.
Use legacy authentication?.
This is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.
y) Yes
n) No (default)

@@ -402,7 +402,7 @@ If the server can't do `CleanUp` then `rclone cleanup` will return an
error.

‡‡ Note that while Box implements this it has to delete every file
idividually so it will be slower than emptying the trash via the WebUI
individually so it will be slower than emptying the trash via the WebUI

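For example, to empty the trash on a Box remote named `box:` (the
remote name is illustrative):

    rclone cleanup box:
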
### ListR ###

@@ -259,7 +259,7 @@ Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.

NB if you set this to > 1 then the checksums of multpart uploads
NB if you set this to > 1 then the checksums of multipart uploads
become corrupted (the uploads themselves are not corrupted though).

If you are uploading small numbers of large files over a high speed link

@@ -18,6 +18,7 @@ The S3 backend can be used with a number of different providers:
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
{{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" end="true" >}}
{{< /provider_list >}}

@@ -138,7 +139,7 @@ Choose a number from below, or type in your own value
   / Asia Pacific (Mumbai)
13 | Needs location constraint ap-south-1.
   \ "ap-south-1"
   / Asia Patific (Hong Kong) Region
   / Asia Pacific (Hong Kong) Region
14 | Needs location constraint ap-east-1.
   \ "ap-east-1"
   / South America (Sao Paulo) Region
@@ -488,6 +489,8 @@ Choose your S3 provider.
        - StackPath Object Storage
    - "Wasabi"
        - Wasabi Object Storage
    - "TencentCOS"
        - Tencent Cloud Object Storage (COS)
    - "Other"
        - Any other S3 compatible provider

@@ -579,7 +582,7 @@ Region to connect to.
        - Asia Pacific (Mumbai)
        - Needs location constraint ap-south-1.
    - "ap-east-1"
        - Asia Patific (Hong Kong) Region
        - Asia Pacific (Hong Kong) Region
        - Needs location constraint ap-east-1.
    - "sa-east-1"
        - South America (Sao Paulo) Region
@@ -1122,7 +1125,7 @@ The storage class to use when storing new objects in S3.

### Advanced Options

Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)).

#### --s3-bucket-acl

@@ -1490,7 +1493,7 @@ All the objects shown will be marked for restore, then

    rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard

It returns a list of status dictionaries with Remote and Status
keys. The Status will be OK if it was successfull or an error message
keys. The Status will be OK if it was successful or an error message
if not.

    [
@@ -1791,7 +1794,7 @@ Choose a number from below, or type in your own value
secret_access_key> <>
```

6. Specify the endpoint for IBM COS. For Public IBM COS, choose from the option below. For On Premise IBM COS, enter an enpoint address.
6. Specify the endpoint for IBM COS. For Public IBM COS, choose from the option below. For On Premise IBM COS, enter an endpoint address.
```
Endpoint for IBM COS S3 API.
Specify if using an IBM COS On Premise.
@@ -1852,7 +1855,7 @@ Choose a number from below, or type in your own value
location_constraint>1
```

9. Specify a canned ACL. IBM Cloud (Strorage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs.
9. Specify a canned ACL. IBM Cloud (Storage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs.
```
Canned ACL used when creating buckets and/or storing objects in S3.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
@@ -2212,6 +2215,138 @@ d) Delete this remote
y/e/d> y
```

### Tencent COS {#tencent-cos}

[Tencent Cloud Object Storage (COS)](https://intl.cloud.tencent.com/product/cos) is a distributed storage service offered by Tencent Cloud for unstructured data. It is secure, stable, massive, convenient, low-latency and low-cost.

To configure access to Tencent COS, follow the steps below:

1. Run `rclone config` and select `n` for a new remote.

```
rclone config
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
```

2. Give the name of the configuration. For example, name it 'cos'.

```
name> cos
```

3. Select `s3` storage.

```
Choose a number from below, or type in your own value
1 / 1Fichier
   \ "fichier"
2 / Alias for an existing remote
   \ "alias"
3 / Amazon Drive
   \ "amazon cloud drive"
4 / Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)
   \ "s3"
[snip]
Storage> s3
```

4. Select `TencentCOS` provider.
```
Choose a number from below, or type in your own value
1 / Amazon Web Services (AWS) S3
   \ "AWS"
[snip]
11 / Tencent Cloud Object Storage (COS)
   \ "TencentCOS"
[snip]
provider> TencentCOS
```

5. Enter your Tencent Cloud SecretId and SecretKey.

```
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
Only applies if access_key_id and secret_access_key is blank.
Enter a boolean value (true or false). Press Enter for the default ("false").
Choose a number from below, or type in your own value
1 / Enter AWS credentials in the next step
   \ "false"
2 / Get AWS credentials from the environment (env vars or IAM)
   \ "true"
env_auth> 1
AWS Access Key ID.
Leave blank for anonymous access or runtime credentials.
Enter a string value. Press Enter for the default ("").
access_key_id> AKIDxxxxxxxxxx
AWS Secret Access Key (password)
Leave blank for anonymous access or runtime credentials.
Enter a string value. Press Enter for the default ("").
secret_access_key> xxxxxxxxxxx
```

6. Select the endpoint for Tencent COS. This is the standard endpoint for each region.

```
 1 / Beijing Region.
   \ "cos.ap-beijing.myqcloud.com"
 2 / Nanjing Region.
   \ "cos.ap-nanjing.myqcloud.com"
 3 / Shanghai Region.
   \ "cos.ap-shanghai.myqcloud.com"
 4 / Guangzhou Region.
   \ "cos.ap-guangzhou.myqcloud.com"
[snip]
endpoint> 4
```

7. Choose the ACL and storage class.

```
Note that this ACL is applied when server side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
1 / Owner gets Full_CONTROL. No one else has access rights (default).
   \ "default"
[snip]
acl> 1
The storage class to use when storing new objects in Tencent COS.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
1 / Default
   \ ""
[snip]
storage_class> 1
Edit advanced config? (y/n)
y) Yes
n) No (default)
y/n> n
Remote config
--------------------
[cos]
type = s3
provider = TencentCOS
env_auth = false
access_key_id = xxx
secret_access_key = xxx
endpoint = cos.ap-guangzhou.myqcloud.com
acl = default
--------------------
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
Current remotes:

Name                 Type
====                 ====
cos                  s3
```

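Once configured, the remote behaves like any other S3 remote, e.g. (the
bucket name is illustrative):

    rclone mkdir cos:test-bucket
    rclone copy /path/to/local cos:test-bucket
    rclone ls cos:test-bucket
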
### Netease NOS ###

For Netease NOS configure as per the configurator `rclone config`

@@ -52,7 +52,7 @@ Choose a number from below, or type in your own value
 1 / Connect to example.com
   \ "example.com"
host> example.com
SSH username, leave blank for current username, ncw
SSH username, leave blank for current username, $USER
user> sftpuser
SSH port, leave blank to use default (22)
port>
@@ -102,7 +102,7 @@ excess files in the directory.
The SFTP remote supports three authentication methods:

* Password
* Key file
* Key file, including certificate signed keys
* ssh-agent

Key files should be PEM-encoded private key files. For instance `/home/$USER/.ssh/id_rsa`.
@@ -128,6 +128,77 @@ Using an ssh-agent is the only way to load encrypted OpenSSH keys at the moment.
If you set the `--sftp-ask-password` option, rclone will prompt for a
password when needed and no password has been configured.

If you have a certificate then you can provide the path to the public key that contains the certificate. For example:

```
[remote]
type = sftp
host = example.com
user = sftpuser
key_file = ~/id_rsa
pubkey_file = ~/id_rsa-cert.pub
```

If you concatenate a cert with a private key then you can specify the
merged file in both places.

Note: the cert must come first in the file. e.g.

```
cat id_rsa-cert.pub id_rsa > merged_key
```

### Host key validation ###

By default rclone will not check the server's host key for validation. This
can allow an attacker to replace a server with their own and if you use
password authentication then this can lead to that password being exposed.

Host key matching, using standard `known_hosts` files, can be turned on by
enabling the `known_hosts_file` option. This can point to the file maintained
by `OpenSSH` or can point to a unique file.

e.g.

```
[remote]
type = sftp
host = example.com
user = sftpuser
pass =
known_hosts_file = ~/.ssh/known_hosts
```

There are some limitations:

* `rclone` will not _manage_ this file for you. If the key is missing or
wrong then the connection will be refused.
* If the server is set up for a certificate host key then the entry in
the `known_hosts` file _must_ be the `@cert-authority` entry for the CA
* Unlike `OpenSSH`, the libraries used by `rclone` do not permit (at time
of writing) multiple host keys to be listed for a server. Only the first
entry is used.

If the host key provided by the server does not match the one in the
file (or is missing) then the connection will be aborted and an error
returned such as

    NewFs: couldn't connect SSH: ssh: handshake failed: knownhosts: key mismatch

or

    NewFs: couldn't connect SSH: ssh: handshake failed: knownhosts: key is unknown

If you see an error such as

    NewFs: couldn't connect SSH: ssh: handshake failed: ssh: no authorities for hostname: example.com:22

then it is likely the server has presented a CA signed host certificate
and you will need to add the appropriate `@cert-authority` entry.

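Such an entry uses the standard OpenSSH `known_hosts` syntax (the host
pattern and key below are illustrative):

    @cert-authority *.example.com ssh-rsa AAAAB3NzaC1yc2EAAA... ca@example.com
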
The `known_hosts_file` setting can be set during `rclone config` as an
advanced option.

### ssh-agent on macOS ###

Note that there seem to be various problems with using an ssh-agent on
@@ -172,7 +243,7 @@ SSH host to connect to

#### --sftp-user

SSH username, leave blank for current username, ncw
SSH username, leave blank for current username, $USER

- Config: user
- Env Var: RCLONE_SFTP_USER
@@ -234,6 +305,20 @@ in the new OpenSSH format can't be used.
- Type: string
- Default: ""

#### --sftp-pubkey-file

Optional path to public key file.

Set this if you have a signed certificate you want to use for authentication.

Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.

- Config: pubkey_file
- Env Var: RCLONE_SFTP_PUBKEY_FILE
- Type: string
- Default: ""

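For example, the option can be given on the command line (the paths and
remote name are illustrative):

    rclone lsd remote: --sftp-pubkey-file ~/id_rsa-cert.pub
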
#### --sftp-key-use-agent

When set forces the usage of the ssh-agent.
@@ -286,6 +371,23 @@ Leave blank or set to false to enable hashing (recommended), set to true to disa

Here are the advanced options specific to sftp (SSH/SFTP Connection).

#### --sftp-known-hosts-file

Optional path to known_hosts file.

Set this value to enable server host key validation.

Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.

- Config: known_hosts_file
- Env Var: RCLONE_SFTP_KNOWN_HOSTS_FILE
- Type: string
- Default: ""
- Examples:
    - "~/.ssh/known_hosts"
        - Use OpenSSH's known_hosts file

#### --sftp-ask-password

Allow asking for SFTP password when needed.

@@ -65,7 +65,7 @@ d) Delete this remote
y/e/d> y
```

### Setup with API key and passhprase
### Setup with API key and passphrase

```
No remotes found - make a new one

@@ -26,6 +26,10 @@ var ErrorMaxTransferLimitReached = errors.New("Max transfer limit reached as set
// transfer limit is reached.
var ErrorMaxTransferLimitReachedFatal = fserrors.FatalError(ErrorMaxTransferLimitReached)

// ErrorMaxTransferLimitReachedGraceful is returned from operations.Copy when the max
// transfer limit is reached and a graceful stop is required.
var ErrorMaxTransferLimitReachedGraceful = fserrors.NoRetryError(ErrorMaxTransferLimitReached)
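
// A rough sketch (not part of this change) of how a caller can tell the
// wrappers apart: fserrors.IsFatalError(err) reports the Fatal variant,
// while fserrors.IsNoRetryError(err) reports this graceful one, so a
// sync can drain in-flight transfers instead of aborting, e.g.
//
//    if fserrors.IsNoRetryError(err) {
//        // stop scheduling new transfers, let running ones finish
//    }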

// Account limits and accounts for one transfer
type Account struct {
    stats *StatsInfo
@@ -446,7 +450,7 @@ func shortenName(in string, size int) string {
        return in
    }
    name := []rune(in)
    size-- // don't count elipsis rune
    size-- // don't count ellipsis rune
    suffixLength := size / 2
    prefixLength := size - suffixLength
    suffixStart := len(name) - suffixLength
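    // Illustrative note (not part of the change): with size 9 the
    // remaining budget of 8 runes splits into prefixLength 4 and
    // suffixLength 4, so "longfilename.txt" becomes "long….txt".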

@@ -11,6 +11,7 @@ import (
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/rc"
    "github.com/rclone/rclone/lib/terminal"
)

// MaxCompletedTransfers specifies maximum number of completed transfers in startedTransfers list
@@ -272,7 +273,7 @@ func (s *StatsInfo) String() string {
        }
    }

    _, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s\n",
    _, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s",
        dateString,
        fs.SizeSuffix(s.bytes),
        fs.SizeSuffix(totalSize).Unit("Bytes"),
@@ -282,7 +283,13 @@ func (s *StatsInfo) String() string {
        xfrchkString,
    )

    if fs.Config.ProgressTerminalTitle {
        // Writes ETA to the terminal title
        terminal.WriteTerminalTitle("ETA: " + etaString(currentSize, totalSize, speed))
    }

    if !fs.Config.StatsOneLine {
        _, _ = buf.WriteRune('\n')
        errorDetails := ""
        switch {
        case s.fatalError:
@@ -291,6 +298,7 @@ func (s *StatsInfo) String() string {
            errorDetails = " (retrying may help)"
        case s.errors != 0:
            errorDetails = " (no need to retry)"

        }

        // Add only non zero stats

@@ -366,6 +366,8 @@ func (sg *statsGroups) sum() *StatsInfo {
            sum.lastError = stats.lastError
        }
        sum.startedTransfers = append(sum.startedTransfers, stats.startedTransfers...)
        sum.oldDuration += stats.oldDuration
        sum.oldTimeRanges = append(sum.oldTimeRanges, stats.oldTimeRanges...)
    }
    stats.mu.RUnlock()
}

@@ -4,8 +4,10 @@ import (
    "fmt"
    "runtime"
    "testing"
    "time"

    "github.com/rclone/rclone/fstest/testy"
    "github.com/stretchr/testify/assert"
)

func TestStatsGroupOperations(t *testing.T) {
@@ -43,17 +45,26 @@ func TestStatsGroupOperations(t *testing.T) {
        t.Parallel()
        stats1 := NewStats()
        stats1.bytes = 5
        stats1.errors = 5
        stats1.errors = 6
        stats1.oldDuration = time.Second
        stats1.oldTimeRanges = []timeRange{{time.Now(), time.Now().Add(time.Second)}}
        stats2 := NewStats()
        stats2.bytes = 10
        stats2.errors = 12
        stats2.oldDuration = 2 * time.Second
        stats2.oldTimeRanges = []timeRange{{time.Now(), time.Now().Add(2 * time.Second)}}
        sg := newStatsGroups()
        sg.set("test1", stats1)
        sg.set("test2", stats2)
        sum := sg.sum()
        if sum.bytes != stats1.bytes+stats2.bytes {
            t.Fatalf("sum() => bytes %d, expected %d", sum.bytes, stats1.bytes+stats2.bytes)
        }
        if sum.errors != stats1.errors+stats2.errors {
            t.Fatalf("sum() => errors %d, expected %d", sum.errors, stats1.errors+stats2.errors)
        assert.Equal(t, stats1.bytes+stats2.bytes, sum.bytes)
        assert.Equal(t, stats1.errors+stats2.errors, sum.errors)
        assert.Equal(t, stats1.oldDuration+stats2.oldDuration, sum.oldDuration)
        // dict can iterate in either order
        a := timeRanges{stats1.oldTimeRanges[0], stats2.oldTimeRanges[0]}
        b := timeRanges{stats2.oldTimeRanges[0], stats1.oldTimeRanges[0]}
        if !assert.ObjectsAreEqual(a, sum.oldTimeRanges) {
            assert.Equal(t, b, sum.oldTimeRanges)
        }
    })

@@ -99,7 +99,7 @@ func StartTokenTicker() {
    }()
}

// limitBandwith sleeps for the correct amount of time for the passage
// limitBandwidth sleeps for the correct amount of time for the passage
// of n bytes according to the current bandwidth limit
func limitBandwidth(n int) {
    tokenBucketMu.Lock()

@@ -72,8 +72,16 @@ func (tm *transferMap) _sortedSlice() []*Transfer {
    for _, tr := range tm.items {
        s = append(s, tr)
    }
    // sort by time first and if equal by name. Note that the relatively
    // low time resolution on Windows can cause equal times.
    sort.Slice(s, func(i, j int) bool {
        return s[i].startedAt.Before(s[j].startedAt)
        a, b := s[i], s[j]
        if a.startedAt.Before(b.startedAt) {
            return true
        } else if !a.startedAt.Equal(b.startedAt) {
            return false
        }
        return a.remote < b.remote
    })
    return s
}
@@ -83,7 +91,7 @@ func (tm *transferMap) _sortedSlice() []*Transfer {
func (tm *transferMap) String(progress *inProgress, exclude *transferMap) string {
    tm.mu.RLock()
    defer tm.mu.RUnlock()
    strngs := make([]string, 0, len(tm.items))
    stringList := make([]string, 0, len(tm.items))
    for _, tr := range tm._sortedSlice() {
        if exclude != nil {
            exclude.mu.RLock()
@@ -103,9 +111,9 @@ func (tm *transferMap) String(progress *inProgress, exclude *transferMap) string
            tm.name,
        )
    }
        strngs = append(strngs, " * "+out)
        stringList = append(stringList, " * "+out)
    }
    return strings.Join(strngs, "\n")
    return strings.Join(stringList, "\n")
}

// progress returns total bytes read as well as the size.