Mirror of https://github.com/rclone/rclone.git (synced 2026-01-31 16:53:44 +00:00)

Compare commits (99 commits)
| SHA1 |
|---|
| 2f0ef2e983 |
| a8db0be891 |
| c85438d34b |
| a2fa1370c5 |
| bed83b0b64 |
| cf0bdad5de |
| 85d35ef03c |
| 514d10b314 |
| 5164c3d2d0 |
| ffdd0719e7 |
| 4e2b5389d7 |
| dc4e63631f |
| 275bf456d3 |
| 7dfa871095 |
| 70cc88de22 |
| 4bc0f46955 |
| 5b09599a23 |
| f4dd8e3fe8 |
| d0888edc0a |
| 51a230d7fd |
| fc5b14b620 |
| bbddadbd04 |
| 7428e47ebc |
| 72083c65ad |
| 70f92fd6b3 |
| a86cedbc24 |
| 0906f8dd3b |
| 664213cedb |
| 75a7226174 |
| 9e925becb6 |
| e3a5bb9b48 |
| b7eeb0e260 |
| 84d64ddabc |
| 6c9f92aee6 |
| 893297760b |
| c5c56cda02 |
| 2295123cad |
| ff0280c0cb |
| 64d736a57b |
| 5f1d5a1897 |
| aac2406e19 |
| 6dc28ef50a |
| 66def93373 |
| c58023a9ba |
| 3edc9ff0b0 |
| 8e8ae1edc7 |
| 20b00db390 |
| db4bbf9521 |
| 2b7994e739 |
| e7fbdac8e0 |
| 41ec712aa9 |
| 17acae2b00 |
| 57261c7e97 |
| d8239e0194 |
| 004c3796de |
| 18c7549770 |
| e5190f14ce |
| 433b73a5a8 |
| ab88a3341f |
| 181da3ce9b |
| b14a58c9b8 |
| 60cc2cba1f |
| c797494d88 |
| e2a57182be |
| 8928441466 |
| 0e8965060f |
| f3cf6fcdd7 |
| 18ccf0f871 |
| 313647bcf3 |
| 61fe068c90 |
| 5c49096e11 |
| a73c78545d |
| e0fd560711 |
| 6a56ac1032 |
| 96299629b4 |
| 75de30cfa8 |
| 233bed6a73 |
| b3964efe4d |
| 575f061629 |
| 640d7d3b4e |
| e92294b482 |
| 22937e8982 |
| c3d1474eb9 |
| e2426ea87b |
| e58a61175f |
| 05bea46c3e |
| c8a719ae0d |
| c3884aafd9 |
| 0a9785a4ff |
| 8140f67092 |
| 4a001b8a02 |
| 525433e6dd |
| f71f6c57d7 |
| e35623c72e |
| 344bce7e2a |
| 3a4322a7ba |
| 27b9ae4fc3 |
| 7e2488af10 |
| 41ecb586c4 |
.github/workflows/build.yml (vendored, 23 lines changed)
```diff
@@ -46,7 +46,6 @@ jobs:
            go: '1.15.x'
            gotags: cmount
            build_flags: '-include "^windows/amd64" -cgo'
            build_args: '-buildmode exe'
            quicktest: true
            racequicktest: true
            deploy: true
@@ -58,7 +57,6 @@ jobs:
            goarch: '386'
            cgo: '1'
            build_flags: '-include "^windows/386" -cgo'
            build_args: '-buildmode exe'
            quicktest: true
            deploy: true
@@ -109,11 +107,10 @@ jobs:
      - name: Set environment variables
        shell: bash
        run: |
          echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
          echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
          echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV
          if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
          if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
          echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
          echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
          if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
          if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi

      - name: Install Libraries on Linux
        shell: bash
@@ -127,8 +124,10 @@ jobs:
      - name: Install Libraries on macOS
        shell: bash
        run: |
          brew untap local/homebrew-openssl # workaround for https://github.com/actions/virtual-environments/issues/1811
          brew untap local/homebrew-python2 # workaround for https://github.com/actions/virtual-environments/issues/1811
          brew update
          brew install --cask osxfuse
          brew cask install osxfuse
        if: matrix.os == 'macOS-latest'

      - name: Install Libraries on Windows
@@ -136,10 +135,10 @@ jobs:
        run: |
          $ProgressPreference = 'SilentlyContinue'
          choco install -y winfsp zip
          echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
          Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
          if ($env:GOARCH -eq "386") {
            choco install -y mingw --forcex86 --force
            echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
            Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
          }
          # Copy mingw32-make.exe to make.exe so the same command line
          # can be used on Windows as on macOS and Linux
@@ -226,8 +225,8 @@ jobs:
      - name: Set environment variables
        shell: bash
        run: |
          echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
          echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH
          echo '::set-env name=GOPATH::${{ runner.workspace }}'
          echo '::add-path::${{ runner.workspace }}/bin'

      - name: Cross-compile rclone
        run: |
```
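The build.yml hunks above capture the switch between GitHub Actions' deprecated `::set-env`/`::add-path` workflow commands and the `$GITHUB_ENV`/`$GITHUB_PATH` environment files. The mechanism behind the env-file style: the runner exposes a file path in the `GITHUB_ENV` variable, and any `KEY=value` lines appended to that file become environment variables for subsequent steps. A minimal Go sketch of the same append, equivalent to the `echo ... >> $GITHUB_ENV` lines (illustrative only; the workflow itself does this with the shell):

```go
package main

import (
	"fmt"
	"os"
)

// appendEnv appends KEY=value to the file named by $GITHUB_ENV so that
// later workflow steps see KEY in their environment. Outside of GitHub
// Actions the variable is unset and we report an error instead.
func appendEnv(key, value string) error {
	path := os.Getenv("GITHUB_ENV")
	if path == "" {
		return fmt.Errorf("GITHUB_ENV is not set (not running under GitHub Actions?)")
	}
	f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = fmt.Fprintf(f, "%s=%s\n", key, value)
	return err
}

func main() {
	if err := appendEnv("GOTAGS", "cmount"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```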
```diff
@@ -15,7 +15,7 @@ jobs:
        with:
          fetch-depth: 0
      - name: Build and publish image
        uses: ilteoood/docker_buildx@1.1.0
        uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
        with:
          tag: beta
          imageName: rclone/rclone

@@ -23,7 +23,7 @@ jobs:
        id: actual_major_version
        run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
      - name: Build and publish image
        uses: ilteoood/docker_buildx@1.1.0
        uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
        with:
          tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
          imageName: rclone/rclone
```
````diff
@@ -86,7 +86,7 @@ git reset --soft HEAD~2 # This squashes the 2 latest commits together.
git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
git commit # Add a new commit message.
git push --force # Push the squashed commit to your GitHub repo.
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also reccommends wizardzines.com
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
```

## CI for your fork ##
````
MANUAL.html (generated, 17564 lines changed): file diff suppressed because one or more lines are too long.

MANUAL.txt (generated, 16862 lines changed): file diff suppressed because it is too large.
Makefile (16 lines changed)
```diff
@@ -46,13 +46,13 @@ endif
.PHONY: rclone test_all vars version

rclone:
	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
	mkdir -p `go env GOPATH`/bin/
	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`

test_all:
	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all

vars:
	@echo SHELL="'$(SHELL)'"
@@ -188,10 +188,10 @@ upload_github:
	./bin/upload-github $(TAG)

cross: doc
	go run bin/cross-compile.go -release current $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
	go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)

beta:
	go run bin/cross-compile.go $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
	go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
	rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
	@echo Beta release ready at https://pub.rclone.org/$(TAG)/

@@ -199,23 +199,23 @@ log_since_last_release:
	git log $(LAST_TAG)..

compile_all:
	go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
	go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)

ci_upload:
	sudo chown -R $$USER build
	find build -type l -delete
	gzip -r9v build
	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
ifndef BRANCH_PATH
	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
	@echo Beta release ready at $(BETA_URL)/testbuilds

ci_beta:
	git log $(LAST_TAG).. > /tmp/git-log.txt
	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
ifndef BRANCH_PATH
	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
endif
	@echo Beta release ready at $(BETA_URL)
```
```diff
@@ -21,7 +21,7 @@ This file describes how to make the various kinds of releases
  * git status - to check for new man pages - git add them
  * git commit -a -v -m "Version v1.XX.0"
  * make retag
  * git push --tags origin master
  * git push --follow-tags origin
  * # Wait for the GitHub builds to complete then...
  * make fetch_binaries
  * make tarball
@@ -65,8 +65,9 @@ Now
  * git cherry-pick any fixes
  * Do the steps as above
  * make startstable
  * NB this overwrites the current beta so we need to do this
  * git co master
  * `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
  * # cherry pick the changes to the changelog
  * git checkout ${BASE_TAG}-stable docs/content/changelog.md
  * git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
  * git push
```
```diff
@@ -1245,15 +1245,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	}
	blob := o.getBlobReference()
	ac := azblob.BlobAccessConditions{}
	var dowloadResponse *azblob.DownloadResponse
	var downloadResponse *azblob.DownloadResponse
	err = o.fs.pacer.Call(func() (bool, error) {
		dowloadResponse, err = blob.Download(ctx, offset, count, ac, false)
		downloadResponse, err = blob.Download(ctx, offset, count, ac, false)
		return o.fs.shouldRetry(err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to open for download")
	}
	in = dowloadResponse.Body(azblob.RetryReaderOptions{})
	in = downloadResponse.Body(azblob.RetryReaderOptions{})
	return in, nil
}

@@ -1475,7 +1475,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	}
	// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
	// is merged the SDK can't upload a single blob of exactly the chunk
	// size, so upload with a multpart upload to work around.
	// size, so upload with a multipart upload to work around.
	// See: https://github.com/rclone/rclone/issues/2653
	multipartUpload := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
	if size == int64(o.fs.opt.ChunkSize) {
```
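Both azureblob hunks route the API call through rclone's pacer, which wraps each attempt in a `func() (bool, error)` callback: the boolean reports whether the call should be retried. A simplified, self-contained sketch of that pattern - the `Pacer` type below is a stand-in for illustration, not rclone's actual fs/pacer implementation:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// Pacer is a simplified stand-in for rclone's pacer: it keeps calling
// fn until fn reports that no retry is needed, sleeping between tries.
type Pacer struct {
	Sleep   time.Duration
	Retries int
}

// Call runs fn, retrying while fn returns (true, err).
func (p *Pacer) Call(fn func() (bool, error)) error {
	var err error
	for i := 0; i < p.Retries; i++ {
		var retry bool
		retry, err = fn()
		if !retry {
			return err
		}
		time.Sleep(p.Sleep)
	}
	return err
}

var errTransient = errors.New("503 server busy")

func main() {
	p := &Pacer{Sleep: 10 * time.Millisecond, Retries: 3}
	attempts := 0
	err := p.Call(func() (bool, error) {
		attempts++
		if attempts < 3 {
			return true, errTransient // ask for a retry
		}
		return false, nil // success, stop retrying
	})
	fmt.Println(attempts, err) // 3 <nil>
}
```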
```diff
@@ -1013,7 +1013,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
	return info.SharedLink.URL, err
}

// deletePermanently permenently deletes a trashed file
// deletePermanently permanently deletes a trashed file
func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
	opts := rest.Opts{
		Method: "DELETE",

@@ -1,4 +1,4 @@
// multpart upload for box
// multipart upload for box

package box
```
```diff
@@ -121,8 +121,6 @@ const maxTransactionProbes = 100
// standard chunker errors
var (
	ErrChunkOverflow = errors.New("chunk number overflow")
	ErrMetaTooBig    = errors.New("metadata is too big")
	ErrMetaUnknown   = errors.New("unknown metadata, please upgrade rclone")
)

// variants of baseMove's parameter delMode
```
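ErrChunkOverflow, ErrMetaTooBig and ErrMetaUnknown are sentinel errors: exported package-level values that callers compare against by identity (later hunks test `err == ErrMetaUnknown` directly). A minimal illustration of the pattern:

```go
package main

import (
	"errors"
	"fmt"
)

// A sentinel error: an exported, fixed error value that callers can
// test for by identity instead of parsing error strings.
var ErrMetaTooBig = errors.New("metadata is too big")

func readMeta(size int) error {
	if size > 1000 {
		return ErrMetaTooBig
	}
	return nil
}

func main() {
	err := readMeta(4096)
	// errors.Is also matches the sentinel through fmt.Errorf("%w", ...) wrapping.
	if errors.Is(err, ErrMetaTooBig) {
		fmt.Println("refusing oversized metadata:", err)
	}
}
```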
```diff
@@ -695,47 +693,43 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
		switch entry := dirOrObject.(type) {
		case fs.Object:
			remote := entry.Remote()
			mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote)
			if mainRemote == "" {
				// this is meta object or standalone file
				object := f.newObject("", entry, nil)
				byRemote[remote] = object
				tempEntries = append(tempEntries, object)
				break
			}
			// this is some kind of chunk
			// metobject should have been created above if present
			isSpecial := xactID != "" || ctrlType != ""
			mainObject := byRemote[mainRemote]
			if mainObject == nil && f.useMeta && !isSpecial {
				fs.Debugf(f, "skip orphan data chunk %q", remote)
				break
			}
			if mainObject == nil && !f.useMeta {
				// this is the "nometa" case
				// create dummy chunked object without metadata
				mainObject = f.newObject(mainRemote, nil, nil)
				byRemote[mainRemote] = mainObject
				if !badEntry[mainRemote] {
					tempEntries = append(tempEntries, mainObject)
			if mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote); mainRemote != "" {
				if xactID != "" {
					if revealHidden {
						fs.Infof(f, "ignore temporary chunk %q", remote)
					}
					break
				}
			}
			if isSpecial {
				if revealHidden {
					fs.Infof(f, "ignore non-data chunk %q", remote)
				if ctrlType != "" {
					if revealHidden {
						fs.Infof(f, "ignore control chunk %q", remote)
					}
					break
				}
				// need to read metadata to ensure actual object type
				if f.useMeta && mainObject != nil && mainObject.size <= maxMetadataSize {
					mainObject.unsure = true
				mainObject := byRemote[mainRemote]
				if mainObject == nil && f.useMeta {
					fs.Debugf(f, "skip chunk %q without meta object", remote)
					break
				}
				if mainObject == nil {
					// useMeta is false - create chunked object without metadata
					mainObject = f.newObject(mainRemote, nil, nil)
					byRemote[mainRemote] = mainObject
					if !badEntry[mainRemote] {
						tempEntries = append(tempEntries, mainObject)
					}
				}
				if err := mainObject.addChunk(entry, chunkNo); err != nil {
					if f.opt.FailHard {
						return nil, err
					}
					badEntry[mainRemote] = true
				}
				break
			}
			if err := mainObject.addChunk(entry, chunkNo); err != nil {
				if f.opt.FailHard {
					return nil, err
				}
				badEntry[mainRemote] = true
			}
			object := f.newObject("", entry, nil)
			byRemote[remote] = object
			tempEntries = append(tempEntries, object)
		case fs.Directory:
			isSubdir[entry.Remote()] = true
			wrapDir := fs.NewDirCopy(ctx, entry)
```
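processEntries groups directory entries by stripping a chunk suffix from each listed name (parseChunkName) and attaching data chunks to their main object. A hedged sketch of that kind of name parsing, using a hypothetical `<name>.rclone_chunk.<number>` convention - the real chunker builds its pattern from a configurable name-format option:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// chunkRe matches a hypothetical chunk naming convention of
// "<main name>.rclone_chunk.<number>".
var chunkRe = regexp.MustCompile(`^(.+)\.rclone_chunk\.(\d+)$`)

// parseChunkName splits a remote name into the main object name and a
// chunk number; mainRemote == "" means the name is not a chunk at all.
func parseChunkName(remote string) (mainRemote string, chunkNo int) {
	m := chunkRe.FindStringSubmatch(remote)
	if m == nil {
		return "", -1
	}
	no, err := strconv.Atoi(m[2])
	if err != nil {
		return "", -1
	}
	return m[1], no
}

func main() {
	for _, name := range []string{"video.mp4.rclone_chunk.001", "notes.txt"} {
		mainName, no := parseChunkName(name)
		fmt.Printf("%q -> main=%q chunk=%d\n", name, mainName, no)
	}
}
```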
```diff
@@ -790,13 +784,6 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
// but opening even a small file can be slow on some backends.
//
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.scanObject(ctx, remote, false)
}

// scanObject is like NewObject with optional quick scan mode.
// The quick mode avoids directory requests other than `List`,
// ignores non-chunked objects and skips chunk size checks.
func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
	if err := f.forbidChunk(false, remote); err != nil {
		return nil, errors.Wrap(err, "can't access")
	}
@@ -857,15 +844,8 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
			continue // bypass regexp to save cpu
		}
		mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(entryRemote)
		if mainRemote == "" || mainRemote != remote {
			continue // skip non-conforming chunks
		}
		if ctrlType != "" || xactID != "" {
			if f.useMeta {
				// temporary/control chunk calls for lazy metadata read
				o.unsure = true
			}
			continue
		if mainRemote == "" || mainRemote != remote || ctrlType != "" || xactID != "" {
			continue // skip non-conforming, temporary and control chunks
		}
		//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
		if err := o.addChunk(entry, chunkNo); err != nil {
@@ -875,7 +855,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.

	if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
		// Scanning hasn't found data chunks with conforming names.
		if f.useMeta || quickScan {
		if f.useMeta {
			// Metadata is required but absent and there are no chunks.
			return nil, fs.ErrorObjectNotFound
		}
@@ -898,10 +878,8 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
	// file without metadata. Validate it and update the total data size.
	// As an optimization, skip metadata reading here - we will call
	// readMetadata lazily when needed (reading can be expensive).
	if !quickScan {
		if err := o.validate(); err != nil {
			return nil, err
		}
	if err := o.validate(); err != nil {
		return nil, err
	}
	return o, nil
}
```
```diff
@@ -910,24 +888,13 @@ func (o *Object) readMetadata(ctx context.Context) error {
	if o.isFull {
		return nil
	}
	if !o.f.useMeta || (!o.isComposite() && !o.unsure) {
	if !o.isComposite() || !o.f.useMeta {
		o.isFull = true
		return nil
	}

	// validate metadata
	metaObject := o.main
	if metaObject.Size() > maxMetadataSize {
		if o.unsure {
			// this is not metadata but a foreign object
			o.unsure = false
			o.chunks = nil  // make isComposite return false
			o.isFull = true // cache results
			return nil
		}
		return ErrMetaTooBig
	}

	reader, err := metaObject.Open(ctx)
	if err != nil {
		return err
@@ -940,22 +907,8 @@ func (o *Object) readMetadata(ctx context.Context) error {

	switch o.f.opt.MetaFormat {
	case "simplejson":
		metaInfo, madeByChunker, err := unmarshalSimpleJSON(ctx, metaObject, metadata)
		if o.unsure {
			o.unsure = false
			if !madeByChunker {
				// this is not metadata but a foreign object
				o.chunks = nil  // make isComposite return false
				o.isFull = true // cache results
				return nil
			}
		}
		switch err {
		case nil:
			// fall thru
		case ErrMetaTooBig, ErrMetaUnknown:
			return err // return these errors unwrapped for unit tests
		default:
		metaInfo, err := unmarshalSimpleJSON(ctx, metaObject, metadata, true)
		if err != nil {
			return errors.Wrap(err, "invalid metadata")
		}
		if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {
```
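Both versions of readMetadata shown above are lazy: once `o.isFull` is set the call becomes a no-op, so metadata is fetched at most once per object and only when an operation actually needs it. The caching shape reduced to its essentials (a sketch, not rclone's API):

```go
package main

import "fmt"

// object caches the result of its first metadata read; isFull marks the
// cache as populated so that later calls become no-ops.
type object struct {
	isFull bool
	size   int64
	reads  int // counts real fetches, just to demonstrate the caching
}

func (o *object) readMetadata() error {
	if o.isFull {
		return nil // cached - nothing to do
	}
	// ... the expensive fetch and validation would happen here ...
	o.reads++
	o.size = 42
	o.isFull = true
	return nil
}

func main() {
	o := &object{}
	_ = o.readMetadata()
	_ = o.readMetadata()
	fmt.Println(o.size, o.reads) // 42 1 - the second call hit the cache
}
```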
```diff
@@ -970,27 +923,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
}

// put implements Put, PutStream, PutUnchecked, Update
func (f *Fs) put(
	ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
	basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {

	if err := f.forbidChunk(src, remote); err != nil {
		return nil, errors.Wrap(err, action+" refused")
	}
	if target == nil {
		// Get target object with a quick directory scan
		if obj, err := f.scanObject(ctx, remote, true); err == nil {
			target = obj
		}
	}
	if target != nil {
		obj := target.(*Object)
		if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
			// refuse to update a file of unsupported format
			return nil, errors.Wrap(err, "refusing to "+action)
		}
	}

func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption, basePut putFn) (obj fs.Object, err error) {
	c := f.newChunkingReader(src)
	wrapIn := c.wrapStream(ctx, in, src)

@@ -1080,8 +1013,8 @@ func (f *Fs) put(
	// Check for input that looks like valid metadata
	needMeta := len(c.chunks) > 1
	if c.readCount <= maxMetadataSize && len(c.chunks) == 1 {
		_, madeByChunker, _ := unmarshalSimpleJSON(ctx, c.chunks[0], c.smallHead)
		needMeta = madeByChunker
		_, err := unmarshalSimpleJSON(ctx, c.chunks[0], c.smallHead, false)
		needMeta = err == nil
	}

	// Finalize small object as non-chunked.
```
```diff
@@ -1328,16 +1261,29 @@ func (f *Fs) removeOldChunks(ctx context.Context, remote string) {
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.put(ctx, in, src, src.Remote(), options, f.base.Put, "put", nil)
	if err := f.forbidChunk(src, src.Remote()); err != nil {
		return nil, errors.Wrap(err, "refusing to put")
	}
	return f.put(ctx, in, src, src.Remote(), options, f.base.Put)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.put(ctx, in, src, src.Remote(), options, f.base.Features().PutStream, "upload", nil)
	if err := f.forbidChunk(src, src.Remote()); err != nil {
		return nil, errors.Wrap(err, "refusing to upload")
	}
	return f.put(ctx, in, src, src.Remote(), options, f.base.Features().PutStream)
}

// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	if err := o.f.forbidChunk(o, o.Remote()); err != nil {
		return errors.Wrap(err, "update refused")
	}
	if err := o.readMetadata(ctx); err != nil {
		// refuse to update a file of unsupported format
		return errors.Wrap(err, "refusing to update")
	}
	basePut := o.f.base.Put
	if src.Size() < 0 {
		basePut = o.f.base.Features().PutStream
@@ -1345,7 +1291,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
			return errors.New("wrapped file system does not support streaming uploads")
		}
	}
	oNew, err := o.f.put(ctx, in, src, o.Remote(), options, basePut, "update", o)
	oNew, err := o.f.put(ctx, in, src, o.Remote(), options, basePut)
	if err == nil {
		*o = *oNew.(*Object)
	}
```
```diff
@@ -1459,7 +1405,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
		// to corrupt file in hard mode. Hence, refuse to Remove, too.
		return errors.Wrap(err, "refuse to corrupt")
	}
	if err := o.readMetadata(ctx); err == ErrMetaUnknown {
	if err := o.readMetadata(ctx); err != nil {
		// Proceed but warn user that unexpected things can happen.
		fs.Errorf(o, "Removing a file with unsupported metadata: %v", err)
	}
@@ -1487,11 +1433,6 @@ func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMo
	if err := f.forbidChunk(o, remote); err != nil {
		return nil, errors.Wrapf(err, "can't %s", opName)
	}
	if err := o.readMetadata(ctx); err != nil {
		// Refuse to copy/move composite files with invalid or future
		// metadata format which might involve unsupported chunk types.
		return nil, errors.Wrapf(err, "can't %s this file", opName)
	}
	if !o.isComposite() {
		fs.Debugf(o, "%s non-chunked object...", opName)
		oResult, err := do(ctx, o.mainChunk(), remote) // chain operation to a single wrapped chunk
@@ -1500,6 +1441,11 @@ func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMo
		}
		return f.newObject("", oResult, nil), nil
	}
	if err := o.readMetadata(ctx); err != nil {
		// Refuse to copy/move composite files with invalid or future
		// metadata format which might involve unsupported chunk types.
		return nil, errors.Wrapf(err, "can't %s this file", opName)
	}

	fs.Debugf(o, "%s %d data chunks...", opName, len(o.chunks))
	mainRemote := o.remote
```
```diff
@@ -1590,10 +1536,6 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
		return
	}

	if obj.unsure {
		// ensure object is composite if need to re-read metadata
		_ = obj.readMetadata(ctx)
	}
	requireMetaHash := obj.isComposite() && f.opt.MetaFormat == "simplejson"
	if !requireMetaHash && !f.hashAll {
		ok = true // hash is not required for metadata
@@ -1777,7 +1719,6 @@ type Object struct {
	chunks []fs.Object // active data chunks if file is composite, or wrapped file as a single chunk if meta format is 'none'
	size   int64       // cached total size of chunks in a composite file or -1 for non-chunked files
	isFull bool        // true if metadata has been read
	unsure bool        // true if need to read metadata to detect object type
	md5    string
	sha1   string
	f      *Fs
@@ -1928,16 +1869,15 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
// on the level of wrapped remote but chunker is unaware of that.
//
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
	if err := o.readMetadata(ctx); err != nil {
		return "", err // valid metadata is required to get hash, abort
	}
	if !o.isComposite() {
		// First, chain to the wrapped non-chunked file if possible.
		if value, err := o.mainChunk().Hash(ctx, hashType); err == nil && value != "" {
			return value, nil
		}
	}

	if err := o.readMetadata(ctx); err != nil {
		return "", err // valid metadata is required to get hash, abort
	}
	// Try hash from metadata if the file is composite or if wrapped remote fails.
	switch hashType {
	case hash.MD5:
```
```diff
@@ -1962,13 +1902,13 @@ func (o *Object) UnWrap() fs.Object {

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
	if !o.isComposite() {
		return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
	}
	if err := o.readMetadata(ctx); err != nil {
		// refuse to open unsupported format
		return nil, errors.Wrap(err, "can't open")
	}
	if !o.isComposite() {
		return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
	}

	var openOptions []fs.OpenOption
	var offset, limit int64 = 0, -1
@@ -2241,57 +2181,57 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1 s
// handled by current implementation.
// The version check below will then explicitly ask user to upgrade rclone.
//
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte, strictChecks bool) (info *ObjectInfo, err error) {
	// Be strict about JSON format
	// to reduce possibility that a random small file resembles metadata.
	if data != nil && len(data) > maxMetadataSize {
		return nil, false, ErrMetaTooBig
		return nil, errors.New("too big")
	}
	if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
		return nil, false, errors.New("invalid json")
		return nil, errors.New("invalid json")
	}
	var metadata metaSimpleJSON
	err = json.Unmarshal(data, &metadata)
	if err != nil {
		return nil, false, err
		return nil, err
	}
	// Basic fields are strictly required
	// to reduce possibility that a random small file resembles metadata.
	if metadata.Version == nil || metadata.Size == nil || metadata.ChunkNum == nil {
		return nil, false, errors.New("missing required field")
		return nil, errors.New("missing required field")
	}
	// Perform strict checks, avoid corruption of future metadata formats.
	if *metadata.Version < 1 {
		return nil, false, errors.New("wrong version")
		return nil, errors.New("wrong version")
	}
	if *metadata.Size < 0 {
		return nil, false, errors.New("negative file size")
		return nil, errors.New("negative file size")
	}
	if *metadata.ChunkNum < 0 {
		return nil, false, errors.New("negative number of chunks")
		return nil, errors.New("negative number of chunks")
	}
	if *metadata.ChunkNum > maxSafeChunkNumber {
		return nil, true, ErrChunkOverflow // produced by incompatible version of rclone
		return nil, ErrChunkOverflow
	}
	if metadata.MD5 != "" {
		_, err = hex.DecodeString(metadata.MD5)
		if len(metadata.MD5) != 32 || err != nil {
			return nil, false, errors.New("wrong md5 hash")
			return nil, errors.New("wrong md5 hash")
		}
	}
	if metadata.SHA1 != "" {
		_, err = hex.DecodeString(metadata.SHA1)
		if len(metadata.SHA1) != 40 || err != nil {
			return nil, false, errors.New("wrong sha1 hash")
			return nil, errors.New("wrong sha1 hash")
		}
	}
	// ChunkNum is allowed to be 0 in future versions
	if *metadata.ChunkNum < 1 && *metadata.Version <= metadataVersion {
		return nil, false, errors.New("wrong number of chunks")
		return nil, errors.New("wrong number of chunks")
	}
	// Non-strict mode also accepts future metadata versions
	if *metadata.Version > metadataVersion {
		return nil, true, ErrMetaUnknown // produced by incompatible version of rclone
	if *metadata.Version > metadataVersion && strictChecks {
		return nil, fmt.Errorf("version %d is not supported, please upgrade rclone", metadata.Version)
	}

	var nilFs *Fs // nil object triggers appropriate type method
@@ -2299,7 +2239,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
	info.nChunks = *metadata.ChunkNum
	info.md5 = metadata.MD5
	info.sha1 = metadata.SHA1
	return info, true, nil
	return info, nil
}

func silentlyRemove(ctx context.Context, o fs.Object) {
```
```diff
@@ -147,7 +147,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
// If salt is "" we use a fixed salt just to make attackers lives
// slighty harder than using no salt.
//
// Note that empty passsword makes all 0x00 keys which is used in the
// Note that empty password makes all 0x00 keys which is used in the
// tests.
func (c *Cipher) Key(password, salt string) (err error) {
	const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
```
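Cipher.Key derives all of the cipher's key material (data key, name key and name tweak) from the password and salt in a single pass; rclone's crypt uses scrypt for this. A hedged sketch deriving an 80-byte buffer (32+32+16) and slicing it up - the cost parameters below are illustrative, not a statement of crypt's exact settings:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/scrypt"
)

func main() {
	password := []byte("correct horse battery staple")
	salt := []byte("example salt") // crypt falls back to a fixed salt when none is set

	// One scrypt call produces all key material; the sizes mirror the
	// crypt cipher: 32-byte data key, 32-byte name key, 16-byte tweak.
	// N/r/p are illustrative cost parameters.
	const keySize = 32 + 32 + 16
	key, err := scrypt.Key(password, salt, 16384, 8, 1, keySize)
	if err != nil {
		panic(err)
	}
	dataKey, nameKey, nameTweak := key[:32], key[32:64], key[64:]
	fmt.Println(len(dataKey), len(nameKey), len(nameTweak)) // 32 32 16
}
```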
```diff
@@ -633,11 +633,8 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
	}
	// possibly err != nil here, but we will process the
	// data and the next call to ReadFull will return 0, err
	// Write nonce to start of block
	copy(fh.buf, fh.nonce[:])
	// Encrypt the block using the nonce
	block := fh.buf
	secretbox.Seal(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
	secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
	fh.bufIndex = 0
	fh.bufSize = blockHeaderSize + n
	fh.nonce.increment()
@@ -782,8 +779,7 @@ func (fh *decrypter) fillBuffer() (err error) {
		return ErrorEncryptedFileBadHeader
	}
	// Decrypt the block using the nonce
	block := fh.buf
	_, ok := secretbox.Open(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
	_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
	if !ok {
		if err != nil {
			return err // return pending error as it is likely more accurate
```
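Each data block is sealed and opened with NaCl secretbox, reusing the block buffer via the `buf[:0]` append idiom visible in both hunks, with a nonce that increments per block. A self-contained round trip (fixed nonce and key for demonstration only):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte   // in crypt this comes from scrypt key derivation
	var nonce [24]byte // in crypt this increments for every block
	copy(key[:], "0123456789abcdef0123456789abcdef")

	plaintext := []byte("hello, world")

	// Seal appends the encrypted block (plaintext + 16-byte overhead) to
	// its first argument; sealing into buf[:0] reuses buf's backing array.
	buf := make([]byte, 0, len(plaintext)+secretbox.Overhead)
	box := secretbox.Seal(buf[:0], plaintext, &nonce, &key)

	// Open authenticates and decrypts; ok is false if the box was tampered with.
	out, ok := secretbox.Open(nil, box, &nonce, &key)
	fmt.Println(ok, string(out)) // true hello, world
}
```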
```diff
@@ -159,7 +159,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
	if strings.HasPrefix(remote, name+":") {
		return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
	}
	// Make sure to remove trailing . reffering to the current dir
	// Make sure to remove trailing . referring to the current dir
	if path.Base(rpath) == "." {
		rpath = strings.TrimSuffix(rpath, ".")
	}
```
```diff
@@ -87,7 +87,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
	}

	// wrap the object in a crypt for upload using the nonce we
	// saved from the encryptor
	// saved from the encrypter
	src := f.newObjectInfo(oi, nonce)

	// Test ObjectInfo methods
```
```diff
@@ -35,6 +35,7 @@ import (
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/walk"
```
```diff
@@ -470,6 +471,21 @@ Note that this detection is relying on error message strings which
Google don't document so it may break in the future.

See: https://github.com/rclone/rclone/issues/3857
`,
			Advanced: true,
		}, {
			Name:    "stop_on_download_limit",
			Default: false,
			Help: `Make download limit errors be fatal

At the time of writing it is only possible to download 10TB of data from
Google Drive a day (this is an undocumented limit). When this limit is
reached Google Drive produces a slightly different error message. When
this flag is set it causes these errors to be fatal. These will stop
the in-progress sync.

Note that this detection is relying on error message strings which
Google don't document so it may break in the future.
`,
			Advanced: true,
		}, {
```
```diff
@@ -539,6 +555,7 @@ type Options struct {
	ServerSideAcrossConfigs bool                 `config:"server_side_across_configs"`
	DisableHTTP2            bool                 `config:"disable_http2"`
	StopOnUploadLimit       bool                 `config:"stop_on_upload_limit"`
	StopOnDownloadLimit     bool                 `config:"stop_on_download_limit"`
	SkipShortcuts           bool                 `config:"skip_shortcuts"`
	Enc                     encoder.MultiEncoder `config:"encoding"`
}
```
```diff
@@ -638,6 +655,9 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
				return false, fserrors.FatalError(err)
			}
			return true, err
		} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
			fs.Errorf(f, "Received download limit error: %v", err)
			return false, fserrors.FatalError(err)
		} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
			fs.Errorf(f, "Received team drive file limit error: %v", err)
			return false, fserrors.FatalError(err)
```
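shouldRetry maps specific Google quota reasons onto fatal errors so that a sync aborts instead of retrying forever. The general shape of that classification, with a simplified stand-in for rclone's fserrors.FatalError wrapper:

```go
package main

import (
	"errors"
	"fmt"
)

// fatalError is a simplified stand-in for rclone's fserrors.FatalError:
// it wraps an error to signal "stop the whole sync, don't retry".
type fatalError struct{ error }

func (e fatalError) Unwrap() error { return e.error }

// classify decides whether an API error should be retried, based on the
// machine-readable reason string the server returned.
func classify(reason string, err error) (retry bool, out error) {
	switch reason {
	case "downloadQuotaExceeded", "teamDriveFileLimitExceeded":
		return false, fatalError{err} // fatal: abort the sync
	case "rateLimitExceeded":
		return true, err // transient: back off and retry
	default:
		return false, err
	}
}

func main() {
	retry, err := classify("downloadQuotaExceeded", errors.New("quota exceeded"))
	var f fatalError
	fmt.Println(retry, errors.As(err, &f)) // false true
}
```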
```diff
@@ -2025,10 +2045,10 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	exisitingObj, err := f.NewObject(ctx, src.Remote())
	existingObj, err := f.NewObject(ctx, src.Remote())
	switch err {
	case nil:
		return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
		return existingObj, existingObj.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		// Not found so create it
		return f.PutUnchecked(ctx, in, src, options...)
```
```diff
@@ -2959,6 +2979,38 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
	return f.unTrash(ctx, dir, directoryID, true)
}

// copy file with id to dest
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
	info, err := f.getFile(id, f.fileFields)
	if err != nil {
		return errors.Wrap(err, "couldn't find id")
	}
	if info.MimeType == driveFolderType {
		return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
	}
	info.Name = f.opt.Enc.ToStandardName(info.Name)
	o, err := f.newObjectWithInfo(info.Name, info)
	if err != nil {
		return err
	}
	destDir, destLeaf, err := fspath.Split(dest)
	if err != nil {
		return err
	}
	if destLeaf == "" {
		destLeaf = info.Name
	}
	dstFs, err := cache.Get(destDir)
	if err != nil {
		return err
	}
	_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
	if err != nil {
		return errors.Wrap(err, "copy failed")
	}
	return nil
}

var commandHelp = []fs.CommandHelp{{
	Name:  "get",
	Short: "Get command for fetching the drive config parameters",
```
```diff
@@ -3059,6 +3111,29 @@ Result:
    "Errors": 0
}
`,
}, {
	Name:  "copyid",
	Short: "Copy files by ID",
	Long: `This command copies files by ID

Usage:

    rclone backend copyid drive: ID path
    rclone backend copyid drive: ID1 path1 ID2 path2

It copies the drive file with ID given to the path (an rclone path which
will be passed internally to rclone copyto). The ID and path pairs can be
repeated.

The path should end with a / to indicate copy the file as named to
this directory. If it doesn't end with a / then the last path
component will be used as the file name.

If the destination is a drive backend then server side copying will be
attempted if possible.

Use the -i flag to see what would be copied before copying.
`,
}}

// Command the backend to run a named command
```
```diff
@@ -3130,6 +3205,19 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
			dir = arg[0]
		}
		return f.unTrashDir(ctx, dir, true)
	case "copyid":
		if len(arg)%2 != 0 {
			return nil, errors.New("need an even number of arguments")
		}
		for len(arg) > 0 {
			id, dest := arg[0], arg[1]
			arg = arg[2:]
			err = f.copyID(ctx, id, dest)
			if err != nil {
				return nil, errors.Wrapf(err, "failed copying %q to %q", id, dest)
			}
		}
		return nil, nil
	default:
		return nil, fs.ErrorCommandNotFound
	}
```
```diff
@@ -7,6 +7,8 @@ import (
	"io"
	"io/ioutil"
	"mime"
	"os"
	"path"
	"path/filepath"
	"strings"
	"testing"
```
```diff
@@ -272,14 +274,15 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
	}
}

const (
	// from fstest/fstests/fstests.go
	existingDir    = "hello? sausage"
	existingFile   = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
	existingSubDir = "êé"
)

// TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
func (f *Fs) InternalTestShortcuts(t *testing.T) {
	const (
		// from fstest/fstests/fstests.go
		existingDir    = "hello? sausage"
		existingFile   = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
		existingSubDir = "êé"
	)
	ctx := context.Background()
	srcObj, err := f.NewObject(ctx, existingFile)
	require.NoError(t, err)
```
```diff
@@ -408,6 +411,55 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
	require.NoError(t, f.Purge(ctx, "trashDir"))
}

// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
func (f *Fs) InternalTestCopyID(t *testing.T) {
	ctx := context.Background()
	obj, err := f.NewObject(ctx, existingFile)
	require.NoError(t, err)
	o := obj.(*Object)

	dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
	require.NoError(t, err)
	defer func() {
		_ = os.RemoveAll(dir)
	}()

	checkFile := func(name string) {
		filePath := filepath.Join(dir, name)
		fi, err := os.Stat(filePath)
		require.NoError(t, err)
		assert.Equal(t, int64(100), fi.Size())
		err = os.Remove(filePath)
		require.NoError(t, err)
	}

	t.Run("BadID", func(t *testing.T) {
		err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
		require.Error(t, err)
		assert.Contains(t, err.Error(), "couldn't find id")
	})

	t.Run("Directory", func(t *testing.T) {
		rootID, err := f.dirCache.RootID(ctx, false)
		require.NoError(t, err)
		err = f.copyID(ctx, rootID, dir+"/")
		require.Error(t, err)
		assert.Contains(t, err.Error(), "can't copy directory")
	})

	t.Run("WithoutDestName", func(t *testing.T) {
		err = f.copyID(ctx, o.id, dir+"/")
		require.NoError(t, err)
		checkFile(path.Base(existingFile))
	})

	t.Run("WithDestName", func(t *testing.T) {
		err = f.copyID(ctx, o.id, dir+"/potato.txt")
		require.NoError(t, err)
		checkFile("potato.txt")
	})
}

func (f *Fs) InternalTest(t *testing.T) {
	// These tests all depend on each other so run them as nested tests
	t.Run("DocumentImport", func(t *testing.T) {
```
```diff
@@ -424,6 +476,7 @@ func (f *Fs) InternalTest(t *testing.T) {
	})
	t.Run("Shortcuts", f.InternalTestShortcuts)
	t.Run("UnTrash", f.InternalTestUnTrash)
	t.Run("CopyID", f.InternalTestCopyID)
}

var _ fstests.InternalTester = (*Fs)(nil)
```
```diff
@@ -142,6 +142,31 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
			Help:     "Impersonate this user when using a business account.",
			Default:  "",
			Advanced: true,
		}, {
			Name: "shared_files",
			Help: `Instructs rclone to work on individual shared files.

In this mode rclone's features are extremely limited - only list (ls, lsl, etc.)
operations and read operations (e.g. downloading) are supported in this mode.
All other operations will be disabled.`,
			Default:  false,
			Advanced: true,
		}, {
			Name: "shared_folders",
			Help: `Instructs rclone to work on shared folders.

When this flag is used with no path only the List operation is supported and
all available shared folders will be listed. If you specify a path the first part
will be interpreted as the name of shared folder. Rclone will then try to mount this
shared to the root namespace. On success shared folder rclone proceeds normally.
The shared folder is now pretty much a normal folder and all normal operations
are supported.

Note that we don't unmount the shared folder afterwards so the
--dropbox-shared-folders can be omitted after the first use of a particular
shared folder.`,
			Default:  false,
			Advanced: true,
		}, {
			Name: config.ConfigEncoding,
			Help: config.ConfigEncodingHelp,
```
```diff
@@ -161,9 +186,11 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),

// Options defines the configuration for this backend
type Options struct {
	ChunkSize   fs.SizeSuffix        `config:"chunk_size"`
	Impersonate string               `config:"impersonate"`
	Enc         encoder.MultiEncoder `config:"encoding"`
	ChunkSize     fs.SizeSuffix        `config:"chunk_size"`
	Impersonate   string               `config:"impersonate"`
	SharedFiles   bool                 `config:"shared_files"`
	SharedFolders bool                 `config:"shared_folders"`
	Enc           encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote dropbox server
```
```diff
@@ -186,7 +213,9 @@ type Fs struct {
//
// Dropbox Objects always have full metadata
type Object struct {
	fs *Fs // what this object is part of
	fs      *Fs // what this object is part of
	id      string
	url     string
	remote  string    // The remote path
	bytes   int64     // size of the object
	modTime time.Time // time it was last modified
```
```diff
@@ -332,8 +361,60 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		CaseInsensitive:         true,
		ReadMimeType:            true,
		CanHaveEmptyDirectories: true,
	}).Fill(f)
	f.setRoot(root)
	})

	// do not fill features yet
	if f.opt.SharedFiles {
		f.setRoot(root)
		if f.root == "" {
			return f, nil
		}
		_, err := f.findSharedFile(f.root)
		f.root = ""
		if err == nil {
			return f, fs.ErrorIsFile
		}
		return f, nil
	}

	if f.opt.SharedFolders {
		f.setRoot(root)
		if f.root == "" {
			return f, nil // our root it empty so we probably want to list shared folders
		}

		dir := path.Dir(f.root)
		if dir == "." {
			dir = f.root
		}

		// root is not empty so we have find the right shared folder if it exists
		id, err := f.findSharedFolder(dir)
		if err != nil {
			// if we didn't find the specified shared folder we have to bail out here
			return nil, err
		}
		// we found the specified shared folder so let's mount it
		// this will add it to the users normal root namespace and allows us
		// to actually perform operations on it using the normal api endpoints.
		err = f.mountSharedFolder(id)
		if err != nil {
			switch e := err.(type) {
			case sharing.MountFolderAPIError:
				if e.EndpointError == nil || (e.EndpointError != nil && e.EndpointError.Tag != sharing.MountFolderErrorAlreadyMounted) {
					return nil, err
				}
			default:
				return nil, err
			}
			// if the moint failed we have to abort here
		}
		// if the mount succeeded it's now a normal folder in the users root namespace
		// we disable shared folder mode and proceed normally
		f.opt.SharedFolders = false
	}

	f.features.Fill(f)

	// If root starts with / then use the actual root
	if strings.HasPrefix(root, "/") {
```
```diff
@@ -355,6 +436,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	}
		fs.Debugf(f, "Using root namespace %q", f.ns)
	}
	f.setRoot(root)

	// See if the root is actually an object
	_, err = f.getFileMetadata(f.slashRoot)
```
```diff
@@ -465,9 +547,150 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	if f.opt.SharedFiles {
		return f.findSharedFile(remote)
	}
	return f.newObjectWithInfo(remote, nil)
}

// listSharedFoldersApi lists all available shared folders mounted and not mounted
// we'll need the id later so we have to return them in original format
func (f *Fs) listSharedFolders() (entries fs.DirEntries, err error) {
	started := false
	var res *sharing.ListFoldersResult
	for {
		if !started {
			arg := sharing.ListFoldersArgs{
				Limit: 100,
			}
			err := f.pacer.Call(func() (bool, error) {
				res, err = f.sharing.ListFolders(&arg)
				return shouldRetry(err)
			})
			if err != nil {
				return nil, err
			}
			started = true
		} else {
			arg := sharing.ListFoldersContinueArg{
				Cursor: res.Cursor,
			}
			err := f.pacer.Call(func() (bool, error) {
				res, err = f.sharing.ListFoldersContinue(&arg)
				return shouldRetry(err)
			})
			if err != nil {
				return nil, errors.Wrap(err, "list continue")
			}
		}
		for _, entry := range res.Entries {
			leaf := f.opt.Enc.ToStandardName(entry.Name)
			d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
			entries = append(entries, d)
			if err != nil {
				return nil, err
			}
		}
		if res.Cursor == "" {
			break
		}
	}

	return entries, nil
}

// findSharedFolder find the id for a given shared folder name
// somewhat annoyingly there is no endpoint to query a shared folder by it's name
// so our only option is to iterate over all shared folders
func (f *Fs) findSharedFolder(name string) (id string, err error) {
	entries, err := f.listSharedFolders()
	if err != nil {
		return "", err
	}
	for _, entry := range entries {
		if entry.(*fs.Dir).Remote() == name {
			return entry.(*fs.Dir).ID(), nil
		}
	}
	return "", fs.ErrorDirNotFound
}

// mountSharedFolders mount a shared folder to the root namespace
func (f *Fs) mountSharedFolder(id string) error {
	arg := sharing.MountFolderArg{
		SharedFolderId: id,
	}
	err := f.pacer.Call(func() (bool, error) {
		_, err := f.sharing.MountFolder(&arg)
		return shouldRetry(err)
	})
	return err
}

// listSharedFolders lists shared the user as access to (note this means individual
// files not files contained in shared folders)
func (f *Fs) listReceivedFiles() (entries fs.DirEntries, err error) {
	started := false
	var res *sharing.ListFilesResult
	for {
		if !started {
			arg := sharing.ListFilesArg{
				Limit: 100,
			}
			err := f.pacer.Call(func() (bool, error) {
				res, err = f.sharing.ListReceivedFiles(&arg)
				return shouldRetry(err)
			})
			if err != nil {
				return nil, err
			}
			started = true
		} else {
			arg := sharing.ListFilesContinueArg{
				Cursor: res.Cursor,
			}
			err := f.pacer.Call(func() (bool, error) {
				res, err = f.sharing.ListReceivedFilesContinue(&arg)
				return shouldRetry(err)
			})
			if err != nil {
				return nil, errors.Wrap(err, "list continue")
			}
		}
		for _, entry := range res.Entries {
			fmt.Printf("%+v\n", entry)
			entryPath := entry.Name
			o := &Object{
				fs:      f,
				url:     entry.PreviewUrl,
				remote:  entryPath,
				modTime: entry.TimeInvited,
			}
			if err != nil {
				return nil, err
			}
			entries = append(entries, o)
		}
		if res.Cursor == "" {
			break
		}
	}
	return entries, nil
}

func (f *Fs) findSharedFile(name string) (o *Object, err error) {
	files, err := f.listReceivedFiles()
	if err != nil {
		return nil, err
	}
	for _, entry := range files {
		if entry.(*Object).remote == name {
			return entry.(*Object), nil
		}
	}
	return nil, fs.ErrorObjectNotFound
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
```
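listSharedFolders and listReceivedFiles both follow Dropbox's cursor pagination: one initial List call, then ListContinue with the returned cursor until the cursor comes back empty. The loop skeleton, against a hypothetical paged API:

```go
package main

import "fmt"

// page is a hypothetical paged API response: a batch of items plus a
// cursor that is empty once the listing is exhausted.
type page struct {
	Items  []string
	Cursor string
}

// fetch stands in for the initial List call (cursor == "") and the
// ListContinue call (cursor != "") of a cursor-paginated API.
func fetch(cursor string) page {
	switch cursor {
	case "":
		return page{Items: []string{"a", "b"}, Cursor: "c1"}
	case "c1":
		return page{Items: []string{"c"}, Cursor: ""} // last page
	}
	return page{}
}

func main() {
	var all []string
	cursor := ""
	for {
		res := fetch(cursor)
		all = append(all, res.Items...)
		if res.Cursor == "" {
			break // empty cursor: listing finished
		}
		cursor = res.Cursor
	}
	fmt.Println(all) // [a b c]
}
```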
```diff
@@ -478,6 +701,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	if f.opt.SharedFiles {
		return f.listReceivedFiles()
	}
	if f.opt.SharedFolders {
		return f.listSharedFolders()
	}

	root := f.slashRoot
	if dir != "" {
		root += "/" + dir
```
```diff
@@ -541,7 +771,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
		leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
		remote := path.Join(dir, leaf)
		if folderInfo != nil {
			d := fs.NewDir(remote, time.Now())
			d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
			entries = append(entries, d)
		} else if fileInfo != nil {
			o, err := f.newObjectWithInfo(remote, fileInfo)
```
```diff
@@ -564,6 +794,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	if f.opt.SharedFiles || f.opt.SharedFolders {
		return nil, fserrors.NoRetryError(errors.New("not support in shared files mode"))
	}
	// Temporary Object under construction
	o := &Object{
		fs: f,
```
```diff
@@ -579,6 +812,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	if f.opt.SharedFiles || f.opt.SharedFolders {
		return fserrors.NoRetryError(errors.New("not support in shared files mode"))
	}
	root := path.Join(f.slashRoot, dir)

	// can't create or run metadata on root
```
```diff
@@ -656,6 +892,9 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	if f.opt.SharedFiles || f.opt.SharedFolders {
		return fserrors.NoRetryError(errors.New("not support in shared files mode"))
	}
	return f.purgeCheck(ctx, dir, true)
}
```
```diff
@@ -927,8 +1166,16 @@ func (o *Object) Remote() string {
	return o.remote
}

// ID returns the object id
func (o *Object) ID() string {
	return o.id
}

// Hash returns the dropbox special hash
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
		return "", fserrors.NoRetryError(errors.New("not support in shared files mode"))
	}
	if t != DbHashType {
		return "", hash.ErrUnsupported
	}
```
```diff
@@ -946,8 +1193,9 @@ func (o *Object) Size() int64 {

// setMetadataFromEntry sets the fs data from a files.FileMetadata
//
// This isn't a complete set of metadata and has an inacurate date
// This isn't a complete set of metadata and has an inaccurate date
func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
	o.id = info.Id
	o.bytes = int64(info.Size)
	o.modTime = info.ClientModified
	o.hash = info.ContentHash
```
@@ -1016,10 +1264,27 @@ func (o *Object) Storable() bool {
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
if o.fs.opt.SharedFiles {
|
||||
if len(options) != 0 {
|
||||
return nil, errors.New("OpenOptions not supported for shared files")
|
||||
}
|
||||
arg := sharing.GetSharedLinkMetadataArg{
|
||||
Url: o.url,
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, in, err = o.fs.sharing.GetSharedLinkFile(&arg)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
fs.FixRangeOption(options, o.bytes)
|
||||
headers := fs.OpenOptionHeaders(options)
|
||||
arg := files.DownloadArg{
|
||||
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
|
||||
Path: o.id,
|
||||
ExtraHeaders: headers,
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
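The shared-file branch of Open above goes through the same pacer.Call pattern used throughout these diffs. As a rough sketch of the semantics that pattern assumes (retry while the callback returns true, sleeping between attempts; rclone's real pacer also rate-limits and backs off adaptively), in plain Go:

package main

import (
	"errors"
	"fmt"
	"time"
)

// callWithRetry is a minimal stand-in for the pacer.Call pattern above:
// fn reports (retry, err); we loop while retry is true, sleeping between
// attempts. This is a sketch, not rclone's actual pacer implementation.
func callWithRetry(maxTries int, delay time.Duration, fn func() (bool, error)) error {
	var err error
	for try := 1; try <= maxTries; try++ {
		var retry bool
		retry, err = fn()
		if !retry {
			return err
		}
		time.Sleep(delay)
	}
	return err
}

func main() {
	tries := 0
	err := callWithRetry(3, 10*time.Millisecond, func() (bool, error) {
		tries++
		if tries < 3 {
			return true, errors.New("transient")
		}
		return false, nil
	})
	fmt.Println(tries, err) // 3 <nil>
}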
@@ -1153,6 +1418,9 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
return fserrors.NoRetryError(errors.New("not support in shared files mode"))
}
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
@@ -1181,6 +1449,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
return fserrors.NoRetryError(errors.New("not support in shared files mode"))
}
err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
@@ -1201,4 +1472,5 @@ var (
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)

@@ -306,10 +306,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
exisitingObj, err := f.NewObject(ctx, src.Remote())
existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)

@@ -6,7 +6,6 @@ import (
"crypto/tls"
"io"
"net/textproto"
"os"
"path"
"runtime"
"strings"
@@ -22,10 +21,15 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)

var (
currentUser = env.CurrentUser()
)

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -42,7 +46,7 @@ func init() {
}},
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
Help: "FTP username, leave blank for current username, " + currentUser,
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21)",
@@ -311,7 +315,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
}
user := opt.User
if user == "" {
user = os.Getenv("USER")
user = currentUser
}
port := opt.Port
if port == "" {

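Both this FTP change and the sftp change further down replace ad-hoc $USER lookups with a shared lib/env.CurrentUser helper. The sftp hunk below shows the logic being removed from that backend; presumably the shared helper keeps the same fallback chain, roughly:

package main

import (
	"fmt"
	"os"
	"os/user"
)

// currentUserName mirrors the fallback logic of the removed sftp
// readCurrentUser helper (which lib/env.CurrentUser presumably
// centralizes): prefer user.Current, then $USER, then $LOGNAME.
func currentUserName() string {
	if usr, err := user.Current(); err == nil {
		return usr.Username
	}
	if name := os.Getenv("USER"); name != "" {
		return name
	}
	return os.Getenv("LOGNAME")
}

func main() {
	fmt.Println(currentUserName())
}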
@@ -841,27 +841,20 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
remote: remote,
}

rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
if !f.opt.BucketPolicyOnly {
rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL)
}
var rewriteResponse *storage.RewriteResponse
for {
err = f.pacer.Call(func() (bool, error) {
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
var newObject *storage.Object
err = f.pacer.Call(func() (bool, error) {
copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
if !f.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
}
if rewriteResponse.Done {
break
}
rewriteRequest.RewriteToken(rewriteResponse.RewriteToken)
fs.Debugf(dstObj, "Continuing rewrite %d bytes done", rewriteResponse.TotalBytesRewritten)
newObject, err = copyObject.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
// Set the metadata for the new object while we have it
dstObj.setMetaData(rewriteResponse.Resource)
dstObj.setMetaData(newObject)
return dstObj, nil
}

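The hunk above swaps the token-driven Objects.Rewrite loop (resume with RewriteToken until Done) for a single Objects.Copy request wrapped in the pacer. A minimal sketch of the new shape against the google.golang.org/api/storage/v1 client, assuming an already-authenticated *storage.Service:

package main

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

// copyOnce shows the single-request copy the diff moves to: one
// Objects.Copy call instead of a Rewrite loop that must be resumed
// with RewriteToken until Done is true.
func copyOnce(ctx context.Context, svc *storage.Service, srcBucket, srcPath, dstBucket, dstPath string) (*storage.Object, error) {
	return svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil).Context(ctx).Do()
}

func main() {
	// Constructing a real *storage.Service needs credentials; this is a sketch.
	log.Println("see copyOnce for the single-request copy shape")
}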
@@ -115,7 +115,7 @@ func TestIntegration(t *testing.T) {
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
})

// Check it is there in the date/month/year heirachy
// Check it is there in the date/month/year hierarchy
// 2013-07-13 is the creation date of the folder
checkPresent := func(t *testing.T, objPath string) {
entries, err := f.List(ctx, objPath)

@@ -4,7 +4,7 @@ package hubic

// This uses the normal swift mechanism to update the credentials and
// ignores the expires field returned by the Hubic API. This may need
// to be revisted after some actual experience.
// to be revisited after some actual experience.

import (
"context"

@@ -153,9 +153,9 @@ type CustomerInfo struct {
AccountType string `json:"account_type"`
SubscriptionType string `json:"subscription_type"`
Usage int64 `json:"usage"`
Qouta int64 `json:"quota"`
Quota int64 `json:"quota"`
BusinessUsage int64 `json:"business_usage"`
BusinessQouta int64 `json:"business_quota"`
BusinessQuota int64 `json:"business_quota"`
WriteLocked bool `json:"write_locked"`
ReadLocked bool `json:"read_locked"`
LockedCause interface{} `json:"locked_cause"`
@@ -386,7 +386,7 @@ type Error struct {
Cause string `xml:"cause"`
}

// Error returns a string for the error and statistifes the error interface
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("error %d", e.StatusCode)
if e.Message != "" {

@@ -107,7 +107,7 @@ func init() {
}
}

fmt.Printf("Use legacy authentification?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n")
fmt.Printf("Use legacy authentication?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n")
if config.Confirm(false) {
v1config(ctx, name, m)
} else {
@@ -230,7 +230,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// v1config configure a jottacloud backend using legacy authentification
// v1config configure a jottacloud backend using legacy authentication
func v1config(ctx context.Context, name string, m configmap.Mapper) {
srv := rest.NewClient(fshttp.NewClient(fs.Config))

@@ -323,7 +323,7 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
return deviceRegistration, err
}

// doAuthV1 runs the actual token request for V1 authentification
// doAuthV1 runs the actual token request for V1 authentication
func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
// prepare our token request with username and password
values := url.Values{}
@@ -365,7 +365,7 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
return token, err
}

// v2config configure a jottacloud backend using the modern JottaCli token based authentification
// v2config configure a jottacloud backend using the modern JottaCli token based authentication
func v2config(ctx context.Context, name string, m configmap.Mapper) {
srv := rest.NewClient(fshttp.NewClient(fs.Config))

@@ -405,7 +405,7 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
m.Set("configVersion", strconv.Itoa(configVersion))
}

// doAuthV2 runs the actual token request for V2 authentification
// doAuthV2 runs the actual token request for V2 authentication
func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
if err != nil {
@@ -730,9 +730,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
if err == fs.ErrorNotAFile {
err = nil
}
return err
})

@@ -1469,8 +1466,6 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
size := src.Size()
md5String, err := src.Hash(ctx, hash.MD5)
if err != nil || md5String == "" {
@@ -1517,7 +1512,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}

// If the file state is INCOMPLETE and CORRPUT, try to upload a then
// If the file state is INCOMPLETE and CORRUPT, try to upload a then
if response.State != "COMPLETED" {
// how much do we still have to upload?
remainingBytes := size - response.ResumePos

@@ -192,7 +192,7 @@ This option must not be used by an ordinary user. It is intended only to
facilitate remote troubleshooting of backend issues. Strict meaning of
flags is not documented and not guaranteed to persist between releases.
Quirks will be removed when the backend grows stable.
Supported quirks: atomicmkdir binlist gzip insecure retry400`,
Supported quirks: atomicmkdir binlist`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -238,9 +238,6 @@ func shouldRetry(res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, e
reAuthErr := f.reAuthorize(opts, err)
return reAuthErr == nil, err // return an original error
}
if res != nil && res.StatusCode == 400 && f.quirks.retry400 {
return true, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
}

@@ -276,7 +273,7 @@ type Fs struct {
root string // root path
opt Options // parsed options
speedupGlobs []string // list of file name patterns eligible for speedup
speedupAny bool // true if all file names are aligible for speedup
speedupAny bool // true if all file names are eligible for speedup
features *fs.Features // optional features
srv *rest.Client // REST API client
cli *http.Client // underlying HTTP client (for authorize)
@@ -342,7 +339,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.UserAgent != "" {
clientConfig.UserAgent = opt.UserAgent
}
clientConfig.NoGzip = !f.quirks.gzip // Send not "Accept-Encoding: gzip" like official client
clientConfig.NoGzip = true // Mimic official client, skip sending "Accept-Encoding: gzip"
f.cli = fshttp.NewClient(&clientConfig)

f.srv = rest.NewClient(f.cli)
@@ -350,12 +347,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.srv.SetHeader("Accept", "*/*") // Send "Accept: */*" with every request like official client
f.srv.SetErrorHandler(errorHandler)

if f.quirks.insecure {
transport := f.cli.Transport.(*fshttp.Transport).Transport
transport.TLSClientConfig.InsecureSkipVerify = true
transport.ProxyConnectHeader = http.Header{"User-Agent": {clientConfig.UserAgent}}
}

if err = f.authorize(ctx, false); err != nil {
return nil, err
}
@@ -388,30 +379,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Internal maintenance flags (to be removed when the backend matures).
// Primarily intended to facilitate remote support and troubleshooting.
type quirks struct {
gzip bool
insecure bool
binlist bool
atomicmkdir bool
retry400 bool
}

func (q *quirks) parseQuirks(option string) {
for _, flag := range strings.Split(option, ",") {
switch strings.ToLower(strings.TrimSpace(flag)) {
case "gzip":
// This backend mimics the official client which never sends the
// "Accept-Encoding: gzip" header. However, enabling compression
// might be good for performance.
// Use this quirk to investigate the performance impact.
// Remove this quirk if performance does not improve.
q.gzip = true
case "insecure":
// The mailru disk-o protocol is not documented. To compare HTTP
// stream against the official client one can use Telerik Fiddler,
// which introduces a self-signed certificate. This quirk forces
// the Go http layer to accept it.
// Remove this quirk when the backend reaches maturity.
q.insecure = true
case "binlist":
// The official client sometimes uses a so called "bin" protocol,
// implemented in the listBin file system method below. This method
@@ -424,18 +398,11 @@ func (q *quirks) parseQuirks(option string) {
case "atomicmkdir":
// At the moment rclone requires Mkdir to return success if the
// directory already exists. However, such programs as borgbackup
// or restic use mkdir as a locking primitive and depend on its
// atomicity. This quirk is a workaround. It can be removed
// when the above issue is investigated.
// use mkdir as a locking primitive and depend on its atomicity.
// Remove this quirk when the above issue is investigated.
q.atomicmkdir = true
case "retry400":
// This quirk will help in troubleshooting a very rare "Error 400"
// issue. It can be removed if the problem does not show up
// for a year or so. See the below issue:
// https://github.com/ivandeex/rclone/issues/14
q.retry400 = true
default:
// Just ignore all unknown flags
// Ignore unknown flags
}
}
}
@@ -2247,7 +2214,7 @@ func (e *endHandler) handle(err error) error {
return io.EOF
}

// serverPool backs server dispacher
// serverPool backs server dispatcher
type serverPool struct {
pool pendingServerMap
mu sync.Mutex

@@ -221,7 +221,7 @@ func (f *Fs) setRoot(root string) {
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

// NewFs contstructs an Fs from the path, bucket:path
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)

@@ -254,7 +254,7 @@ type MoveItemRequest struct {
//Always Type:view and Scope:anonymous for public sharing
type CreateShareLinkRequest struct {
Type string `json:"type"` //Link type in View, Edit or Embed
Scope string `json:"scope,omitempty"` //Optional. Scope in anonymousi, organization
Scope string `json:"scope,omitempty"` //Optional. Scope in anonymous, organization
}

//CreateShareLinkResponse is the response from CreateShareLinkRequest

@@ -104,9 +104,8 @@ type ItemResult struct {

// Hashes contains the supported hashes
type Hashes struct {
SHA1 string `json:"sha1"`
MD5 string `json:"md5"`
SHA256 string `json:"sha256"`
SHA1 string `json:"sha1"`
MD5 string `json:"md5"`
}

// UploadFileResponse is the response from /uploadfile

@@ -885,13 +885,6 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
// EU region supports SHA1 and SHA256 (but rclone doesn't
// support SHA256 yet).
//
// https://forum.rclone.org/t/pcloud-to-local-no-hashes-in-common/19440
if f.opt.Hostname == "eapi.pcloud.com" {
return hash.Set(hash.SHA1)
}
return hash.Set(hash.MD5 | hash.SHA1)
}

@@ -1132,7 +1125,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Special treatment for a 0 length upload. This doesn't work
// with PUT even with Content-Length set (by setting
// opts.Body=0), so upload it as a multpart form POST with
// opts.Body=0), so upload it as a multipart form POST with
// Content-Length set.
if size == 0 {
formReader, contentType, overhead, err := rest.MultipartUpload(in, opts.Parameters, "content", leaf)

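The workaround in that hunk exists because some HTTP stacks and servers mishandle a 0-length PUT body; wrapping the empty payload in a multipart form gives the POST a definite, non-zero Content-Length. A standard-library sketch of the same idea (the URL and form field are placeholders, not pcloud's real endpoint):

package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/http"
)

// zeroByteUpload wraps an empty file in a multipart form so the POST
// carries an explicit non-zero Content-Length even though the file
// payload itself is 0 bytes. Sketch only; field/URL are hypothetical.
func zeroByteUpload(url, field, name string) (*http.Response, error) {
	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	if _, err := w.CreateFormFile(field, name); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", url, &body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	req.ContentLength = int64(body.Len()) // multipart framing, never zero
	return http.DefaultClient.Do(req)
}

func main() {
	fmt.Println("zeroByteUpload is a sketch; point it at a test server")
}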
@@ -236,10 +236,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
// defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
exisitingObj, err := f.NewObject(ctx, src.Remote())
existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)

@@ -115,7 +115,7 @@ func (o *Object) MimeType(ctx context.Context) string {

// setMetadataFromEntry sets the fs data from a putio.File
//
// This isn't a complete set of metadata and has an inacurate date
// This isn't a complete set of metadata and has an inaccurate date
func (o *Object) setMetadataFromEntry(info putio.File) error {
o.file = &info
o.modtime = info.UpdatedAt.Time

@@ -104,7 +104,7 @@ enough memory, then increasing this will speed up the transfers.`,
This is the number of chunks of the same file that are uploaded
concurrently.

NB if you set this to > 1 then the checksums of multpart uploads
NB if you set this to > 1 then the checksums of multipart uploads
become corrupted (the uploads themselves are not corrupted though).

If you are uploading small numbers of large file over high speed link

@@ -5,6 +5,7 @@ import (
"bytes"
"context"
"crypto/md5"
"crypto/tls"
"encoding/base64"
"encoding/hex"
"encoding/xml"
@@ -1203,6 +1204,19 @@ This option controls how often unused buffers will be removed from the pool.`,
Default: memoryPoolUseMmap,
Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, {
Name: "disable_http2",
Default: false,
Advanced: true,
Help: `Disable usage of http2 for S3 backends

There is currently an unsolved issue with the s3 (specifically minio) backend
and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be
disabled here. When the issue is solved this flag will be removed.

See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631

`,
},
}})
}
@@ -1260,6 +1274,7 @@ type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
DisableHTTP2 bool `config:"disable_http2"`
}

// Fs represents a remote s3 server
@@ -1381,6 +1396,19 @@ func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}

// getClient makes an http client according to the options
func getClient(opt *Options) *http.Client {
// TODO: Do we need cookies too?
t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
if opt.DisableHTTP2 {
t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
}
})
return &http.Client{
Transport: t,
}
}

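getClient relies on a documented net/http escape hatch: setting Transport.TLSNextProto to a non-nil empty map prevents HTTP/2 from being negotiated, so the client falls back to HTTP/1.1. A standalone sketch of just that trick:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

// newHTTP1Client disables HTTP/2 the same way the getClient change does:
// a non-nil, empty TLSNextProto map stops the transport from negotiating
// h2 during the TLS handshake.
func newHTTP1Client() *http.Client {
	t := &http.Transport{
		TLSNextProto: map[string]func(string, *tls.Conn) http.RoundTripper{},
	}
	return &http.Client{Transport: t}
}

func main() {
	resp, err := newHTTP1Client().Get("https://example.com")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto) // HTTP/1.1
}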
// s3Connection makes a connection to s3
func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
// Make the auth
@@ -1391,6 +1419,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
}

lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service

def := defaults.Get()
def.Config.HTTPClient = lowTimeoutClient

@@ -1459,7 +1488,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
awsConfig := aws.NewConfig().
WithMaxRetries(0). // Rely on rclone's retry logic
WithCredentials(cred).
WithHTTPClient(fshttp.NewClient(fs.Config)).
WithHTTPClient(getClient(opt)).
WithS3ForcePathStyle(opt.ForcePathStyle).
WithS3UseAccelerate(opt.UseAccelerateEndpoint).
WithS3UsEast1RegionalEndpoint(endpoints.RegionalS3UsEast1Endpoint)
@@ -1494,7 +1523,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
if req.Config.Credentials == credentials.AnonymousCredentials {
return
}
sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
v2sign(opt, req.HTTPRequest)
}
c.Handlers.Sign.Clear()
c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
@@ -1573,7 +1602,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ses: ses,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
srv: fshttp.NewClient(fs.Config),
srv: getClient(opt),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
@@ -2335,7 +2364,7 @@ All the objects shown will be marked for restore, then
rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard

It returns a list of status dictionaries with Remote and Status
keys. The Status will be OK if it was successfull or an error message
keys. The Status will be OK if it was successful or an error message
if not.

[
@@ -2500,7 +2529,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
// listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
//
// Note that rather lazily we treat key as a prefix so it matches
// directories and objects. This could suprise the user if they ask
// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uploads []*s3.MultipartUpload, err error) {
var (
@@ -3033,7 +3062,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

// read the md5sum if available
// - for non multpart
// - for non multipart
// - so we can add a ContentMD5
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
@@ -3173,6 +3202,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
err = o.readMetaData(ctx)
// Empty an Etag which is a valid md5sum for multipart
// uploads. This works around a bug in KS3 where the ETag is a
// correctly formed md5sum for multipart uploads
if multipart && matchMd5.MatchString(strings.Trim(strings.ToLower(o.etag), `"`)) {
o.etag = ""
}
return err
}

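matchMd5 itself is not shown in the hunk above; the pattern below is an assumption about what it plausibly checks (32 lowercase hex digits, i.e. an ETag that looks like a plain md5sum rather than the usual md5-N multipart form):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// matchMd5 is hypothetical here (the real definition is outside this
// hunk): an ETag that is exactly 32 lowercase hex digits looks like a
// plain md5sum, which a multipart upload's ETag should never be.
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)

func looksLikeMd5(etag string) bool {
	return matchMd5.MatchString(strings.Trim(strings.ToLower(etag), `"`))
}

func main() {
	fmt.Println(looksLikeMd5(`"9e107d9d372bb6826bd81d3542a419d6"`))   // true
	fmt.Println(looksLikeMd5(`"9e107d9d372bb6826bd81d3542a419d6-2"`)) // false: normal multipart ETag
}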
@@ -9,7 +9,10 @@ import (
"net/http"
"sort"
"strings"
"sync"
"time"

"github.com/rclone/rclone/fs"
)

// URL parameters that need to be added to the signature
@@ -33,12 +36,20 @@ var s3ParamsToSign = map[string]struct{}{
"response-cache-control": {},
"response-content-disposition": {},
"response-content-encoding": {},
"lifecycle": {},
"website": {},
"delete": {},
"cors": {},
"restore": {},
}

// Warn once about empty endpoint
var warnEmptyEndpointOnce sync.Once

// sign signs requests using v2 auth
//
// Cobbled together from goamz and aws-sdk-go
func sign(AccessKey, SecretKey string, req *http.Request) {
func v2sign(opt *Options, req *http.Request) {
// Set date
date := time.Now().UTC().Format(time.RFC1123)
req.Header.Set("Date", date)
@@ -48,11 +59,26 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
if uri == "" {
uri = "/"
}
// If not using path style then need to stick the bucket on
// the start of the requests if doing a bucket based query
if !opt.ForcePathStyle {
if opt.Endpoint == "" {
warnEmptyEndpointOnce.Do(func() {
fs.Logf(nil, "If using v2 auth with AWS and force_path_style=false, endpoint must be set in the config")
})
} else if req.URL.Host != opt.Endpoint {
// read the bucket off the start of the hostname
i := strings.IndexRune(req.URL.Host, '.')
if i >= 0 {
uri = "/" + req.URL.Host[:i] + uri
}
}
}

// Look through headers of interest
var md5 string
var contentType string
var headersToSign []string
var headersToSign [][2]string // slice of key, value pairs
for k, v := range req.Header {
k = strings.ToLower(k)
switch k {
@@ -63,15 +89,26 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
default:
if strings.HasPrefix(k, "x-amz-") {
vall := strings.Join(v, ",")
headersToSign = append(headersToSign, k+":"+vall)
headersToSign = append(headersToSign, [2]string{k, vall})
}
}
}
// Make headers of interest into canonical string
var joinedHeadersToSign string
if len(headersToSign) > 0 {
sort.StringSlice(headersToSign).Sort()
joinedHeadersToSign = strings.Join(headersToSign, "\n") + "\n"
// sort by keys
sort.Slice(headersToSign, func(i, j int) bool {
return headersToSign[i][0] < headersToSign[j][0]
})
// join into key:value\n
var out strings.Builder
for _, kv := range headersToSign {
out.WriteString(kv[0])
out.WriteRune(':')
out.WriteString(kv[1])
out.WriteRune('\n')
}
joinedHeadersToSign = out.String()
}

// Look for query parameters which need to be added to the signature
@@ -96,11 +133,11 @@ func sign(AccessKey, SecretKey string, req *http.Request) {

// Make signature
payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri
hash := hmac.New(sha1.New, []byte(SecretKey))
hash := hmac.New(sha1.New, []byte(opt.SecretAccessKey))
_, _ = hash.Write([]byte(payload))
signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
base64.StdEncoding.Encode(signature, hash.Sum(nil))

// Set signature in request
req.Header.Set("Authorization", "AWS "+AccessKey+":"+string(signature))
req.Header.Set("Authorization", "AWS "+opt.AccessKeyID+":"+string(signature))
}

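Putting the pieces of v2sign together, the string-to-sign and HMAC are easy to reproduce in isolation. A sketch (joinedHeaders must already be sorted "key:value\n" pairs, as the new [][2]string code guarantees):

package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

// v2Signature reproduces the shape of the signature built above: an
// HMAC-SHA1 over "METHOD\nmd5\ncontent-type\ndate\n<sorted x-amz
// headers>canonical-uri", base64 encoded.
func v2Signature(secretKey, method, md5, contentType, date, joinedHeaders, uri string) string {
	payload := method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeaders + uri
	mac := hmac.New(sha1.New, []byte(secretKey))
	_, _ = mac.Write([]byte(payload))
	return base64.StdEncoding.EncodeToString(mac.Sum(nil))
}

func main() {
	sig := v2Signature("secret", "GET", "", "", "Mon, 02 Jan 2006 15:04:05 UTC", "x-amz-acl:private\n", "/bucket/key")
	fmt.Println("Authorization: AWS AKID:" + sig)
}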
@@ -11,7 +11,6 @@ import (
"io"
"io/ioutil"
"os"
"os/user"
"path"
"regexp"
"strconv"
@@ -33,6 +32,7 @@ import (
"github.com/rclone/rclone/lib/readers"
sshagent "github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/knownhosts"
)

const (
@@ -43,7 +43,7 @@ const (
)

var (
currentUser = readCurrentUser()
currentUser = env.CurrentUser()
)

func init() {
@@ -82,6 +82,21 @@ func init() {
Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
in the new OpenSSH format can't be used.`,
IsPassword: true,
}, {
Name: "pubkey_file",
Help: `Optional path to public key file.

Set this if you have a signed certificate you want to use for authentication.` + env.ShellExpandHelp,
}, {
Name: "known_hosts_file",
Help: `Optional path to known_hosts file.

Set this value to enable server host key validation.` + env.ShellExpandHelp,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "~/.ssh/known_hosts",
Help: "Use OpenSSH's known_hosts file",
}},
}, {
Name: "key_use_agent",
Help: `When set forces the usage of the ssh-agent.
@@ -190,6 +205,8 @@ type Options struct {
KeyPem string `config:"key_pem"`
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
PubKeyFile string `config:"pubkey_file"`
KnownHostsFile string `config:"known_hosts_file"`
KeyUseAgent bool `config:"key_use_agent"`
UseInsecureCipher bool `config:"use_insecure_cipher"`
DisableHashCheck bool `config:"disable_hashcheck"`
@@ -218,6 +235,7 @@ type Fs struct {
poolMu sync.Mutex
pool []*conn
pacer *fs.Pacer // pacer for operations
savedpswd string
}

// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -231,20 +249,6 @@ type Object struct {
sha1sum *string // Cached SHA1 checksum
}

// readCurrentUser finds the current user name or "" if not found
func readCurrentUser() (userName string) {
usr, err := user.Current()
if err == nil {
return usr.Username
}
// Fall back to reading $USER then $LOGNAME
userName = os.Getenv("USER")
if userName != "" {
return userName
}
return os.Getenv("LOGNAME")
}

// dial starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client.
@@ -410,6 +414,10 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// This will hold the Fs object. We need to create it here
// so we can refer to it in the SSH callback, but it's populated
// in NewFsWithConnection
f := &Fs{}
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
@@ -423,6 +431,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.Port == "" {
opt.Port = "22"
}

sshConfig := &ssh.ClientConfig{
User: opt.User,
Auth: []ssh.AuthMethod{},
@@ -431,6 +440,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ClientVersion: "SSH-2.0-" + fs.Config.UserAgent,
}

if opt.KnownHostsFile != "" {
hostcallback, err := knownhosts.New(opt.KnownHostsFile)
if err != nil {
return nil, errors.Wrap(err, "couldn't parse known_hosts_file")
}
sshConfig.HostKeyCallback = hostcallback
}

if opt.UseInsecureCipher {
sshConfig.Config.SetDefaults()
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc", "aes192-cbc", "aes256-cbc", "3des-cbc")
@@ -438,6 +455,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

keyFile := env.ShellExpand(opt.KeyFile)
pubkeyFile := env.ShellExpand(opt.PubKeyFile)
//keyPem := env.ShellExpand(opt.KeyPem)
// Add ssh agent-auth if no password or file or key PEM specified
if (opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {
@@ -507,7 +525,38 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "failed to parse private key file")
}
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))

// If a public key has been specified then use that
if pubkeyFile != "" {
certfile, err := ioutil.ReadFile(pubkeyFile)
if err != nil {
return nil, errors.Wrap(err, "unable to read cert file")
}

pk, _, _, _, err := ssh.ParseAuthorizedKey(certfile)
if err != nil {
return nil, errors.Wrap(err, "unable to parse cert file")
}

// And the signer for this, which includes the private key signer
// This is what we'll pass to the ssh client.
// Normally the ssh client will use the public key built
// into the private key, but we need to tell it to use the user
// specified public key cert. This signer is specific to the
// cert and will include the private key signer. Now ssh
// knows everything it needs.
cert, ok := pk.(*ssh.Certificate)
if !ok {
return nil, errors.New("public key file is not a certificate file: " + pubkeyFile)
}
pubsigner, err := ssh.NewCertSigner(cert, signer)
if err != nil {
return nil, errors.Wrap(err, "error generating cert signer")
}
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(pubsigner))
} else {
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))
}
}

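The certificate branch above follows the standard x/crypto/ssh recipe: parse the signed public key, check it really is an *ssh.Certificate, then wrap the private-key signer with ssh.NewCertSigner so the client presents the certificate instead of the bare public key. The same steps in isolation (paths are placeholders):

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

// certSigner parses a signed public key file and wraps the private-key
// signer so the SSH client presents the certificate during auth.
func certSigner(certPath string, signer ssh.Signer) (ssh.Signer, error) {
	data, err := ioutil.ReadFile(certPath)
	if err != nil {
		return nil, err
	}
	pk, _, _, _, err := ssh.ParseAuthorizedKey(data)
	if err != nil {
		return nil, err
	}
	cert, ok := pk.(*ssh.Certificate)
	if !ok {
		return nil, fmt.Errorf("%s is not a certificate", certPath)
	}
	return ssh.NewCertSigner(cert, signer)
}

func main() {
	// A real caller gets the signer from ssh.ParsePrivateKey first.
	log.Println("see certSigner for the certificate-auth pattern")
}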
// Auth from password if specified
|
||||
@@ -519,30 +568,45 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
|
||||
}
|
||||
|
||||
// Ask for password if none was defined and we're allowed to
|
||||
// Config for password if none was defined and we're allowed to
|
||||
// We don't ask now; we ask if the ssh connection succeeds
|
||||
if opt.Pass == "" && opt.AskPassword {
|
||||
_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
|
||||
clearpass := config.ReadPassword()
|
||||
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
|
||||
sshConfig.Auth = append(sshConfig.Auth, ssh.PasswordCallback(f.getPass))
|
||||
}
|
||||
|
||||
return NewFsWithConnection(ctx, name, root, m, opt, sshConfig)
|
||||
return NewFsWithConnection(ctx, f, name, root, m, opt, sshConfig)
|
||||
}
|
||||
|
||||
// If we're in password mode and ssh connection succeeds then this
|
||||
// callback is called. First time around we ask the user, and then
|
||||
// save it so on reconnection we give back the previous string.
|
||||
// This removes the ability to let the user correct a mistaken entry,
|
||||
// but means that reconnects are transparent.
|
||||
// We'll re-use config.Pass for this, 'cos we know it's not been
|
||||
// specified.
|
||||
func (f *Fs) getPass() (string, error) {
|
||||
for f.savedpswd == "" {
|
||||
_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
|
||||
f.savedpswd = config.ReadPassword()
|
||||
}
|
||||
return f.savedpswd, nil
|
||||
}
|
||||
|
||||
// NewFsWithConnection creates a new Fs object from the name and root and an ssh.ClientConfig. It connects to
|
||||
// the host specified in the ssh.ClientConfig
|
||||
func NewFsWithConnection(ctx context.Context, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
absRoot: root,
|
||||
opt: *opt,
|
||||
m: m,
|
||||
config: sshConfig,
|
||||
url: "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
|
||||
mkdirLock: newStringLock(),
|
||||
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
}
|
||||
func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
|
||||
// Populate the Filesystem Object
|
||||
f.name = name
|
||||
f.root = root
|
||||
f.absRoot = root
|
||||
f.opt = *opt
|
||||
f.m = m
|
||||
f.config = sshConfig
|
||||
f.url = "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root
|
||||
f.mkdirLock = newStringLock()
|
||||
f.pacer = fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
|
||||
f.savedpswd = ""
|
||||
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
SlowHash: true,
|
||||
@@ -888,7 +952,7 @@ func (f *Fs) run(cmd string) ([]byte, error) {
|
||||
|
||||
session, err := c.sshClient.NewSession()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "run: get SFTP sessiion")
|
||||
return nil, errors.Wrap(err, "run: get SFTP session")
|
||||
}
|
||||
defer func() {
|
||||
_ = session.Close()
|
||||
|
||||
@@ -95,7 +95,7 @@ type UploadSpecification struct {
|
||||
ChunkURI string `json:"ChunkUri"` // Specifies the URI the client must send the file data to
|
||||
FinishURI string `json:"FinishUri"` // If provided, specifies the final call the client must perform to finish the upload process
|
||||
ProgressData string `json:"ProgressData"` // Allows the client to check progress of standard uploads
|
||||
IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supproted.
|
||||
IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supported.
|
||||
ResumeIndex int64 `json:"ResumeIndex"` // Specifies the initial index for resuming, if IsResume is true.
|
||||
ResumeOffset int64 `json:"ResumeOffset"` // Specifies the initial file offset by bytes, if IsResume is true
|
||||
ResumeFileHash string `json:"ResumeFileHash"` // Specifies the MD5 hash of the first ResumeOffset bytes of the partial file found at the server
|
||||
|
||||
@@ -1090,7 +1090,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
|
||||
} else if err != nil {
|
||||
return nil, errors.Wrap(err, "copy: failed to examine destination dir")
|
||||
} else {
|
||||
// otherwise need to copy via a temporary directlry
|
||||
// otherwise need to copy via a temporary directory
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -350,7 +350,7 @@ func (f *Fs) getAuth(req *http.Request) (err error) {
|
||||
// if have auth, check it is in date
|
||||
if f.opt.Authorization == "" || f.opt.User == "" || f.authExpiry.IsZero() || time.Until(f.authExpiry) < expiryLeeway {
|
||||
// Get the auth token
|
||||
f.srv.SetSigner(nil) // temporariliy remove the signer so we don't infinitely recurse
|
||||
f.srv.SetSigner(nil) // temporarily remove the signer so we don't infinitely recurse
|
||||
err = f.getAuthToken(ctx)
|
||||
f.srv.SetSigner(f.getAuth) // replace signer
|
||||
if err != nil {
|
||||
|
||||
@@ -67,12 +67,12 @@ func init() {
|
||||
log.Fatalf("Couldn't create access grant: %v", err)
|
||||
}
|
||||
|
||||
serialziedAccess, err := access.Serialize()
|
||||
serializedAccess, err := access.Serialize()
|
||||
if err != nil {
|
||||
log.Fatalf("Couldn't serialize access grant: %v", err)
|
||||
}
|
||||
configMapper.Set("satellite_address", satellite)
|
||||
configMapper.Set("access_grant", serialziedAccess)
|
||||
configMapper.Set("access_grant", serializedAccess)
|
||||
} else if provider == existingProvider {
|
||||
config.FileDeleteKey(name, "satellite_address")
|
||||
config.FileDeleteKey(name, "api_key")
|
||||
|
||||
@@ -61,7 +61,7 @@ func (p *EpAll) Action(ctx context.Context, upstreams []*upstream.Fs, path strin
|
||||
return p.epall(ctx, upstreams, path)
|
||||
}
|
||||
|
||||
// ActionEntries is ACTION category policy but receivng a set of candidate entries
|
||||
// ActionEntries is ACTION category policy but receiving a set of candidate entries
|
||||
func (p *EpAll) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
|
||||
if len(entries) == 0 {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
|
||||
@@ -106,7 +106,7 @@ func (p *EpMfs) Search(ctx context.Context, upstreams []*upstream.Fs, path strin
|
||||
return p.mfs(upstreams)
|
||||
}
|
||||
|
||||
// SearchEntries is SEARCH category policy but receivng a set of candidate entries
|
||||
// SearchEntries is SEARCH category policy but receiving a set of candidate entries
|
||||
func (p *EpMfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
|
||||
if len(entries) == 0 {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
|
||||
@@ -3,6 +3,7 @@ package policy
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/union/upstream"
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -19,10 +20,12 @@ type EpRand struct {
|
||||
}
|
||||
|
||||
func (p *EpRand) rand(upstreams []*upstream.Fs) *upstream.Fs {
|
||||
rand.Seed(time.Now().Unix())
|
||||
return upstreams[rand.Intn(len(upstreams))]
|
||||
}
|
||||
|
||||
func (p *EpRand) randEntries(entries []upstream.Entry) upstream.Entry {
|
||||
rand.Seed(time.Now().Unix())
|
||||
return entries[rand.Intn(len(entries))]
|
||||
}
|
||||
|
||||
|
||||
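Note that these hunks reseed the global math/rand source on every call with one-second granularity, so two picks within the same second repeat the sequence. A common alternative (not what the diff does) is one rand.Rand per picker, seeded once with nanoseconds:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// picker keeps its own rand.Rand seeded once, instead of reseeding the
// shared global source on every pick. Note rand.Rand is not safe for
// concurrent use; guard it with a mutex if pickers are shared.
type picker struct {
	rnd *rand.Rand
}

func newPicker() *picker {
	return &picker{rnd: rand.New(rand.NewSource(time.Now().UnixNano()))}
}

func (p *picker) pick(n int) int { return p.rnd.Intn(n) }

func main() {
	p := newPicker()
	fmt.Println(p.pick(10))
}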
@@ -14,7 +14,7 @@ func init() {
// FF stands for first found
// Search category: same as epff.
// Action category: same as epff.
// Create category: Given the order of the candiates, act on the first one found.
// Create category: Given the order of the candidates, act on the first one found.
type FF struct {
EpFF
}

@@ -60,7 +60,7 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
Upstreams fs.SpaceSepList `config:"upstreams"`
Remotes fs.SpaceSepList `config:"remotes"` // Depreated
Remotes fs.SpaceSepList `config:"remotes"` // Deprecated
ActionPolicy string `config:"action_policy"`
CreatePolicy string `config:"create_policy"`
SearchPolicy string `config:"search_policy"`
@@ -572,7 +572,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entriess := make([][]upstream.Entry, len(f.upstreams))
entriesList := make([][]upstream.Entry, len(f.upstreams))
errs := Errors(make([]error, len(f.upstreams)))
multithread(len(f.upstreams), func(i int) {
u := f.upstreams[i]
@@ -585,7 +585,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for j, e := range entries {
uEntries[j], _ = u.WrapEntry(e)
}
entriess[i] = uEntries
entriesList[i] = uEntries
})
if len(errs) == len(errs.FilterNil()) {
errs = errs.Map(func(e error) error {
@@ -599,7 +599,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
return nil, errs.Err()
}
return f.mergeDirEntries(entriess)
return f.mergeDirEntries(entriesList)
}

// ListR lists the objects and directories of the Fs starting
@@ -619,7 +619,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
var entriess [][]upstream.Entry
var entriesList [][]upstream.Entry
errs := Errors(make([]error, len(f.upstreams)))
var mutex sync.Mutex
multithread(len(f.upstreams), func(i int) {
@@ -631,7 +631,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
uEntries[j], _ = u.WrapEntry(e)
}
mutex.Lock()
entriess = append(entriess, uEntries)
entriesList = append(entriesList, uEntries)
mutex.Unlock()
return nil
}
@@ -658,7 +658,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
return errs.Err()
}
entries, err := f.mergeDirEntries(entriess)
entries, err := f.mergeDirEntries(entriesList)
if err != nil {
return err
}
@@ -729,9 +729,9 @@ func (f *Fs) searchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
return f.searchPolicy.SearchEntries(entries...)
}

func (f *Fs) mergeDirEntries(entriess [][]upstream.Entry) (fs.DirEntries, error) {
func (f *Fs) mergeDirEntries(entriesList [][]upstream.Entry) (fs.DirEntries, error) {
entryMap := make(map[string]([]upstream.Entry))
for _, en := range entriess {
for _, en := range entriesList {
if en == nil {
continue
}

@@ -52,7 +52,7 @@ type Object struct {
f *Fs
}

// Entry describe a warpped fs.DirEntry interface with the
// Entry describe a wrapped fs.DirEntry interface with the
// information of upstream Fs
type Entry interface {
fs.DirEntry

@@ -1129,14 +1129,10 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var resp *http.Response
fs.FixRangeOption(options, o.size)
opts := rest.Opts{
Method: "GET",
Path: o.filePath(),
Options: options,
ExtraHeaders: map[string]string{
"Depth": "0",
},
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)

@@ -12,7 +12,7 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestWebdavNexcloud:",
RemoteName: "TestWebdavNextcloud:",
NilObject: (*webdav.Object)(nil),
})
}

@@ -20,7 +20,7 @@ type ResourceInfoRequestOptions struct {
Fields []string
}

//ResourceInfoResponse struct is returned by the API for metedata requests.
//ResourceInfoResponse struct is returned by the API for metadata requests.
type ResourceInfoResponse struct {
PublicKey string `json:"public_key"`
Name string `json:"name"`

@@ -36,7 +36,6 @@ var (
cgo = flag.Bool("cgo", false, "Use cgo for the build")
noClean = flag.Bool("no-clean", false, "Don't clean the build directory before running.")
tags = flag.String("tags", "", "Space separated list of build tags")
buildmode = flag.String("buildmode", "", "Passed to go build -buildmode flag")
compileOnly = flag.Bool("compile-only", false, "Just build the binary, not the zip.")
)

@@ -301,15 +300,8 @@ func compileArch(version, goos, goarch, dir string) bool {
"-trimpath",
"-o", output,
"-tags", *tags,
}
if *buildmode != "" {
args = append(args,
"-buildmode", *buildmode,
)
}
args = append(args,
"..",
)
}
env := []string{
"GOOS=" + goos,
"GOARCH=" + stripVersion(goarch),

@@ -141,7 +141,7 @@ def main():
for name in sorted(bugfixes.keys()):
out(name)

# Read old changlog and split
# Read old changelog and split
with open("docs/content/changelog.md") as fd:
old_changelog = fd.read()
heading = "# Changelog"

@@ -19,7 +19,7 @@ var (
// Flags
numberOfFiles = flag.Int("n", 1000, "Number of files to create")
averageFilesPerDirectory = flag.Int("files-per-directory", 10, "Average number of files per directory")
maxDepth = flag.Int("max-depth", 10, "Maximum depth of directory heirachy")
maxDepth = flag.Int("max-depth", 10, "Maximum depth of directory hierarchy")
minFileSize = flag.Int64("min-size", 0, "Minimum size of file to create")
maxFileSize = flag.Int64("max-size", 100, "Maximum size of files to create")
minFileNameLength = flag.Int("min-name-length", 4, "Minimum size of file to create")
@@ -61,7 +61,7 @@ func fileName() (name string) {
return name
}

// dir is a directory in the directory heirachy being built up
// dir is a directory in the directory hierarchy being built up
type dir struct {
name string
depth int
@@ -69,7 +69,7 @@ type dir struct {
parent *dir
}

// Create a random directory heirachy under d
// Create a random directory hierarchy under d
func (d *dir) createDirectories() {
for totalDirectories < directoriesToCreate {
newDir := &dir{
@@ -91,7 +91,7 @@ func (d *dir) createDirectories() {
return
}

// list the directory heirachy
// list the directory hierarchy
func (d *dir) list(path string, output []string) []string {
dirPath := filepath.Join(path, d.name)
output = append(output, dirPath)

@@ -15,12 +15,10 @@ description: |
vendor: "rclone"
homepage: "https://rclone.org"
license: "MIT"
contents:
- src: ./rclone
dst: /usr/bin/rclone
- src: ./README.html
dst: /usr/share/doc/rclone/README.html
- src: ./README.txt
dst: /usr/share/doc/rclone/README.txt
- src: ./rclone.1
dst: /usr/share/man/man1/rclone.1
# No longer supported? See https://github.com/goreleaser/nfpm/issues/144
# bindir: "/usr/bin"
files:
./rclone: "/usr/bin/rclone"
./README.html: "/usr/share/doc/rclone/README.html"
./README.txt: "/usr/share/doc/rclone/README.txt"
./rclone.1: "/usr/share/man/man1/rclone.1"

@@ -10,6 +10,8 @@ import (
"os"
"os/exec"
"regexp"

"github.com/coreos/go-semver/semver"
)

// version=$(sed <VERSION -e 's/\.[0-9]+*$//g')
@@ -28,7 +30,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
cmd := exec.Command("git", "log", "--oneline", from+".."+to)
out, err := cmd.Output()
if err != nil {
log.Fatalf("failed to run git log: %v", err)
log.Fatalf("failed to run git log %s: %v", from+".."+to, err)
}
logMap = map[string]string{}
logs = []string{}
@@ -53,15 +55,20 @@ func main() {
if len(args) != 0 {
log.Fatalf("Syntax: %s", os.Args[0])
}
// v1.54.0
versionBytes, err := ioutil.ReadFile("VERSION")
if err != nil {
log.Fatalf("Failed to read version: %v", err)
}
i := bytes.LastIndexByte(versionBytes, '.')
version := string(versionBytes[:i])
log.Printf("Finding commits not in stable %s", version)
masterMap, masterLogs := readCommits(version+".0", "master")
stableMap, _ := readCommits(version+".0", version+"-stable")
if versionBytes[0] == 'v' {
versionBytes = versionBytes[1:]
}
versionBytes = bytes.TrimSpace(versionBytes)
semver := semver.New(string(versionBytes))
stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
log.Printf("Finding commits in %v not in stable %s", semver, stable)
masterMap, masterLogs := readCommits(stable+".0", "master")
stableMap, _ := readCommits(stable+".0", stable+"-stable")
for _, logMessage := range masterLogs {
// Commit found in stable already
if _, found := stableMap[logMessage]; found {

cmd/cmd.go
@@ -1,4 +1,4 @@
-// Package cmd implemnts the rclone command
+// Package cmd implements the rclone command
 //
 // It is in a sub package so it's internals can be re-used elsewhere
 package cmd
@@ -9,6 +9,7 @@ package cmd
 import (
 	"fmt"
 	"log"
+	"math/rand"
 	"os"
 	"os/exec"
 	"path"
@@ -20,6 +21,7 @@ import (
 	"sync"
 	"time"

+	systemd "github.com/iguanesolutions/go-systemd/v5"
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
@@ -34,7 +36,7 @@ import (
 	"github.com/rclone/rclone/fs/rc/rcflags"
 	"github.com/rclone/rclone/fs/rc/rcserver"
 	"github.com/rclone/rclone/lib/atexit"
-	"github.com/rclone/rclone/lib/random"
+	"github.com/rclone/rclone/lib/terminal"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 )
@@ -288,6 +290,11 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
 	}
 	fs.Debugf(nil, "%d go routines active\n", runtime.NumGoroutine())

+	if fs.Config.Progress && fs.Config.ProgressTerminalTitle {
+		// Clear terminal title
+		terminal.WriteTerminalTitle("")
+	}
+
 	// dump all running go-routines
 	if fs.Config.Dump&fs.DumpGoRoutines != 0 {
 		err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
@@ -364,6 +371,12 @@ func StartStats() func() {

 // initConfig is run by cobra after initialising the flags
 func initConfig() {
+	// Activate logger systemd support if systemd invocation ID is detected
+	_, sysdLaunch := systemd.GetInvocationID()
+	if sysdLaunch {
+		fs.Config.LogSystemdSupport = true // used during fslog.InitLogging()
+	}
+
 	// Start the logger
 	fslog.InitLogging()
@@ -379,6 +392,13 @@ func initConfig() {
 	// Write the args for debug purposes
 	fs.Debugf("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)

+	// Inform user about systemd log support now that we have a logger
+	if sysdLaunch {
+		fs.Debugf("rclone", "systemd logging support automatically activated")
+	} else if fs.Config.LogSystemdSupport {
+		fs.Debugf("rclone", "systemd logging support manually activated")
+	}
+
 	// Start the remote control server if configured
 	_, err = rcserver.Start(&rcflags.Opt)
 	if err != nil {
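The GetInvocationID call above comes from github.com/iguanesolutions/go-systemd/v5. As a rough sketch of the underlying mechanism (our assumption about how such detection works, not a description of that library's internals): systemd v232+ exports INVOCATION_ID into the environment of every unit it starts, so its presence is a usable launch signal:

```go
package main

import (
	"fmt"
	"os"
)

// launchedBySystemd sketches the detection idea: systemd (v232+) sets
// INVOCATION_ID for each unit it starts, so finding it in the
// environment suggests the process was started by systemd.
func launchedBySystemd() (string, bool) {
	return os.LookupEnv("INVOCATION_ID")
}

func main() {
	if id, ok := launchedBySystemd(); ok {
		fmt.Println("systemd invocation:", id)
	} else {
		fmt.Println("not launched by systemd")
	}
}
```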
@@ -493,7 +513,7 @@ func AddBackendFlags() {
 		if opt.IsPassword {
 			help += " (obscured)"
 		}
-		flag := pflag.CommandLine.VarPF(opt, name, opt.ShortOpt, help)
+		flag := flags.VarPF(pflag.CommandLine, opt, name, opt.ShortOpt, help)
 		if _, isBool := opt.Default.(bool); isBool {
 			flag.NoOptDefVal = "true"
 		}
@@ -512,9 +532,7 @@

 // Main runs rclone interpreting flags and commands out of os.Args
 func Main() {
-	if err := random.Seed(); err != nil {
-		log.Fatalf("Fatal error: %v", err)
-	}
+	rand.Seed(time.Now().Unix())
 	setupRootCommand(Root)
 	AddBackendFlags()
 	if err := Root.Execute(); err != nil {
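The random.Seed() helper on the removed side replaces the guessable time-based seed with a crypto-strong one (see CVE-2020-28924 in the changelog below). A minimal sketch of that general technique, not rclone's exact implementation:

```go
package main

import (
	cryptorand "crypto/rand"
	"encoding/binary"
	"fmt"
	mathrand "math/rand"
)

// seedMathRand seeds math/rand from crypto/rand: 8 random bytes are
// read and folded into the int64 seed, so the math/rand stream can no
// longer be predicted from the process start-up time.
func seedMathRand() error {
	var buf [8]byte
	if _, err := cryptorand.Read(buf[:]); err != nil {
		return err
	}
	mathrand.Seed(int64(binary.LittleEndian.Uint64(buf[:])))
	return nil
}

func main() {
	if err := seedMathRand(); err != nil {
		panic(err)
	}
	fmt.Println(mathrand.Int63())
}
```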
@@ -270,7 +270,7 @@ func (fsys *FS) Releasedir(path string, fh uint64) (errc int) {
 	return fsys.closeHandle(fh)
 }

-// Statfs reads overall stats on the filessystem
+// Statfs reads overall stats on the filesystem
 func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
 	defer log.Trace(path, "")("stat=%+v, errc=%d", stat, &errc)
 	const blockSize = 4096
@@ -1,32 +0,0 @@
-// Build for macos with the brew tag to handle the absence
-// of fuse and print an appropriate error message
-
-// +build brew
-// +build darwin
-
-package cmount
-
-import (
-	"github.com/pkg/errors"
-	"github.com/rclone/rclone/cmd/mountlib"
-	"github.com/rclone/rclone/vfs"
-)
-
-func init() {
-	name := "mount"
-	cmd := mountlib.NewMountCommand(name, false, mount)
-	cmd.Aliases = append(cmd.Aliases, "cmount")
-	mountlib.AddRc("cmount", mount)
-}
-
-// mount the file system
-//
-// The mount point will be ready when this returns.
-//
-// returns an error, and an error channel for the serve process to
-// report an error when fusermount is called.
-func mount(_ *vfs.VFS, _ string, _ *mountlib.Options) (<-chan error, func() error, error) {
-	return nil, nil, errors.New("mount is not supported on MacOS when installed via Homebrew. " +
-		"Please install the binaries available at https://rclone." +
-		"org/downloads/ instead if you want to use the mount command")
-}
@@ -1,6 +1,6 @@
 // Build for cmount for unsupported platforms to stop go complaining
 // about "no buildable Go source files "

-// +build !linux,!darwin,!freebsd,!windows !brew !cgo !cmount
+// +build !linux,!darwin,!freebsd,!windows !cgo !cmount

 package cmount
@@ -111,7 +111,7 @@ whether the password is already obscured or not and put unobscured
 passwords into the config file. If you want to be 100% certain that
 the passwords get obscured then use the "--obscure" flag, or if you
 are 100% certain you are already passing obscured passwords then use
-"--no-obscure". You can also set osbscured passwords using the
+"--no-obscure". You can also set obscured passwords using the
 "rclone config password" command.
 `
@@ -43,7 +43,7 @@ This means that for most duplicated files the ` + "`dedupe`" + `
 command will not be interactive.

 ` + "`dedupe`" + ` considers files to be identical if they have the
-same hash. If the backend does not support hashes (eg crypt wrapping
+same file path and the same hash. If the backend does not support hashes (eg crypt wrapping
 Google Drive) then they will never be found to be identical. If you
 use the ` + "`--size-only`" + ` flag then files will be considered
 identical if they have the same size (any hash will be ignored). This
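To make the rule in the dedupe help text concrete, here is a small illustrative sketch. The types and helper are ours, hypothetical and not rclone's, but they encode the identity test the text describes: same file path and same hash, or same size when --size-only is in effect, and never identical when the backend has no hashes:

```go
package main

import "fmt"

// fileInfo is a hypothetical stand-in for a remote object.
type fileInfo struct {
	Path string
	Hash string // empty when the backend does not support hashes
	Size int64
}

// identical applies the rule from the help text above.
func identical(a, b fileInfo, sizeOnly bool) bool {
	if sizeOnly {
		return a.Size == b.Size // any hash is ignored
	}
	if a.Hash == "" || b.Hash == "" {
		return false // no hashes (eg crypt wrapping Google Drive)
	}
	return a.Path == b.Path && a.Hash == b.Hash
}

func main() {
	a := fileInfo{Path: "dir/file.txt", Hash: "abc123", Size: 42}
	b := fileInfo{Path: "dir/file.txt", Hash: "abc123", Size: 42}
	fmt.Println(identical(a, b, false)) // true
}
```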
@@ -2,6 +2,7 @@ package genautocomplete

 import (
 	"log"
+	"os"

 	"github.com/rclone/rclone/cmd"
 	"github.com/spf13/cobra"
@@ -29,11 +30,20 @@ them directly

 If you supply a command line argument the script will be written
 there.
+
+If output_file is "-", then the output will be written to stdout.
 `,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(0, 1, command, args)
 		out := "/etc/bash_completion.d/rclone"
 		if len(args) > 0 {
+			if args[0] == "-" {
+				err := cmd.Root.GenBashCompletion(os.Stdout)
+				if err != nil {
+					log.Fatal(err)
+				}
+				return
+			}
 			out = args[0]
 		}
 		err := cmd.Root.GenBashCompletionFile(out)
@@ -2,6 +2,7 @@ package genautocomplete

 import (
 	"log"
+	"os"

 	"github.com/rclone/rclone/cmd"
 	"github.com/spf13/cobra"
@@ -29,11 +30,20 @@ them directly

 If you supply a command line argument the script will be written
 there.
+
+If output_file is "-", then the output will be written to stdout.
 `,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(0, 1, command, args)
 		out := "/etc/fish/completions/rclone.fish"
 		if len(args) > 0 {
+			if args[0] == "-" {
+				err := cmd.Root.GenFishCompletion(os.Stdout, true)
+				if err != nil {
+					log.Fatal(err)
+				}
+				return
+			}
 			out = args[0]
 		}
 		err := cmd.Root.GenFishCompletionFile(out, true)
@@ -11,8 +11,10 @@ import (
 func TestCompletionBash(t *testing.T) {
 	tempFile, err := ioutil.TempFile("", "completion_bash")
 	assert.NoError(t, err)
-	defer func() { _ = tempFile.Close() }()
-	defer func() { _ = os.Remove(tempFile.Name()) }()
+	defer func() {
+		_ = tempFile.Close()
+		_ = os.Remove(tempFile.Name())
+	}()

 	bashCommandDefinition.Run(bashCommandDefinition, []string{tempFile.Name()})
@@ -21,11 +23,32 @@ func TestCompletionBash(t *testing.T) {
 	assert.NotEmpty(t, string(bs))
 }

+func TestCompletionBashStdout(t *testing.T) {
+	originalStdout := os.Stdout
+	tempFile, err := ioutil.TempFile("", "completion_zsh")
+	assert.NoError(t, err)
+	defer func() {
+		_ = tempFile.Close()
+		_ = os.Remove(tempFile.Name())
+	}()
+
+	os.Stdout = tempFile
+	defer func() { os.Stdout = originalStdout }()
+
+	bashCommandDefinition.Run(bashCommandDefinition, []string{"-"})
+
+	output, err := ioutil.ReadFile(tempFile.Name())
+	assert.NoError(t, err)
+	assert.NotEmpty(t, string(output))
+}
+
 func TestCompletionZsh(t *testing.T) {
 	tempFile, err := ioutil.TempFile("", "completion_zsh")
 	assert.NoError(t, err)
-	defer func() { _ = tempFile.Close() }()
-	defer func() { _ = os.Remove(tempFile.Name()) }()
+	defer func() {
+		_ = tempFile.Close()
+		_ = os.Remove(tempFile.Name())
+	}()

 	zshCommandDefinition.Run(zshCommandDefinition, []string{tempFile.Name()})
@@ -34,11 +57,31 @@ func TestCompletionZsh(t *testing.T) {
 	assert.NotEmpty(t, string(bs))
 }

+func TestCompletionZshStdout(t *testing.T) {
+	originalStdout := os.Stdout
+	tempFile, err := ioutil.TempFile("", "completion_zsh")
+	assert.NoError(t, err)
+	defer func() {
+		_ = tempFile.Close()
+		_ = os.Remove(tempFile.Name())
+	}()
+
+	os.Stdout = tempFile
+	defer func() { os.Stdout = originalStdout }()
+
+	zshCommandDefinition.Run(zshCommandDefinition, []string{"-"})
+	output, err := ioutil.ReadFile(tempFile.Name())
+	assert.NoError(t, err)
+	assert.NotEmpty(t, string(output))
+}
+
 func TestCompletionFish(t *testing.T) {
 	tempFile, err := ioutil.TempFile("", "completion_fish")
 	assert.NoError(t, err)
-	defer func() { _ = tempFile.Close() }()
-	defer func() { _ = os.Remove(tempFile.Name()) }()
+	defer func() {
+		_ = tempFile.Close()
+		_ = os.Remove(tempFile.Name())
+	}()

 	fishCommandDefinition.Run(fishCommandDefinition, []string{tempFile.Name()})
@@ -46,3 +89,22 @@ func TestCompletionFish(t *testing.T) {
 	assert.NoError(t, err)
 	assert.NotEmpty(t, string(bs))
 }
+
+func TestCompletionFishStdout(t *testing.T) {
+	originalStdout := os.Stdout
+	tempFile, err := ioutil.TempFile("", "completion_zsh")
+	assert.NoError(t, err)
+	defer func() {
+		_ = tempFile.Close()
+		_ = os.Remove(tempFile.Name())
+	}()
+
+	os.Stdout = tempFile
+	defer func() { os.Stdout = originalStdout }()
+
+	fishCommandDefinition.Run(fishCommandDefinition, []string{"-"})
+
+	output, err := ioutil.ReadFile(tempFile.Name())
+	assert.NoError(t, err)
+	assert.NotEmpty(t, string(output))
+}
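The new stdout tests above all repeat one pattern: swap os.Stdout for a temp file, run the command, read the file back. A generic sketch of that pattern (the captureStdout helper is ours, for illustration; note it is not safe for parallel tests, since os.Stdout is process-global):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// captureStdout redirects os.Stdout to a temp file while fn runs and
// returns what was written. Do not use with t.Parallel().
func captureStdout(fn func()) (string, error) {
	tmp, err := ioutil.TempFile("", "stdout")
	if err != nil {
		return "", err
	}
	defer func() {
		_ = tmp.Close()
		_ = os.Remove(tmp.Name())
	}()

	orig := os.Stdout
	os.Stdout = tmp
	fn()
	os.Stdout = orig

	out, err := ioutil.ReadFile(tmp.Name())
	return string(out), err
}

func main() {
	out, _ := captureStdout(func() { fmt.Println("hello") })
	fmt.Printf("captured %q\n", out)
}
```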
@@ -30,11 +30,20 @@ them directly

 If you supply a command line argument the script will be written
 there.
+
+If output_file is "-", then the output will be written to stdout.
 `,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(0, 1, command, args)
 		out := "/usr/share/zsh/vendor-completions/_rclone"
 		if len(args) > 0 {
+			if args[0] == "-" {
+				err := cmd.Root.GenZshCompletion(os.Stdout)
+				if err != nil {
+					log.Fatal(err)
+				}
+				return
+			}
 			out = args[0]
 		}
 		outFile, err := os.Create(out)
@@ -49,7 +49,7 @@ func init() {
 	flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", true, "Check control characters.")
 	flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file.")
 	flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", true, "Check max filename length.")
-	flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploadxs with indeterminate file size.")
+	flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.")
 }

 var commandDefinition = &cobra.Command{
@@ -107,13 +107,6 @@ func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error)
 	if err != nil {
 		return nil, translateError(err)
 	}
-	dirents = append(dirents, fuse.Dirent{
-		Type: fuse.DT_Dir,
-		Name: ".",
-	}, fuse.Dirent{
-		Type: fuse.DT_Dir,
-		Name: "..",
-	})
 	for _, node := range items {
 		name := node.Name()
 		if len(name) > mountlib.MaxLeafSize {
@@ -1,7 +1,7 @@
 // Build for mount for unsupported platforms to stop go complaining
 // about "no buildable Go source files "

-// Invert the build constraint: linux,go1.13 freebsd,go1.13
+// Invert the build constraint: linux,go1.13 darwin,go1.13 freebsd,go1.13
 //
 // !((linux&&go1.13) || (darwin&&go1.13) || (freebsd&&go1.13))
 // == !(linux&&go1.13) && !(darwin&&go1.13) && !(freebsd&&go1.13))
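For readers decoding the constraints above: in old-style // +build lines, a comma is AND, a space is OR, and stacked +build lines are ANDed together. The commented sketch below paraphrases the standard go/build rules (the package name is a placeholder, not a real file in the repo):

```go
// Old-style build constraints, as used in the hunks above:
//
//	// +build !linux,!darwin,!freebsd,!windows !cgo !cmount
//
// parses as (comma = AND, space = OR):
//
//	(!linux && !darwin && !freebsd && !windows) || !cgo || !cmount
//
// and stacked lines are ANDed, so inverting
//
//	// +build linux,go1.13 darwin,go1.13 freebsd,go1.13
//
// gives the De Morgan form quoted in the mount stub:
//
//	!(linux && go1.13) && !(darwin && go1.13) && !(freebsd && go1.13)
package example
```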
@@ -45,7 +45,7 @@ func newFileHandle(h vfs.Handle, fsys *FS) *FileHandle {
 	}
 }

-// Check interface satistfied
+// Check interface satisfied
 var _ fusefs.FileHandle = (*FileHandle)(nil)

 // The String method is for debug printing.
@@ -258,7 +258,7 @@ var _ fusefs.DirStream = (*dirStream)(nil)

 // Readdir opens a stream of directory entries.
 //
-// Readdir essentiallly returns a list of strings, and it is allowed
+// Readdir essentially returns a list of strings, and it is allowed
 // for Readdir to return different results from Lookup. For example,
 // you can return nothing for Readdir ("ls my-fuse-mount" is empty),
 // while still implementing Lookup ("ls my-fuse-mount/a-specific-file"
@@ -11,7 +11,7 @@ import (
 	"syscall"
 	"time"

-	"github.com/okzk/sdnotify"
+	sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/cmd"
 	"github.com/rclone/rclone/fs"
@@ -162,7 +162,7 @@ FUSE.
 First set up your remote using ` + "`rclone config`" + `. Check it works with ` + "`rclone ls`" + ` etc.

 You can either run mount in foreground mode or background (daemon) mode. Mount runs in
-foreground mode by default, use the --daemon flag to specify background mode mode.
+foreground mode by default, use the --daemon flag to specify background mode.
 Background mode is only supported on Linux and OSX, you can only run mount in
 foreground mode on Windows.
@@ -448,13 +448,13 @@ func Mount(VFS *vfs.VFS, mountpoint string, mount MountFn, opt *Options) error {

 	// Unmount on exit
 	fnHandle := atexit.Register(func() {
+		_ = sysdnotify.Stopping()
 		_ = unmount()
-		_ = sdnotify.Stopping()
 	})
 	defer atexit.Unregister(fnHandle)

 	// Notify systemd
-	if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
+	if err := sysdnotify.Ready(); err != nil {
 		return errors.Wrap(err, "failed to notify systemd")
 	}
@@ -479,8 +479,8 @@ waitloop:
 		}
 	}

+	_ = sysdnotify.Stopping()
 	_ = unmount()
-	_ = sdnotify.Stopping()

 	if err != nil {
 		return errors.Wrap(err, "failed to umount FUSE fs")
@@ -75,7 +75,7 @@ func helpText() (tr []string) {
 		" d delete file/directory",
 	}
 	if !clipboard.Unsupported {
-		tr = append(tr, " y copy current path to clipbard")
+		tr = append(tr, " y copy current path to clipboard")
 	}
 	tr = append(tr, []string{
 		" Y display current path",
@@ -1,7 +1,6 @@
 package http

 import (
-	"io"
 	"net/http"
 	"os"
 	"path"
@@ -173,11 +172,8 @@ func (s *server) serveFile(w http.ResponseWriter, r *http.Request, remote string
 	obj := entry.(fs.Object)
 	file := node.(*vfs.File)

-	// Set content length if we know how long the object is
-	knownSize := obj.Size() >= 0
-	if knownSize {
-		w.Header().Set("Content-Length", strconv.FormatInt(node.Size(), 10))
-	}
+	// Set content length since we know how long the object is
+	w.Header().Set("Content-Length", strconv.FormatInt(node.Size(), 10))

 	// Set content type
 	mimeType := fs.MimeType(r.Context(), obj)
@@ -214,19 +210,5 @@ func (s *server) serveFile(w http.ResponseWriter, r *http.Request, remote string
 	// FIXME in = fs.NewAccount(in, obj).WithBuffer() // account the transfer

 	// Serve the file
-	if knownSize {
-		http.ServeContent(w, r, remote, node.ModTime(), in)
-	} else {
-		// http.ServeContent can't serve unknown length files
-		if rangeRequest := r.Header.Get("Range"); rangeRequest != "" {
-			http.Error(w, "Can't use Range: on files of unknown length", http.StatusRequestedRangeNotSatisfiable)
-			return
-		}
-		n, err := io.Copy(w, in)
-		if err != nil {
-			fs.Errorf(obj, "Didn't finish writing GET request (wrote %d/unknown bytes): %v", n, err)
-			return
-		}
-	}
-
+	http.ServeContent(w, r, remote, node.ModTime(), in)
 }
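The branch deleted above encodes a general constraint worth keeping in mind: http.ServeContent needs an io.ReadSeeker (it seeks to the end to learn the size and to satisfy Range requests), so a body of unknown length has to be streamed directly and Range requests refused. A standalone sketch of that fallback, with our own handler and names:

```go
package main

import (
	"io"
	"net/http"
	"strings"
)

// serveUnknownLength streams a body whose size is not known up front.
// http.ServeContent cannot be used here because it relies on seeking
// to determine the content size and to answer Range requests.
func serveUnknownLength(w http.ResponseWriter, r *http.Request, in io.Reader) {
	if r.Header.Get("Range") != "" {
		http.Error(w, "Can't use Range: on files of unknown length",
			http.StatusRequestedRangeNotSatisfiable)
		return
	}
	// Content-Length is omitted; HTTP/1.1 falls back to chunked encoding.
	_, _ = io.Copy(w, in)
}

func main() {
	http.HandleFunc("/stream", func(w http.ResponseWriter, r *http.Request) {
		serveUnknownLength(w, r, strings.NewReader("data of unknown length"))
	})
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
```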
@@ -208,7 +208,7 @@ func TestGET(t *testing.T) {
 		body, err := ioutil.ReadAll(resp.Body)
 		require.NoError(t, err)

-		// Check we got a Last-Modifed header and that it is a valid date
+		// Check we got a Last-Modified header and that it is a valid date
 		if test.Status == http.StatusOK || test.Status == http.StatusPartialContent {
 			lastModified := resp.Header.Get("Last-Modified")
 			assert.NotEqual(t, "", lastModified, test.Golden)
@@ -61,7 +61,7 @@ to be used within the template to server pages:
 | .Name | The full path of a file/directory. |
 | .Title | Directory listing of .Name |
 | .Sort | The current sort used. This is changeable via ?sort= parameter |
-| | Sort Options: namedirfist,name,size,time (default namedirfirst) |
+| | Sort Options: namedirfirst,name,size,time (default namedirfirst) |
 | .Order | The current ordering used. This is changeable via ?order= parameter |
 | | Order Options: asc,desc (default asc) |
 | .Query | Currently unused. |
@@ -132,7 +132,7 @@ func Error(what interface{}, w http.ResponseWriter, text string, err error) {
 	}
 }

-// ProcessQueryParams takes and sorts/orders based on the request sort/order parameters and default is namedirfist/asc
+// ProcessQueryParams takes and sorts/orders based on the request sort/order parameters and default is namedirfirst/asc
 func (d *Directory) ProcessQueryParams(sortParm string, orderParm string) *Directory {
 	d.Sort = sortParm
 	d.Order = orderParm
@@ -27,7 +27,7 @@ var Help = strings.Replace(`
 If you supply the parameter |--auth-proxy /path/to/program| then
 rclone will use that program to generate backends on the fly which
 then are used to authenticate incoming requests. This uses a simple
-JSON based protocl with input on STDIN and output on STDOUT.
+JSON based protocol with input on STDIN and output on STDOUT.

 **PLEASE NOTE:** |--auth-proxy| and |--authorized-keys| cannot be used
 together, if |--auth-proxy| is set the authorized keys option will be
@@ -75,39 +75,6 @@ func (s *server) getVFS(what string, sshConn *ssh.ServerConn) (VFS *vfs.VFS) {
 	return VFS
 }

-// Accept a single connection - run in a go routine as the ssh
-// authentication can block
-func (s *server) acceptConnection(nConn net.Conn) {
-	what := describeConn(nConn)
-
-	// Before use, a handshake must be performed on the incoming net.Conn.
-	sshConn, chans, reqs, err := ssh.NewServerConn(nConn, s.config)
-	if err != nil {
-		fs.Errorf(what, "SSH login failed: %v", err)
-		return
-	}
-
-	fs.Infof(what, "SSH login from %s using %s", sshConn.User(), sshConn.ClientVersion())
-
-	// Discard all global out-of-band Requests
-	go ssh.DiscardRequests(reqs)
-
-	c := &conn{
-		what: what,
-		vfs:  s.getVFS(what, sshConn),
-	}
-	if c.vfs == nil {
-		fs.Infof(what, "Closing unauthenticated connection (couldn't find VFS)")
-		_ = nConn.Close()
-		return
-	}
-	c.handlers = newVFSHandler(c.vfs)
-
-	// Accept all channels
-	go c.handleChannels(chans)
-}
-
 // Accept connections and call them in a go routine
 func (s *server) acceptConnections() {
 	for {
 		nConn, err := s.listener.Accept()
@@ -118,7 +85,33 @@ func (s *server) acceptConnections() {
 			fs.Errorf(nil, "Failed to accept incoming connection: %v", err)
 			continue
 		}
-		go s.acceptConnection(nConn)
+		what := describeConn(nConn)
+
+		// Before use, a handshake must be performed on the incoming net.Conn.
+		sshConn, chans, reqs, err := ssh.NewServerConn(nConn, s.config)
+		if err != nil {
+			fs.Errorf(what, "SSH login failed: %v", err)
+			continue
+		}
+
+		fs.Infof(what, "SSH login from %s using %s", sshConn.User(), sshConn.ClientVersion())
+
+		// Discard all global out-of-band Requests
+		go ssh.DiscardRequests(reqs)
+
+		c := &conn{
+			what: what,
+			vfs:  s.getVFS(what, sshConn),
+		}
+		if c.vfs == nil {
+			fs.Infof(what, "Closing unauthenticated connection (couldn't find VFS)")
+			_ = nConn.Close()
+			continue
+		}
+		c.handlers = newVFSHandler(c.vfs)
+
+		// Accept all channels
+		go c.handleChannels(chans)
 	}
 }
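One design note on the two versions above: ssh.NewServerConn blocks while the client authenticates, so running the handshake inline in the accept loop stalls Accept for everyone else, which is what the v1.53.4 changelog entry "Fix authentication on one connection blocking others" refers to. The per-connection-goroutine shape, reduced to a sketch (function names are ours):

```go
package main

import "net"

// acceptLoop hands each connection to its own goroutine so that a slow
// or stalled handshake on one connection cannot block Accept for the
// rest.
func acceptLoop(l net.Listener, handle func(net.Conn)) {
	for {
		conn, err := l.Accept()
		if err != nil {
			return // listener closed
		}
		go handle(conn) // handshake and channel handling off the accept path
	}
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	go acceptLoop(l, func(c net.Conn) { _ = c.Close() })
	_ = l.Close()
}
```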
@@ -64,7 +64,7 @@ func TestTouchWithTimestamp(t *testing.T) {
 	checkFile(t, r.Fremote, srcFileName, "")
 }

-func TestTouchWithLognerTimestamp(t *testing.T) {
+func TestTouchWithLongerTimestamp(t *testing.T) {
 	r := fstest.NewRun(t)
 	defer r.Finalise()
@@ -409,3 +409,16 @@ put them back in again.` >}}
 * Lucas Kanashiro <lucas.kanashiro@canonical.com>
 * WarpedPixel <WarpedPixel@users.noreply.github.com>
 * Sam Edwards <sam@samedwards.ca>
+* wjielai <gouki0123@gmail.com>
+* Muffin King <jinxz_k@live.com>
+* Christopher Stewart <6573710+1f47a@users.noreply.github.com>
+* Russell Cattelan <cattelan@digitalelves.com>
+* gyutw <30371241+gyutw@users.noreply.github.com>
+* Hekmon <edouardhur@gmail.com>
+* LaSombra <lasombra@users.noreply.github.com>
+* Dov Murik <dov.murik@gmail.com>
+* Ameer Dawood <ameer1234567890@gmail.com>
+* Dan Hipschman <dan.hipschman@opendoor.com>
+* Josh Soref <jsoref@users.noreply.github.com>
+* David <david@staron.nl>
+* Ingo <ingo@hoffmann.cx>
@@ -5,114 +5,6 @@ description: "Rclone Changelog"

 # Changelog

-## v1.53.4 - 2021-01-20
-
-[See commits](https://github.com/rclone/rclone/compare/v1.53.3...v1.53.4)
-
-* Bug Fixes
-    * accounting: Fix data race in Transferred() (Maciej Zimnoch)
-    * build
-        * Stop tagged releases making a current beta (Nick Craig-Wood)
-        * Upgrade docker buildx action (Matteo Pietro Dazzi)
-        * Add -buildmode to cross-compile.go (Nick Craig-Wood)
-        * Fix docker build by upgrading ilteoood/docker_buildx (Nick Craig-Wood)
-        * Revert GitHub actions brew fix since this is now fixed (Nick Craig-Wood)
-        * Fix brew install --cask syntax for macOS build (Nick Craig-Wood)
-        * Update nfpm syntax to fix build of .deb/.rpm packages (Nick Craig-Wood)
-        * Fix for Windows build errors (Ivan Andreev)
-    * fs: Parseduration: fixed tests to use UTC time (Ankur Gupta)
-    * fshttp: Prevent overlap of HTTP headers in logs (Nathan Collins)
-    * rc
-        * Fix core/command giving 500 internal error (Nick Craig-Wood)
-        * Add Copy method to rc.Params (Nick Craig-Wood)
-        * Fix 500 error when marshalling errors from core/command (Nick Craig-Wood)
-    * plugins: Create plugins files only if webui is enabled. (negative0)
-    * serve http: Fix serving files of unknown length (Nick Craig-Wood)
-    * serve sftp: Fix authentication on one connection blocking others (Nick Craig-Wood)
-* Mount
-    * Add optional `brew` tag to throw an error when using mount in the binaries installed via Homebrew (Anagh Kumar Baranwal)
-    * Add "." and ".." to directories to match cmount and expectations (Nick Craig-Wood)
-* VFS
-    * Make cache dir absolute before using it to fix path too long errors (Nick Craig-Wood)
-* Chunker
-    * Improve detection of incompatible metadata (Ivan Andreev)
-* Google Cloud Storage
-    * Fix server side copy of large objects (Nick Craig-Wood)
-* Jottacloud
-    * Fix token renewer to fix long uploads (Nick Craig-Wood)
-    * Fix token refresh failed: is not a regular file error (Nick Craig-Wood)
-* Pcloud
-    * Only use SHA1 hashes in EU region (Nick Craig-Wood)
-* Sharefile
-    * Undo Fix backend due to API swapping integers for strings (Nick Craig-Wood)
-* WebDAV
-    * Fix Open Range requests to fix 4shared mount (Nick Craig-Wood)
-    * Add "Depth: 0" to GET requests to fix bitrix (Nick Craig-Wood)
-
-## v1.53.3 - 2020-11-19
-
-[See commits](https://github.com/rclone/rclone/compare/v1.53.2...v1.53.3)
-
-* Bug Fixes
-    * random: Fix incorrect use of math/rand instead of crypto/rand CVE-2020-28924 (Nick Craig-Wood)
-        * Passwords you have generated with `rclone config` may be insecure
-        * See [issue #4783](https://github.com/rclone/rclone/issues/4783) for more details and a checking tool
-    * random: Seed math/rand in one place with crypto strong seed (Nick Craig-Wood)
-* VFS
-    * Fix vfs/refresh calls with fs= parameter (Nick Craig-Wood)
-* Sharefile
-    * Fix backend due to API swapping integers for strings (Nick Craig-Wood)
-
-## v1.53.2 - 2020-10-26
-
-[See commits](https://github.com/rclone/rclone/compare/v1.53.1...v1.53.2)
-
-* Bug Fixes
-    * acounting
-        * Fix incorrect speed and transferTime in core/stats (Nick Craig-Wood)
-        * Stabilize display order of transfers on Windows (Nick Craig-Wood)
-    * operations
-        * Fix use of --suffix without --backup-dir (Nick Craig-Wood)
-        * Fix spurious "--checksum is in use but the source and destination have no hashes in common" (Nick Craig-Wood)
-    * build
-        * Work around GitHub actions brew problem (Nick Craig-Wood)
-        * Stop using set-env and set-path in the GitHub actions (Nick Craig-Wood)
-* Mount
-    * mount2: Fix the swapped UID / GID values (Russell Cattelan)
-* VFS
-    * Detect and recover from a file being removed externally from the cache (Nick Craig-Wood)
-    * Fix a deadlock vulnerability in downloaders.Close (Leo Luan)
-    * Fix a race condition in retryFailedResets (Leo Luan)
-    * Fix missed concurrency control between some item operations and reset (Leo Luan)
-    * Add exponential backoff during ENOSPC retries (Leo Luan)
-    * Add a missed update of used cache space (Leo Luan)
-    * Fix --no-modtime to not attempt to set modtimes (as documented) (Nick Craig-Wood)
-* Local
-    * Fix sizes and syncing with --links option on Windows (Nick Craig-Wood)
-* Chunker
-    * Disable ListR to fix missing files on GDrive (workaround) (Ivan Andreev)
-    * Fix upload over crypt (Ivan Andreev)
-* Fichier
-    * Increase maximum file size from 100GB to 300GB (gyutw)
-* Jottacloud
-    * Remove clientSecret from config when upgrading to token based authentication (buengese)
-    * Avoid double url escaping of device/mountpoint (albertony)
-    * Remove DirMove workaround as it's not required anymore - also (buengese)
-* Mailru
-    * Fix uploads after recent changes on server (Ivan Andreev)
-    * Fix range requests after june changes on server (Ivan Andreev)
-    * Fix invalid timestamp on corrupted files (fixes) (Ivan Andreev)
-* Onedrive
-    * Fix disk usage for sharepoint (Nick Craig-Wood)
-* S3
-    * Add missing regions for AWS (Anagh Kumar Baranwal)
-* Seafile
-    * Fix accessing libraries > 2GB on 32 bit systems (Muffin King)
-* SFTP
-    * Always convert the checksum to lower case (buengese)
-* Union
-    * Create root directories if none exist (Nick Craig-Wood)
-
 ## v1.53.1 - 2020-09-13

 [See commits](https://github.com/rclone/rclone/compare/v1.53.0...v1.53.1)
@@ -171,7 +63,7 @@ description: "Rclone Changelog"
     * Add reverse proxy pluginsHandler for serving plugins (Chaitanya Bankanhal)
     * Add `mount/listmounts` option for listing current mounts (Chaitanya Bankanhal)
     * Add `operations/uploadfile` to upload a file through rc using encoding multipart/form-data (Chaitanya Bankanhal)
-    * Add `core/copmmand` to execute rclone terminal commands. (Chaitanya Bankanhal)
+    * Add `core/command` to execute rclone terminal commands. (Chaitanya Bankanhal)
 * `rclone check`
     * Add reporting of filenames for same/missing/changed (Nick Craig-Wood)
     * Make check command obey `--dry-run`/`-i`/`--interactive` (Nick Craig-Wood)
@@ -280,7 +172,7 @@ description: "Rclone Changelog"
 * Google Cloud Storage
     * Add support for anonymous access (Kai Lüke)
 * Jottacloud
-    * Bring back legacy authentification for use with whitelabel versions (buengese)
+    * Bring back legacy authentication for use with whitelabel versions (buengese)
     * Switch to new api root - also implement a very ugly workaround for the DirMove failures (buengese)
 * Onedrive
     * Rework cancel of multipart uploads on rclone exit (Nick Craig-Wood)
@@ -430,7 +322,7 @@ all the docs and Edward Barker for helping re-write the front page.
     * Add `--header` flag to add HTTP headers to every HTTP transaction (Nick Craig-Wood)
     * Add `--check-first` to do all checking before starting transfers (Nick Craig-Wood)
     * Add `--track-renames-strategy` for configurable matching criteria for `--track-renames` (Bernd Schoolmann)
-    * Add `--cutoff-mode` hard,soft,catious (Shing Kit Chan & Franklyn Tackitt)
+    * Add `--cutoff-mode` hard,soft,cautious (Shing Kit Chan & Franklyn Tackitt)
     * Filter flags (eg `--files-from -`) can read from stdin (fishbullet)
     * Add `--error-on-no-transfer` option (Jon Fautley)
     * Implement `--order-by xxx,mixed` for copying some small and some big files (Nick Craig-Wood)
@@ -713,7 +605,7 @@ all the docs and Edward Barker for helping re-write the front page.
     * dbhashsum: Stop it returning UNSUPPORTED on dropbox (Nick Craig-Wood)
     * dedupe: Add missing modes to help string (Nick Craig-Wood)
     * operations
-        * Fix dedupe continuing on errors like insufficientFilePermisson (SezalAgrawal)
+        * Fix dedupe continuing on errors like insufficientFilePersimmon (SezalAgrawal)
         * Clear accounting before low level retry (Maciej Zimnoch)
         * Write debug message when hashes could not be checked (Ole Schütt)
        * Move interface assertion to tests to remove pflag dependency (Nick Craig-Wood)
@@ -777,7 +669,7 @@ all the docs and Edward Barker for helping re-write the front page.
 * S3
     * Re-implement multipart upload to fix memory issues (Nick Craig-Wood)
     * Add `--s3-copy-cutoff` for size to switch to multipart copy (Nick Craig-Wood)
-    * Add new region Asia Patific (Hong Kong) (Outvi V)
+    * Add new region Asia Pacific (Hong Kong) (Outvi V)
     * Reduce memory usage streaming files by reducing max stream upload size (Nick Craig-Wood)
     * Add `--s3-list-chunk` option for bucket listing (Thomas Kriechbaumer)
     * Force path style bucket access to off for AWS deprecation (Nick Craig-Wood)
@@ -1038,7 +930,7 @@ all the docs and Edward Barker for helping re-write the front page.
     * rcat: Fix slowdown on systems with multiple hashes (Nick Craig-Wood)
     * rcd: Fix permissions problems on cache directory with web gui download (Nick Craig-Wood)
 * Mount
-    * Default `--deamon-timout` to 15 minutes on macOS and FreeBSD (Nick Craig-Wood)
+    * Default `--daemon-timout` to 15 minutes on macOS and FreeBSD (Nick Craig-Wood)
     * Update docs to show mounting from root OK for bucket based (Nick Craig-Wood)
     * Remove nonseekable flag from write files (Nick Craig-Wood)
 * VFS
@@ -1201,7 +1093,7 @@ all the docs and Edward Barker for helping re-write the front page.
     * Add client side TLS/SSL flags `--ca-cert`/`--client-cert`/`--client-key` (Nick Craig-Wood)
     * Implement `--suffix-keep-extension` for use with `--suffix` (Nick Craig-Wood)
     * build:
-        * Switch to semvar compliant version tags to be go modules compliant (Nick Craig-Wood)
+        * Switch to semver compliant version tags to be go modules compliant (Nick Craig-Wood)
         * Update to use go1.12.x for the build (Nick Craig-Wood)
     * serve dlna: Add connection manager service description to improve compatibility (Dan Walters)
     * lsf: Add 'e' format to show encrypted names and 'o' for original IDs (Nick Craig-Wood)
@@ -39,10 +39,10 @@ See the [global flags page](/flags/) for global options not listed here.
 * [rclone backend](/commands/rclone_backend/) - Run a backend specific command.
 * [rclone cat](/commands/rclone_cat/) - Concatenates any files and sends them to stdout.
 * [rclone check](/commands/rclone_check/) - Checks the files in the source and destination match.
-* [rclone cleanup](/commands/rclone_cleanup/) - Clean up the remote if possible.
+* [rclone cleanup](/commands/rclone_cleanup/) - Clean up the remote if possible
 * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
-* [rclone copy](/commands/rclone_copy/) - Copy files from source to dest, skipping already copied.
-* [rclone copyto](/commands/rclone_copyto/) - Copy files from source to dest, skipping already copied.
+* [rclone copy](/commands/rclone_copy/) - Copy files from source to dest, skipping already copied
+* [rclone copyto](/commands/rclone_copyto/) - Copy files from source to dest, skipping already copied
 * [rclone copyurl](/commands/rclone_copyurl/) - Copy url content to dest.
 * [rclone cryptcheck](/commands/rclone_cryptcheck/) - Cryptcheck checks the integrity of a crypted remote.
 * [rclone cryptdecode](/commands/rclone_cryptdecode/) - Cryptdecode returns unencrypted file names.
@@ -56,7 +56,7 @@ See the [global flags page](/flags/) for global options not listed here.
 * [rclone listremotes](/commands/rclone_listremotes/) - List all the remotes in the config file.
 * [rclone ls](/commands/rclone_ls/) - List the objects in the path with size and path.
 * [rclone lsd](/commands/rclone_lsd/) - List all directories/containers/buckets in the path.
-* [rclone lsf](/commands/rclone_lsf/) - List directories and objects in remote:path formatted for parsing.
+* [rclone lsf](/commands/rclone_lsf/) - List directories and objects in remote:path formatted for parsing
 * [rclone lsjson](/commands/rclone_lsjson/) - List directories and objects in the path in JSON format.
 * [rclone lsl](/commands/rclone_lsl/) - List the objects in path with modification time, size and path.
 * [rclone md5sum](/commands/rclone_md5sum/) - Produces an md5sum file for all the objects in the path.
@@ -65,7 +65,7 @@ See the [global flags page](/flags/) for global options not listed here.
 * [rclone move](/commands/rclone_move/) - Move files from source to dest.
 * [rclone moveto](/commands/rclone_moveto/) - Move file or directory from source to dest.
 * [rclone ncdu](/commands/rclone_ncdu/) - Explore a remote with a text based user interface.
-* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone config file.
+* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone config file
 * [rclone purge](/commands/rclone_purge/) - Remove the path and all of its contents.
 * [rclone rc](/commands/rclone_rc/) - Run a command against a running rclone.
 * [rclone rcat](/commands/rclone_rcat/) - Copies standard input to file on remote.
@@ -29,7 +29,7 @@ the source match the files in the destination, not the other way
 around. This means that extra files in the destination that are not in
 the source will not be detected.

-The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
+The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--src-only`
 and `--error` flags write paths, one per line, to the file name (or
 stdout if it is `-`) supplied. What they write is described in the
 help below. For example `--differ` will write all paths which are
@@ -55,7 +55,6 @@ rclone check source:path dest:path [flags]
 ```
     --combined string   Make a combined report of changes to this file
     --differ string     Report all non-matching files to this file
-    --download          Check by downloading rather than with hash.
     --error string      Report all files with errors (hashing or reading) to this file
 -h, --help              help for check
     --match string      Report all matching files to this file
@@ -1,13 +1,13 @@
 ---
 title: "rclone cleanup"
-description: "Clean up the remote if possible."
+description: "Clean up the remote if possible"
 slug: rclone_cleanup
 url: /commands/rclone_cleanup/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/cleanup/ and as part of making a release run "make commanddocs"
 ---
 # rclone cleanup

-Clean up the remote if possible.
+Clean up the remote if possible

 ## Synopsis
@@ -1,13 +1,13 @@
 ---
 title: "rclone copy"
-description: "Copy files from source to dest, skipping already copied."
+description: "Copy files from source to dest, skipping already copied"
 slug: rclone_copy
 url: /commands/rclone_copy/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/copy/ and as part of making a release run "make commanddocs"
 ---
 # rclone copy

-Copy files from source to dest, skipping already copied.
+Copy files from source to dest, skipping already copied

 ## Synopsis
@@ -1,13 +1,13 @@
 ---
 title: "rclone copyto"
-description: "Copy files from source to dest, skipping already copied."
+description: "Copy files from source to dest, skipping already copied"
 slug: rclone_copyto
 url: /commands/rclone_copyto/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/copyto/ and as part of making a release run "make commanddocs"
 ---
 # rclone copyto

-Copy files from source to dest, skipping already copied.
+Copy files from source to dest, skipping already copied

 ## Synopsis
@@ -40,7 +40,7 @@ the source match the files in the destination, not the other way
 around. This means that extra files in the destination that are not in
 the source will not be detected.

-The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
+The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--src-only`
 and `--error` flags write paths, one per line, to the file name (or
 stdout if it is `-`) supplied. What they write is described in the
 help below. For example `--differ` will write all paths which are
@@ -27,6 +27,7 @@ them directly
 If you supply a command line argument the script will be written
 there.

+If output_file is `-`, then the output will be written to stdout.

 ```
 rclone genautocomplete bash [output_file] [flags]
@@ -27,6 +27,7 @@ them directly
 If you supply a command line argument the script will be written
 there.

+If output_file is `-`, then the output will be written to stdout.

 ```
 rclone genautocomplete fish [output_file] [flags]
@@ -27,6 +27,7 @@ them directly
 If you supply a command line argument the script will be written
 there.

+If output_file is `-`, then the output will be written to stdout.

 ```
 rclone genautocomplete zsh [output_file] [flags]
@@ -1,13 +1,13 @@
 ---
 title: "rclone lsf"
-description: "List directories and objects in remote:path formatted for parsing."
+description: "List directories and objects in remote:path formatted for parsing"
 slug: rclone_lsf
 url: /commands/rclone_lsf/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/lsf/ and as part of making a release run "make commanddocs"
 ---
 # rclone lsf

-List directories and objects in remote:path formatted for parsing.
+List directories and objects in remote:path formatted for parsing

 ## Synopsis
@@ -49,9 +49,6 @@ Stopping the mount manually:
     # OS X
     umount /path/to/local/mount

-**Note**: As of `rclone` 1.52.2, `rclone mount` now requires Go version 1.13
-or newer on some platforms depending on the underlying FUSE library in use.
-
 ## Installing on Windows

 To run rclone mount on Windows, you will need to
@@ -194,6 +191,9 @@ parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
 When --vfs-read-chunk-size-limit 500M is specified, the result would be
 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.

+Chunked reading will only work with --vfs-cache-mode < full, as the file will always
+be copied to the vfs cache before opening with --vfs-cache-mode full.
+
 ## VFS - Virtual File System

 This command uses the VFS layer. This adapts the cloud storage objects
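The doubling behaviour described above is easy to reproduce. A small sketch (the helper is our own; sizes in MiB) that yields exactly the chunk boundaries the docs list, 0-100, 100-300, 300-700, 700-1200 and 1200-1700:

```go
package main

import "fmt"

// chunkOffsets returns the read offsets produced by starting with an
// initial chunk size and doubling it after each chunk, capped at
// limit (limit <= 0 means no cap), until the whole size is covered.
func chunkOffsets(size, initial, limit int64) []int64 {
	var offsets []int64
	chunk := initial
	for off := int64(0); off < size; {
		offsets = append(offsets, off)
		off += chunk
		chunk *= 2
		if limit > 0 && chunk > limit {
			chunk = limit
		}
	}
	return offsets
}

func main() {
	// --vfs-read-chunk-size 100M, --vfs-read-chunk-size-limit 500M
	fmt.Println(chunkOffsets(1700, 100, 500)) // [0 100 300 700 1200]
}
```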
@@ -357,11 +357,6 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.

-**IMPORTANT** not all file systems support sparse files. In particular
-FAT/exFAT do not. Rclone will perform very badly if the cache
-directory is on a filesystem which doesn't support sparse files and it
-will log an ERROR message if one is detected.
-
 ## VFS Performance

 These flags may be used to enable/disable features of the VFS for
@@ -1,13 +1,13 @@
 ---
 title: "rclone obscure"
-description: "Obscure password for use in the rclone config file."
+description: "Obscure password for use in the rclone config file"
 slug: rclone_obscure
 url: /commands/rclone_obscure/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/obscure/ and as part of making a release run "make commanddocs"
 ---
 # rclone obscure

-Obscure password for use in the rclone config file.
+Obscure password for use in the rclone config file

 ## Synopsis
@@ -196,11 +196,6 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.

-**IMPORTANT** not all file systems support sparse files. In particular
-FAT/exFAT do not. Rclone will perform very badly if the cache
-directory is on a filesystem which doesn't support sparse files and it
-will log an ERROR message if one is detected.
-
 ## VFS Performance

 These flags may be used to enable/disable features of the VFS for
@@ -195,11 +195,6 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.

-**IMPORTANT** not all file systems support sparse files. In particular
-FAT/exFAT do not. Rclone will perform very badly if the cache
-directory is on a filesystem which doesn't support sparse files and it
-will log an ERROR message if one is detected.
-
 ## VFS Performance

 These flags may be used to enable/disable features of the VFS for
@@ -267,11 +267,6 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.

-**IMPORTANT** not all file systems support sparse files. In particular
-FAT/exFAT do not. Rclone will perform very badly if the cache
-directory is on a filesystem which doesn't support sparse files and it
-will log an ERROR message if one is detected.
-
 ## VFS Performance

 These flags may be used to enable/disable features of the VFS for
@@ -206,11 +206,6 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.

-**IMPORTANT** not all file systems support sparse files. In particular
-FAT/exFAT do not. Rclone will perform very badly if the cache
-directory is on a filesystem which doesn't support sparse files and it
-will log an ERROR message if one is detected.
-
 ## VFS Performance

 These flags may be used to enable/disable features of the VFS for
@@ -275,11 +275,6 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.

-**IMPORTANT** not all file systems support sparse files. In particular
-FAT/exFAT do not. Rclone will perform very badly if the cache
-directory is on a filesystem which doesn't support sparse files and it
-will log an ERROR message if one is detected.
-
 ## VFS Performance

 These flags may be used to enable/disable features of the VFS for
@@ -1108,6 +1108,11 @@ Note: On Windows until [this bug](https://github.com/Azure/go-ansiterm/issues/26
 is fixed all non-ASCII characters will be replaced with `.` when
 `--progress` is in use.

+### --progress-terminal-title ###
+
+This flag, when used with `-P/--progress`, will print the string `ETA: %s`
+to the terminal title.
+
 ### -q, --quiet ###

 This flag will limit rclone's output to error messages only.
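For the curious, terminal titles are set with an ANSI OSC escape sequence. A minimal sketch, assuming an xterm-compatible terminal — this is the general mechanism behind terminal.WriteTerminalTitle as used earlier in this diff, not necessarily rclone's exact code:

```go
package main

import "fmt"

// writeTerminalTitle emits the OSC 0 escape sequence, which
// xterm-compatible terminals interpret as "set window title".
func writeTerminalTitle(title string) {
	fmt.Printf("\033]0;%s\a", title)
}

func main() {
	writeTerminalTitle("ETA: 1m30s")
}
```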
Some files were not shown because too many files have changed in this diff.