mirror of https://github.com/rclone/rclone.git synced 2025-12-12 14:23:24 +00:00

Compare commits


1 Commits

Author SHA1 Message Date
Nick Craig-Wood
a5a2e02ea8 build: make arm .deb packages be armel architecture
This is as recommended by debian.

Fixes #5107
2021-03-15 16:55:56 +00:00
154 changed files with 2174 additions and 11399 deletions
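As a quick sanity check for the change above (a sketch, not part of the commit; the package filename is a placeholder), the architecture a built .deb declares can be read with dpkg-deb, and a fixed build should report armel rather than arm:

```
# Inspect the control fields of the built package.
dpkg-deb --field rclone_1.55.0_armel.deb Package Version Architecture
```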

View File

@@ -5,31 +5,19 @@ about: Report a problem with rclone
<!--
We understand you are having a problem with rclone; we want to help you with that!
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
**STOP and READ**
**YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
Please show the effort you've put in to solving the problem and please be specific.
People are volunteering their time to help! Low effort posts are not likely to get good answers!
If you think you might have found a bug, try to replicate it with the latest beta (or stable).
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/
If you can still replicate it or just got a question then please use the rclone forum:
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
https://forum.rclone.org/
for a quick response instead of filing an issue on this repo.
instead of filing an issue for a quick response.
If nothing else helps, then please fill in the info below which helps us help you.
If you think you might have found a bug, please can you try to replicate it with the latest beta?
**DO NOT REDACT** any information except passwords/keys/personal info.
https://beta.rclone.org/
You should use 3 backticks to begin and end your paste to make it readable.
Make sure to include a log obtained with '-vv'.
You can also use '-vv --log-file bug.log' and a service such as https://pastebin.com or https://gist.github.com/
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
Thank you
@@ -37,11 +25,6 @@ The Rclone Developers
-->
#### The associated forum post URL from `https://forum.rclone.org`
#### What is the problem you are having with rclone?
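For reference, the log the template above asks for can be produced like this (a sketch with placeholder paths):

```
# Re-run the failing command with verbose logging written to a file,
# then attach bug.log (or a pastebin/gist link) to the report.
rclone copy source:path dest:path -vv --log-file bug.log
```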

View File

@@ -7,16 +7,12 @@ about: Suggest a new feature or enhancement for rclone
Welcome :-)
So you've got an idea to improve rclone? We love that!
You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
Probably the latest beta (or stable) release has your feature, so try to update your rclone.
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/
Here is a checklist of things to do:
If it still isn't there, here is a checklist of things to do:
1. Search the old issues for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum: https://forum.rclone.org/
1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)
@@ -27,10 +23,6 @@ The Rclone Developers
-->
#### The associated forum post URL from `https://forum.rclone.org`
#### What is your current rclone version (output from `rclone version`)?
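The update step both templates point to is a single command on most installs (a sketch; installs from a distro package manager update through that package manager instead):

```
# Show the running version, then update rclone to the latest stable release.
rclone version
rclone selfupdate
```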

View File

@@ -213,94 +213,46 @@ jobs:
# Deploy binaries if enabled in config && not a PR && not a fork
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
android:
timeout-minutes: 30
name: "android-all"
xgo:
timeout-minutes: 60
name: "xgo cross compile"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
# Upgrade together with NDK version
- name: Set up Go 1.14
uses: actions/setup-go@v1
uses: actions/checkout@v1
with:
go-version: 1.14
# Checkout into a fixed path to avoid import path problems on go < 1.11
path: ./src/github.com/rclone/rclone
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
- name: Force NDK version
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true
- name: Go module cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Set global environment variables
- name: Set environment variables
shell: bash
run: |
echo "VERSION=$(make version)" >> $GITHUB_ENV
echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH
- name: build native rclone
- name: Cross-compile rclone
run: |
docker pull billziss/xgo-cgofuse
GO111MODULE=off go get -v github.com/karalabe/xgo # don't add to go.mod
# xgo \
# -image=billziss/xgo-cgofuse \
# -targets=darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
# -tags cmount \
# -dest build \
# .
xgo \
-image=billziss/xgo-cgofuse \
-targets=android/* \
-dest build \
.
- name: Build rclone
shell: bash
run: |
make
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm64-v8a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x86 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
- name: Upload artifacts
run: |
make ci_upload
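The per-ABI steps above can be reproduced outside CI; a rough sketch for the arm64-v8a target, assuming the same NDK version is installed under $ANDROID_HOME on a Linux host:

```
# Point cgo at the NDK clang for android-21/arm64 and cross-compile rclone,
# mirroring the "arm64-v8a build" step of the workflow.
export CC=$ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang
export CC_FOR_TARGET=$CC
export GOOS=android GOARCH=arm64 CGO_ENABLED=1
export CGO_LDFLAGS="-fuse-ld=lld -s -w"
VERSION=$(make version)
go build -v -tags android -trimpath \
  -ldflags "-s -X github.com/rclone/rclone/fs.Version=${VERSION}" \
  -o build/rclone-android-21-armv8a .
```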

View File

@@ -33,11 +33,10 @@ page](https://github.com/rclone/rclone).
Now in your terminal
git clone https://github.com/rclone/rclone.git
cd rclone
go get -u github.com/rclone/rclone
cd $GOPATH/src/github.com/rclone/rclone
git remote rename origin upstream
git remote add origin git@github.com:YOURUSER/rclone.git
go build
Make a branch to add your new feature
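A sketch of that branch step (the branch name is a placeholder):

```
# Create and switch to a branch for the new feature.
git checkout -b my-new-feature
```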

MANUAL.html generated — 1277 changed lines

File diff suppressed because it is too large.

MANUAL.md generated — 1727 changed lines

File diff suppressed because it is too large.

MANUAL.txt generated — 1849 changed lines

File diff suppressed because it is too large.

View File

@@ -187,10 +187,10 @@ upload_github:
./bin/upload-github $(TAG)
cross: doc
go run bin/cross-compile.go -release current $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
go run bin/cross-compile.go -release current $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
beta:
go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
go run bin/cross-compile.go $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
@@ -198,7 +198,7 @@ log_since_last_release:
git log $(LAST_TAG)..
compile_all:
go run bin/cross-compile.go -compile-only $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
ci_upload:
sudo chown -R $$USER build
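The targets touched in this hunk are typically exercised like this (a sketch; the beta and cross targets assume the maintainers' upload credentials, so compile_all is the safe local check):

```
# Cross-compile for every supported platform without uploading anything.
make compile_all

# List the commits since the last tagged release.
make log_since_last_release
```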

View File

@@ -76,24 +76,6 @@ Now
The rclone docker image should autobuild via GitHub Actions. If it doesn't
or needs to be updated then rebuild like this.
See: https://github.com/ilteoood/docker_buildx/issues/19
See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh
```
git co v1.54.1
docker pull golang
export DOCKER_CLI_EXPERIMENTAL=enabled
docker buildx create --name actions_builder --use
docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
echo "Supported platforms: $SUPPORTED_PLATFORMS"
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
docker buildx stop actions_builder
```
### Old build for linux/amd64 only
```
docker pull golang
docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .

View File

@@ -1 +1 @@
v1.56.0
v1.55.0

View File

@@ -217,23 +217,6 @@ This option controls how often unused buffers will be removed from the pool.`,
encoder.EncodeDel |
encoder.EncodeBackSlash |
encoder.EncodeRightPeriod),
}, {
Name: "public_access",
Help: "Public access level of a container: blob, container.",
Default: string(azblob.PublicAccessNone),
Examples: []fs.OptionExample{
{
Value: string(azblob.PublicAccessNone),
Help: "The container and its blobs can be accessed only with an authorized request. It's a default value",
}, {
Value: string(azblob.PublicAccessBlob),
Help: "Blob data within this container can be read via anonymous request.",
}, {
Value: string(azblob.PublicAccessContainer),
Help: "Allow full public read access for container and blob data.",
},
},
Advanced: true,
}},
})
}
@@ -258,7 +241,6 @@ type Options struct {
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
PublicAccess string `config:"public_access"`
}
// Fs represents a remote azure server
@@ -280,7 +262,6 @@ type Fs struct {
imdsPacer *fs.Pacer // Same but for IMDS
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
publicAccess azblob.PublicAccessType // Container Public Access Level
}
// Object describes an azure object
@@ -354,19 +335,6 @@ func validateAccessTier(tier string) bool {
}
}
// validatePublicAccess checks if azureblob supports the supplied public access level
func validatePublicAccess(publicAccess string) bool {
switch publicAccess {
case string(azblob.PublicAccessNone),
string(azblob.PublicAccessBlob),
string(azblob.PublicAccessContainer):
// valid cases
return true
default:
return false
}
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (e.g. "Token has expired")
@@ -534,11 +502,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
}
if !validatePublicAccess((opt.PublicAccess)) {
return nil, errors.Errorf("Azure Blob: Supported public access level are %s and %s",
string(azblob.PublicAccessBlob), string(azblob.PublicAccessContainer))
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
@@ -557,7 +520,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt.MemoryPoolUseMmap,
),
}
f.publicAccess = azblob.PublicAccessType(opt.PublicAccess)
f.imdsPacer.SetRetries(5) // per IMDS documentation
f.setRoot(root)
f.features = (&fs.Features{
@@ -1122,7 +1084,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
}
// now try to create the container
return f.pacer.Call(func() (bool, error) {
_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, f.publicAccess)
_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
if err != nil {
if storageErr, ok := err.(azblob.StorageError); ok {
switch storageErr.ServiceCode() {
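For context, the public_access option in this hunk would normally be set on the remote or per command; a hedged sketch, assuming the flag follows rclone's usual --azureblob-<option> naming and using placeholder remote/container names:

```
# Create a container whose blobs are readable by anonymous requests.
rclone mkdir --azureblob-public-access blob azblob:my-public-container
```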

View File

@@ -1744,13 +1744,6 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
ContentType: resp.Header.Get("Content-Type"),
Info: Info,
}
// When reading files from B2 via cloudflare using
// --b2-download-url cloudflare strips the Content-Length
// headers (presumably so it can inject stuff) so use the old
// length read from the listing.
if info.Size < 0 {
info.Size = o.size
}
return resp, info, nil
}

View File

@@ -323,7 +323,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
}
authRetry := false
if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Header.Get("Www-Authenticate"), "expired_token") {
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
authRetry = true
fs.Debugf(nil, "Should retry: %v", err)
}

View File

@@ -277,10 +277,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
return nil, errors.New("can't point remote at itself - check the value of the remote setting")
}
baseName, basePath, err := fspath.SplitFs(remote)
baseName, basePath, err := fspath.Parse(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
if baseName != "" {
baseName += ":"
}
// Look for a file first
remotePath := fspath.JoinRootPath(basePath, rpath)
baseFs, err := cache.Get(ctx, baseName+remotePath)

View File

@@ -404,8 +404,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
return nil, errors.Wrap(err, "failed to read destination hash")
}
if srcHash != "" && dstHash != "" {
if srcHash != dstHash {
if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object
err = o.Remove(ctx)
if err != nil {
@@ -413,8 +412,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
}
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}
}
return f.newObject(o), nil

View File

@@ -641,10 +641,7 @@ func (f *Fs) Features() *fs.Features {
}
// shouldRetry determines whether a given err rates being retried
func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func (f *Fs) shouldRetry(err error) (bool, error) {
if err == nil {
return false, nil
}
@@ -698,20 +695,20 @@ func containsString(slice []string, s string) bool {
}
// getFile returns drive.File for the ID passed and fields passed in
func (f *Fs) getFile(ctx context.Context, ID string, fields googleapi.Field) (info *drive.File, err error) {
func (f *Fs) getFile(ID string, fields googleapi.Field) (info *drive.File, err error) {
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Get(ID).
Fields(fields).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
return info, err
}
// getRootID returns the canonical ID for the "root" ID
func (f *Fs) getRootID(ctx context.Context) (string, error) {
info, err := f.getFile(ctx, "root", "id")
func (f *Fs) getRootID() (string, error) {
info, err := f.getFile("root", "id")
if err != nil {
return "", errors.Wrap(err, "couldn't find root directory ID")
}
@@ -817,7 +814,7 @@ OUTER:
var files *drive.FileList
err = f.pacer.Call(func() (bool, error) {
files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return false, errors.Wrap(err, "couldn't list directory")
@@ -840,7 +837,7 @@ OUTER:
if filesOnly && item.ShortcutDetails.TargetMimeType == driveFolderType {
continue
}
item, err = f.resolveShortcut(ctx, item)
item, err = f.resolveShortcut(item)
if err != nil {
return false, errors.Wrap(err, "list")
}
@@ -858,7 +855,7 @@ OUTER:
if !found {
continue
}
_, exportName, _, _ := f.findExportFormat(ctx, item)
_, exportName, _, _ := f.findExportFormat(item)
if exportName == "" || exportName != title {
continue
}
@@ -1158,7 +1155,7 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
f.rootFolderID = f.opt.TeamDriveID
} else {
// otherwise look up the actual root ID
rootID, err := f.getRootID(ctx)
rootID, err := f.getRootID()
if err != nil {
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
// 404 means that this scope does not have permission to get the
@@ -1331,26 +1328,26 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
// newObjectWithInfo creates an fs.Object for any drive.File
//
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
// If item has MD5 sum or a length it is a file stored on drive
if info.Md5Checksum != "" || info.Size > 0 {
return f.newRegularObject(remote, info), nil
}
extension, exportName, exportMimeType, isDocument := f.findExportFormat(ctx, info)
return f.newObjectWithExportInfo(ctx, remote, info, extension, exportName, exportMimeType, isDocument)
extension, exportName, exportMimeType, isDocument := f.findExportFormat(info)
return f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
}
// newObjectWithExportInfo creates an fs.Object for any drive.File and the result of findExportFormat
//
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo(
ctx context.Context, remote string, info *drive.File,
remote string, info *drive.File,
extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
// Note that resolveShortcut will have been called already if
// we are being called from a listing. However the drive.Item
// will have been resolved so this will do nothing.
info, err = f.resolveShortcut(ctx, info)
info, err = f.resolveShortcut(info)
if err != nil {
return nil, errors.Wrap(err, "new object")
}
@@ -1398,7 +1395,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
}
remote = remote[:len(remote)-len(extension)]
obj, err := f.newObjectWithExportInfo(ctx, remote, info, extension, exportName, exportMimeType, isDocument)
obj, err := f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
switch {
case err != nil:
return nil, err
@@ -1415,7 +1412,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
pathID = actualID(pathID)
found, err = f.list(ctx, []string{pathID}, leaf, true, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
_, exportName, _, isDocument := f.findExportFormat(ctx, item)
_, exportName, _, isDocument := f.findExportFormat(item)
if exportName == leaf {
pathIDOut = item.Id
return true
@@ -1450,8 +1447,8 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
info, err = f.svc.Files.Create(createInfo).
Fields("id").
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
return "", err
@@ -1486,15 +1483,15 @@ func linkTemplate(mt string) *template.Template {
})
return _linkTemplates[mt]
}
func (f *Fs) fetchFormats(ctx context.Context) {
func (f *Fs) fetchFormats() {
fetchFormatsOnce.Do(func() {
var about *drive.About
var err error
err = f.pacer.Call(func() (bool, error) {
about, err = f.svc.About.Get().
Fields("exportFormats,importFormats").
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
fs.Errorf(f, "Failed to get Drive exportFormats and importFormats: %v", err)
@@ -1511,8 +1508,8 @@ func (f *Fs) fetchFormats(ctx context.Context) {
// if necessary.
//
// if the fetch fails then it will not export any drive formats
func (f *Fs) exportFormats(ctx context.Context) map[string][]string {
f.fetchFormats(ctx)
func (f *Fs) exportFormats() map[string][]string {
f.fetchFormats()
return _exportFormats
}
@@ -1520,8 +1517,8 @@ func (f *Fs) exportFormats(ctx context.Context) map[string][]string {
// if necessary.
//
// if the fetch fails then it will not import any drive formats
func (f *Fs) importFormats(ctx context.Context) map[string][]string {
f.fetchFormats(ctx)
func (f *Fs) importFormats() map[string][]string {
f.fetchFormats()
return _importFormats
}
@@ -1530,9 +1527,9 @@ func (f *Fs) importFormats(ctx context.Context) map[string][]string {
//
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", false)
func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
func (f *Fs) findExportFormatByMimeType(itemMimeType string) (
extension, mimeType string, isDocument bool) {
exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
exportMimeTypes, isDocument := f.exportFormats()[itemMimeType]
if isDocument {
for _, _extension := range f.exportExtensions {
_mimeType := mime.TypeByExtension(_extension)
@@ -1559,8 +1556,8 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
//
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", "", false)
func (f *Fs) findExportFormat(ctx context.Context, item *drive.File) (extension, filename, mimeType string, isDocument bool) {
extension, mimeType, isDocument = f.findExportFormatByMimeType(ctx, item.MimeType)
func (f *Fs) findExportFormat(item *drive.File) (extension, filename, mimeType string, isDocument bool) {
extension, mimeType, isDocument = f.findExportFormatByMimeType(item.MimeType)
if extension != "" {
filename = item.Name + extension
}
@@ -1572,9 +1569,9 @@ func (f *Fs) findExportFormat(ctx context.Context, item *drive.File) (extension,
// MIME type is returned
//
// When no match is found "" is returned.
func (f *Fs) findImportFormat(ctx context.Context, mimeType string) string {
func (f *Fs) findImportFormat(mimeType string) string {
mimeType = fixMimeType(mimeType)
ifs := f.importFormats(ctx)
ifs := f.importFormats()
for _, mt := range f.importMimeTypes {
if mt == mimeType {
importMimeTypes := ifs[mimeType]
@@ -1607,7 +1604,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var iErr error
_, err = f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
entry, err := f.itemToDirEntry(ctx, path.Join(dir, item.Name), item)
entry, err := f.itemToDirEntry(path.Join(dir, item.Name), item)
if err != nil {
iErr = err
return true
@@ -1720,7 +1717,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
}
}
remote := path.Join(paths[i], item.Name)
entry, err := f.itemToDirEntry(ctx, remote, item)
entry, err := f.itemToDirEntry(remote, item)
if err != nil {
iErr = err
return true
@@ -1985,7 +1982,7 @@ func isShortcut(item *drive.File) bool {
// Note that we assume shortcuts can't point to shortcuts. Google
// drive web interface doesn't offer the option to create a shortcut
// to a shortcut. The documentation is silent on the issue.
func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *drive.File, err error) {
func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error) {
if f.opt.SkipShortcuts || item.MimeType != shortcutMimeType {
return item, nil
}
@@ -1993,7 +1990,7 @@ func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *dr
fs.Errorf(nil, "Expecting shortcutDetails in %v", item)
return item, nil
}
newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.fileFields)
newItem, err = f.getFile(item.ShortcutDetails.TargetId, f.fileFields)
if err != nil {
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
// 404 means dangling shortcut, so just return the shortcut with the mime type mangled
@@ -2015,7 +2012,7 @@ func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *dr
// itemToDirEntry converts a drive.File to an fs.DirEntry.
// When the drive.File cannot be represented as an fs.DirEntry
// (nil, nil) is returned.
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *drive.File) (entry fs.DirEntry, err error) {
func (f *Fs) itemToDirEntry(remote string, item *drive.File) (entry fs.DirEntry, err error) {
switch {
case item.MimeType == driveFolderType:
// cache the directory ID for later lookups
@@ -2029,7 +2026,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *drive.File
case f.opt.AuthOwnerOnly && !isAuthOwned(item):
// ignore object
default:
entry, err = f.newObjectWithInfo(ctx, remote, item)
entry, err = f.newObjectWithInfo(remote, item)
if err == fs.ErrorObjectNotFound {
return nil, nil
}
@@ -2096,12 +2093,12 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
importMimeType := ""
if f.importMimeTypes != nil && !f.opt.SkipGdocs {
importMimeType = f.findImportFormat(ctx, srcMimeType)
importMimeType = f.findImportFormat(srcMimeType)
if isInternalMimeType(importMimeType) {
remote = remote[:len(remote)-len(srcExt)]
exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType)
exportExt, _, _ = f.findExportFormatByMimeType(importMimeType)
if exportExt == "" {
return nil, errors.Errorf("No export format found for %q", importMimeType)
}
@@ -2131,8 +2128,8 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
return nil, err
@@ -2144,7 +2141,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
return nil, err
}
}
return f.newObjectWithInfo(ctx, remote, info)
return f.newObjectWithInfo(remote, info)
}
// MergeDirs merges the contents of all the directories passed
@@ -2186,8 +2183,8 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
AddParents(dstDir.ID()).
Fields("").
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
@@ -2220,14 +2217,14 @@ func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error {
_, err = f.svc.Files.Update(id, &info).
Fields("").
SupportsAllDrives(true).
Context(ctx).Do()
Do()
} else {
err = f.svc.Files.Delete(id).
Fields("").
SupportsAllDrives(true).
Context(ctx).Do()
Do()
}
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
}
@@ -2340,12 +2337,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
if isDoc {
// preserve the description on copy for docs
info, err := f.getFile(ctx, actualID(srcObj.id), "description")
info, err := f.getFile(actualID(srcObj.id), "description")
if err != nil {
fs.Errorf(srcObj, "Failed to read description for Google Doc: %v", err)
} else {
createInfo.Description = info.Description
return nil, errors.Wrap(err, "failed to read description for Google Doc")
}
createInfo.Description = info.Description
} else {
// don't overwrite the description on copy for files
// this should work for docs but it doesn't - it is probably a bug in Google Drive
@@ -2361,13 +2357,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
return nil, err
}
newObject, err := f.newObjectWithInfo(ctx, remote, info)
newObject, err := f.newObjectWithInfo(remote, info)
if err != nil {
return nil, err
}
@@ -2461,7 +2457,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
}
err := f.pacer.Call(func() (bool, error) {
err := f.svc.Files.EmptyTrash().Context(ctx).Do()
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
@@ -2479,7 +2475,7 @@ func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
var td *drive.Drive
err = f.pacer.Call(func() (bool, error) {
td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "failed to get Shared Drive info")
@@ -2502,7 +2498,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var err error
err = f.pacer.Call(func() (bool, error) {
about, err = f.svc.About.Get().Fields("storageQuota").Context(ctx).Do()
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get Drive storageQuota")
@@ -2574,14 +2570,14 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
AddParents(dstParents).
Fields(partialFields).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
return nil, err
}
return f.newObjectWithInfo(ctx, remote, info)
return f.newObjectWithInfo(remote, info)
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
@@ -2611,8 +2607,8 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
_, err = f.svc.Permissions.Create(id, permission).
Fields("").
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
return "", err
@@ -2654,8 +2650,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
AddParents(dstDirectoryID).
Fields("").
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
return err
@@ -2673,7 +2669,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the StartPageToken early so all changes from now on get processed
startPageToken, err := f.changeNotifyStartPageToken(ctx)
startPageToken, err := f.changeNotifyStartPageToken()
if err != nil {
fs.Infof(f, "Failed to get StartPageToken: %s", err)
}
@@ -2698,7 +2694,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
}
case <-tickerC:
if startPageToken == "" {
startPageToken, err = f.changeNotifyStartPageToken(ctx)
startPageToken, err = f.changeNotifyStartPageToken()
if err != nil {
fs.Infof(f, "Failed to get StartPageToken: %s", err)
continue
@@ -2713,15 +2709,15 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
}
}()
}
func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
var startPageToken *drive.StartPageToken
err = f.pacer.Call(func() (bool, error) {
changes := f.svc.Changes.GetStartPageToken().SupportsAllDrives(true)
if f.isTeamDrive {
changes.DriveId(f.opt.TeamDriveID)
}
startPageToken, err = changes.Context(ctx).Do()
return f.shouldRetry(ctx, err)
startPageToken, err = changes.Do()
return f.shouldRetry(err)
})
if err != nil {
return
@@ -2750,7 +2746,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
changesCall.Spaces("appDataFolder")
}
changeList, err = changesCall.Context(ctx).Do()
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return
@@ -2946,8 +2942,8 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(dstFs.opt.KeepRevisionForever).
Context(ctx).Do()
return dstFs.shouldRetry(ctx, err)
Do()
return dstFs.shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "shortcut creation failed")
@@ -2955,24 +2951,24 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
if isDir {
return nil, nil
}
return dstFs.newObjectWithInfo(ctx, dstPath, info)
return dstFs.newObjectWithInfo(dstPath, info)
}
// List all team drives
func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.Drive, err error) {
drives = []*drive.Drive{}
listTeamDrives := f.svc.Drives.List().PageSize(100)
func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.TeamDrive, err error) {
drives = []*drive.TeamDrive{}
listTeamDrives := f.svc.Teamdrives.List().PageSize(100)
var defaultFs Fs // default Fs with default Options
for {
var teamDrives *drive.DriveList
var teamDrives *drive.TeamDriveList
err = f.pacer.Call(func() (bool, error) {
teamDrives, err = listTeamDrives.Context(ctx).Do()
return defaultFs.shouldRetry(ctx, err)
return defaultFs.shouldRetry(err)
})
if err != nil {
return drives, errors.Wrap(err, "listing Team Drives failed")
}
drives = append(drives, teamDrives.Drives...)
drives = append(drives, teamDrives.TeamDrives...)
if teamDrives.NextPageToken == "" {
break
}
@@ -3009,8 +3005,8 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
_, err := f.svc.Files.Update(item.Id, &update).
SupportsAllDrives(true).
Fields("trashed").
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
err = errors.Wrap(err, "failed to restore")
@@ -3052,7 +3048,7 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
// copy file with id to dest
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
info, err := f.getFile(ctx, id, f.fileFields)
info, err := f.getFile(id, f.fileFields)
if err != nil {
return errors.Wrap(err, "couldn't find id")
}
@@ -3060,7 +3056,7 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
}
info.Name = f.opt.Enc.ToStandardName(info.Name)
o, err := f.newObjectWithInfo(ctx, info.Name, info)
o, err := f.newObjectWithInfo(info.Name, info)
if err != nil {
return err
}
@@ -3069,7 +3065,7 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
return err
}
if destLeaf == "" {
destLeaf = path.Base(o.Remote())
destLeaf = info.Name
}
if destDir == "" {
destDir = "."
@@ -3361,7 +3357,7 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
found, err := f.list(ctx, []string{directoryID}, leaf, false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
extension, exportName, exportMimeType, isDocument = f.findExportFormat(ctx, item)
extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
if exportName == leaf {
info = item
return true
@@ -3412,8 +3408,8 @@ func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
Fields(partialFields).
SupportsAllDrives(true).
Context(ctx).Do()
return o.fs.shouldRetry(ctx, err)
Do()
return o.fs.shouldRetry(err)
})
if err != nil {
return err
@@ -3451,7 +3447,7 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
_ = res.Body.Close() // ignore error
}
}
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
if err != nil {
return req, nil, err
@@ -3543,8 +3539,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
v2File, err = o.fs.v2Svc.Files.Get(actualID(o.id)).
Fields("downloadUrl").
SupportsAllDrives(true).
Context(ctx).Do()
return o.fs.shouldRetry(ctx, err)
Do()
return o.fs.shouldRetry(err)
})
if err == nil {
fs.Debugf(o, "Using v2 download: %v", v2File.DownloadUrl)
@@ -3624,8 +3620,8 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(o.fs.opt.KeepRevisionForever).
Context(ctx).Do()
return o.fs.shouldRetry(ctx, err)
Do()
return o.fs.shouldRetry(err)
})
return
}
@@ -3668,7 +3664,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
newO, err := o.fs.newObjectWithInfo(ctx, src.Remote(), info)
newO, err := o.fs.newObjectWithInfo(src.Remote(), info)
if err != nil {
return err
}
@@ -3692,7 +3688,7 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
return errors.Errorf("can't update google document type without --drive-import-formats")
}
importMimeType = o.fs.findImportFormat(ctx, updateInfo.MimeType)
importMimeType = o.fs.findImportFormat(updateInfo.MimeType)
if importMimeType == "" {
return errors.Errorf("no import format found for %q", srcMimeType)
}
@@ -3709,7 +3705,7 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
remote := src.Remote()
remote = remote[:len(remote)-o.extLen]
newO, err := o.fs.newObjectWithInfo(ctx, remote, info)
newO, err := o.fs.newObjectWithInfo(remote, info)
if err != nil {
return err
}

View File

@@ -111,7 +111,6 @@ func TestInternalParseExtensions(t *testing.T) {
}
func TestInternalFindExportFormat(t *testing.T) {
ctx := context.Background()
item := &drive.File{
Name: "file",
MimeType: "application/vnd.google-apps.document",
@@ -129,7 +128,7 @@ func TestInternalFindExportFormat(t *testing.T) {
} {
f := new(Fs)
f.exportExtensions = test.extensions
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(ctx, item)
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
assert.Equal(t, test.wantExtension, gotExtension)
if test.wantExtension != "" {
assert.Equal(t, item.Name+gotExtension, gotFilename)

View File

@@ -94,7 +94,7 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res)
}
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return nil, err
@@ -202,7 +202,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
again, err := rx.f.shouldRetry(ctx, err)
again, err := rx.f.shouldRetry(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false
err = nil

View File

@@ -310,7 +310,7 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Logf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
@@ -1084,30 +1084,13 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
createArg := sharing.CreateSharedLinkWithSettingsArg{
Path: absPath,
Settings: &sharing.SharedLinkSettings{
RequestedVisibility: &sharing.RequestedVisibility{
Tagged: dropbox.Tagged{Tag: sharing.RequestedVisibilityPublic},
},
Audience: &sharing.LinkAudience{
Tagged: dropbox.Tagged{Tag: sharing.LinkAudiencePublic},
},
Access: &sharing.RequestedLinkAccessLevel{
Tagged: dropbox.Tagged{Tag: sharing.RequestedLinkAccessLevelViewer},
},
},
// FIXME this gives settings_error/not_authorized/.. errors
// and the expires setting isn't in the documentation so remove
// for now.
// Settings: &sharing.SharedLinkSettings{
// Expires: time.Now().Add(time.Duration(expire)).UTC().Round(time.Second),
// },
}
if expire < fs.DurationOff {
expiryTime := time.Now().Add(time.Duration(expire)).UTC().Round(time.Second)
createArg.Settings.Expires = expiryTime
}
// FIXME note we can't set Settings for non enterprise dropbox
// because of https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75
// however this only goes wrong when we set Expires, so as a
// work-around remove Settings unless expire is set.
if expire == fs.DurationOff {
createArg.Settings = nil
}
var linkRes sharing.IsSharedLinkMetadata
err = f.pacer.Call(func() (bool, error) {
linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)

View File

@@ -348,10 +348,8 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
return nil, err
}
if len(fileUploadResponse.Links) == 0 {
return nil, errors.New("upload response not found")
} else if len(fileUploadResponse.Links) > 1 {
fs.Debugf(remote, "Multiple upload responses found, using the first")
if len(fileUploadResponse.Links) != 1 {
return nil, errors.New("unexpected amount of files")
}
link := fileUploadResponse.Links[0]

View File

@@ -21,7 +21,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
@@ -34,12 +33,6 @@ var (
currentUser = env.CurrentUser()
)
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -111,11 +104,6 @@ given, rclone will empty the connection pool.
Set to 0 to keep connections indefinitely.
`,
Advanced: true,
}, {
Name: "close_timeout",
Help: "Maximum time to wait for a response to close.",
Default: fs.Duration(60 * time.Second),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -144,7 +132,6 @@ type Options struct {
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -164,7 +151,6 @@ type Fs struct {
drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser
tlsConf *tls.Config
pacer *fs.Pacer // pacer for FTP connections
}
// Object describes an FTP file
@@ -258,24 +244,8 @@ func (d *dialCtx) dial(network, address string) (net.Conn, error) {
return conn, err
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable:
return true, err
}
}
return fserrors.ShouldRetry(err), err
}
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
func (f *Fs) ftpConnection(ctx context.Context) (*ftp.ServerConn, error) {
fs.Debugf(f, "Connecting to FTP server")
dCtx := dialCtx{f, ctx}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dCtx.dial)}
@@ -297,22 +267,18 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
err = f.pacer.Call(func() (bool, error) {
c, err = ftp.Dial(f.dialAddr, ftpConfig...)
c, err := ftp.Dial(f.dialAddr, ftpConfig...)
if err != nil {
return shouldRetry(ctx, err)
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Dial")
}
err = c.Login(f.user, f.pass)
if err != nil {
_ = c.Quit()
return shouldRetry(ctx, err)
fs.Errorf(f, "Error while Logging in into %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Login")
}
return false, nil
})
if err != nil {
err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr)
}
return c, err
return c, nil
}
// Get an FTP connection from the pool, or open a new one
@@ -445,7 +411,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
tlsConf: tlsConfig,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
@@ -966,8 +931,8 @@ func (f *ftpReadCloser) Close() error {
go func() {
errchan <- f.rc.Close()
}()
// Wait for Close for up to 60 seconds by default
timer := time.NewTimer(time.Duration(f.f.opt.CloseTimeout))
// Wait for Close for up to 60 seconds
timer := time.NewTimer(60 * time.Second)
select {
case err = <-errchan:
timer.Stop()
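The hard-coded 60-second wait above corresponds to the close_timeout option on the other side of this hunk; a hedged sketch, assuming the flag follows the usual --ftp-<option> naming and using placeholder paths:

```
# Give a slow FTP server extra time to acknowledge closing the data connection.
rclone copy --ftp-close-timeout 2m ftp:remote/path /local/path
```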

View File

@@ -44,7 +44,6 @@ func init() {
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
Advanced: runtime.GOOS != "windows",
Examples: []fs.OptionExample{{
Value: "true",
Help: "Disables long file names",
@@ -1145,10 +1144,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
err = file.PreAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
if err == file.ErrDiskFull {
_ = f.Close()
return err
}
}
}
out = f

View File

@@ -234,10 +234,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this response and err
// deserve to be retried. It returns the err as a convenience.
// Retries password authorization (once) in a special case of access denied.
func shouldRetry(ctx context.Context, res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, error) {
if res != nil && res.StatusCode == 403 && f.opt.Password != "" && !f.passFailed {
reAuthErr := f.reAuthorize(opts, err)
return reAuthErr == nil, err // return an original error
@@ -603,7 +600,7 @@ func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEnt
var info api.ItemInfoResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
@@ -739,7 +736,7 @@ func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int)
)
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
@@ -803,7 +800,7 @@ func (f *Fs) listBin(ctx context.Context, dirPath string, depth int) (entries fs
var res *http.Response
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
closeBody(res)
@@ -1076,7 +1073,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) error {
var res *http.Response
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
closeBody(res)
@@ -1219,7 +1216,7 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) error {
var response api.GenericResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
switch {
@@ -1291,7 +1288,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var response api.GenericBodyResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
@@ -1395,7 +1392,7 @@ func (f *Fs) moveItemBin(ctx context.Context, srcPath, dstPath, opName string) e
var res *http.Response
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
closeBody(res)
@@ -1486,7 +1483,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var response api.GenericBodyResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err == nil && response.Body != "" {
@@ -1527,7 +1524,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
var response api.CleanupResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
return err
@@ -1560,7 +1557,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var info api.UserInfoResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
return nil, err
@@ -2079,7 +2076,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err, o.fs, &opts)
return shouldRetry(res, err, o.fs, &opts)
})
if err != nil {
closeBody(res)
@@ -2175,7 +2172,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
opts.RootURL = server
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err, o.fs, &opts)
return shouldRetry(res, err, o.fs, &opts)
})
if err != nil {
if res != nil && res.Body != nil {

View File

@@ -30,7 +30,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
@@ -159,10 +158,7 @@ func parsePath(path string) (root string) {
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(err error) (bool, error) {
// Let the mega library handle the low level retries
return false, err
/*
@@ -175,8 +171,8 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
}
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, remote string) (info *mega.Node, err error) {
rootNode, err := f.findRoot(ctx, false)
func (f *Fs) readMetaDataForPath(remote string) (info *mega.Node, err error) {
rootNode, err := f.findRoot(false)
if err != nil {
return nil, err
}
@@ -241,7 +237,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}).Fill(ctx, f)
// Find the root node and check if it is a file or not
_, err = f.findRoot(ctx, false)
_, err = f.findRoot(false)
switch err {
case nil:
// root node found and is a directory
@@ -311,8 +307,8 @@ func (f *Fs) findObject(rootNode *mega.Node, file string) (node *mega.Node, err
// lookupDir looks up the node for the directory of the name given
//
// if create is true it tries to create the root directory if not found
func (f *Fs) lookupDir(ctx context.Context, dir string) (*mega.Node, error) {
rootNode, err := f.findRoot(ctx, false)
func (f *Fs) lookupDir(dir string) (*mega.Node, error) {
rootNode, err := f.findRoot(false)
if err != nil {
return nil, err
}
@@ -320,15 +316,15 @@ func (f *Fs) lookupDir(ctx context.Context, dir string) (*mega.Node, error) {
}
// lookupParentDir finds the parent node for the remote passed in
func (f *Fs) lookupParentDir(ctx context.Context, remote string) (dirNode *mega.Node, leaf string, err error) {
func (f *Fs) lookupParentDir(remote string) (dirNode *mega.Node, leaf string, err error) {
parent, leaf := path.Split(remote)
dirNode, err = f.lookupDir(ctx, parent)
dirNode, err = f.lookupDir(parent)
return dirNode, leaf, err
}
// mkdir makes the directory and any parent directories for the
// directory of the name given
func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *mega.Node, err error) {
func (f *Fs) mkdir(rootNode *mega.Node, dir string) (node *mega.Node, err error) {
f.mkdirMu.Lock()
defer f.mkdirMu.Unlock()
@@ -362,7 +358,7 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
// create directory called name in node
err = f.pacer.Call(func() (bool, error) {
node, err = f.srv.CreateDir(name, node)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "mkdir create node failed")
@@ -372,20 +368,20 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
}
// mkdirParent creates the parent directory of remote
func (f *Fs) mkdirParent(ctx context.Context, remote string) (dirNode *mega.Node, leaf string, err error) {
rootNode, err := f.findRoot(ctx, true)
func (f *Fs) mkdirParent(remote string) (dirNode *mega.Node, leaf string, err error) {
rootNode, err := f.findRoot(true)
if err != nil {
return nil, "", err
}
parent, leaf := path.Split(remote)
dirNode, err = f.mkdir(ctx, rootNode, parent)
dirNode, err = f.mkdir(rootNode, parent)
return dirNode, leaf, err
}
// findRoot looks up the root directory node and returns it.
//
// if create is true it tries to create the root directory if not found
func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
func (f *Fs) findRoot(create bool) (*mega.Node, error) {
f.rootNodeMu.Lock()
defer f.rootNodeMu.Unlock()
@@ -407,7 +403,7 @@ func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
}
//..not found so create the root directory
f._rootNode, err = f.mkdir(ctx, absRoot, f.root)
f._rootNode, err = f.mkdir(absRoot, f.root)
return f._rootNode, err
}
@@ -437,7 +433,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
fs.Debugf(f, "Deleting trash %q", f.opt.Enc.ToStandardName(item.GetName()))
deleteErr := f.pacer.Call(func() (bool, error) {
err := f.srv.Delete(item, true)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if deleteErr != nil {
err = deleteErr
@@ -451,7 +447,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *mega.Node) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *mega.Node) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -461,7 +457,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *mega.No
// Set info
err = o.setMetaData(info)
} else {
err = o.readMetaData(ctx) // reads info and meta, returning an error
err = o.readMetaData() // reads info and meta, returning an error
}
if err != nil {
return nil, err
@@ -472,7 +468,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *mega.No
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
return f.newObjectWithInfo(remote, nil)
}
// list the objects into the function supplied
@@ -510,7 +506,7 @@ func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, e
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
dirNode, err := f.lookupDir(ctx, dir)
dirNode, err := f.lookupDir(dir)
if err != nil {
return nil, err
}
@@ -522,7 +518,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash())
entries = append(entries, d)
case mega.FILE:
o, err := f.newObjectWithInfo(ctx, remote, info)
o, err := f.newObjectWithInfo(remote, info)
if err != nil {
iErr = err
return true
@@ -546,8 +542,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Returns the dirNode, object, leaf and error
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
dirNode, leaf, err = f.mkdirParent(ctx, remote)
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
dirNode, leaf, err = f.mkdirParent(remote)
if err != nil {
return nil, nil, leaf, err
}
@@ -569,7 +565,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
switch err {
case nil:
return existingObj, existingObj.Update(ctx, in, src, options...)
@@ -595,7 +591,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
size := src.Size()
modTime := src.ModTime(ctx)
o, _, _, err := f.createObject(ctx, remote, modTime, size)
o, _, _, err := f.createObject(remote, modTime, size)
if err != nil {
return nil, err
}
@@ -604,30 +600,30 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
rootNode, err := f.findRoot(ctx, true)
rootNode, err := f.findRoot(true)
if err != nil {
return err
}
_, err = f.mkdir(ctx, rootNode, dir)
_, err = f.mkdir(rootNode, dir)
return errors.Wrap(err, "Mkdir failed")
}
// deleteNode removes a file or directory, observing useTrash
func (f *Fs) deleteNode(ctx context.Context, node *mega.Node) (err error) {
func (f *Fs) deleteNode(node *mega.Node) (err error) {
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Delete(node, f.opt.HardDelete)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
return err
}
// purgeCheck removes the directory dir, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
func (f *Fs) purgeCheck(dir string, check bool) error {
f.mkdirMu.Lock()
defer f.mkdirMu.Unlock()
rootNode, err := f.findRoot(ctx, false)
rootNode, err := f.findRoot(false)
if err != nil {
return err
}
@@ -648,7 +644,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
waitEvent := f.srv.WaitEventsStart()
err = f.deleteNode(ctx, dirNode)
err = f.deleteNode(dirNode)
if err != nil {
return errors.Wrap(err, "delete directory node failed")
}
@@ -666,7 +662,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
return f.purgeCheck(dir, true)
}
// Precision return the precision of this Fs
@@ -680,13 +676,13 @@ func (f *Fs) Precision() time.Duration {
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
return f.purgeCheck(dir, false)
}
// move a file or folder (srcFs, srcRemote, info) to (f, dstRemote)
//
// info will be updated
func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node) (err error) {
func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node) (err error) {
var (
dstFs = f
srcDirNode, dstDirNode *mega.Node
@@ -696,12 +692,12 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
if dstRemote != "" {
// lookup or create the destination parent directory
dstDirNode, dstLeaf, err = dstFs.mkdirParent(ctx, dstRemote)
dstDirNode, dstLeaf, err = dstFs.mkdirParent(dstRemote)
} else {
// find or create the parent of the root directory
absRoot := dstFs.srv.FS.GetRoot()
dstParent, dstLeaf = path.Split(dstFs.root)
dstDirNode, err = dstFs.mkdir(ctx, absRoot, dstParent)
dstDirNode, err = dstFs.mkdir(absRoot, dstParent)
}
if err != nil {
return errors.Wrap(err, "server-side move failed to make dst parent dir")
@@ -709,7 +705,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
if srcRemote != "" {
// lookup the existing parent directory
srcDirNode, srcLeaf, err = srcFs.lookupParentDir(ctx, srcRemote)
srcDirNode, srcLeaf, err = srcFs.lookupParentDir(srcRemote)
} else {
// lookup the existing root parent
absRoot := srcFs.srv.FS.GetRoot()
@@ -725,7 +721,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
//log.Printf("move src %p %q dst %p %q", srcDirNode, srcDirNode.GetName(), dstDirNode, dstDirNode.GetName())
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Move(info, dstDirNode)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "server-side move failed")
@@ -739,7 +735,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Rename(info, f.opt.Enc.FromStandardName(dstLeaf))
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "server-side rename failed")
@@ -771,7 +767,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// Do the move
err := f.move(ctx, remote, srcObj.fs, srcObj.remote, srcObj.info)
err := f.move(remote, srcObj.fs, srcObj.remote, srcObj.info)
if err != nil {
return nil, err
}
@@ -802,13 +798,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// find the source
info, err := srcFs.lookupDir(ctx, srcRemote)
info, err := srcFs.lookupDir(srcRemote)
if err != nil {
return err
}
// check the destination doesn't exist
_, err = dstFs.lookupDir(ctx, dstRemote)
_, err = dstFs.lookupDir(dstRemote)
if err == nil {
return fs.ErrorDirExists
} else if err != fs.ErrorDirNotFound {
@@ -816,7 +812,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// Do the move
err = f.move(ctx, dstRemote, srcFs, srcRemote, info)
err = f.move(dstRemote, srcFs, srcRemote, info)
if err != nil {
return err
}
@@ -842,7 +838,7 @@ func (f *Fs) Hashes() hash.Set {
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
root, err := f.findRoot(ctx, false)
root, err := f.findRoot(false)
if err != nil {
return "", errors.Wrap(err, "PublicLink failed to find root node")
}
@@ -890,7 +886,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
fs.Infof(srcDir, "merging %q", f.opt.Enc.ToStandardName(info.GetName()))
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Move(info, dstDirNode)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", f.opt.Enc.ToStandardName(info.GetName()), srcDir)
@@ -898,7 +894,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
err = f.deleteNode(ctx, srcDirNode)
err = f.deleteNode(srcDirNode)
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
}
@@ -912,7 +908,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var err error
err = f.pacer.Call(func() (bool, error) {
q, err = f.srv.GetQuota()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get Mega Quota")
@@ -967,11 +963,11 @@ func (o *Object) setMetaData(info *mega.Node) (err error) {
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) readMetaData() (err error) {
if o.info != nil {
return nil
}
info, err := o.fs.readMetaDataForPath(ctx, o.remote)
info, err := o.fs.readMetaDataForPath(o.remote)
if err != nil {
if err == fs.ErrorDirNotFound {
err = fs.ErrorObjectNotFound
@@ -1002,7 +998,6 @@ func (o *Object) Storable() bool {
// openObject represents a download in progress
type openObject struct {
ctx context.Context
mu sync.Mutex
o *Object
d *mega.Download
@@ -1013,14 +1008,14 @@ type openObject struct {
}
// get the next chunk
func (oo *openObject) getChunk(ctx context.Context) (err error) {
func (oo *openObject) getChunk() (err error) {
if oo.id >= oo.d.Chunks() {
return io.EOF
}
var chunk []byte
err = oo.o.fs.pacer.Call(func() (bool, error) {
chunk, err = oo.d.DownloadChunk(oo.id)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return err
@@ -1050,7 +1045,7 @@ func (oo *openObject) Read(p []byte) (n int, err error) {
oo.skip -= int64(size)
}
if len(oo.chunk) == 0 {
err = oo.getChunk(oo.ctx)
err = oo.getChunk()
if err != nil {
return 0, err
}
@@ -1073,7 +1068,7 @@ func (oo *openObject) Close() (err error) {
}
err = oo.o.fs.pacer.Call(func() (bool, error) {
err = oo.d.Finish()
return shouldRetry(oo.ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "failed to finish download")
@@ -1101,14 +1096,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var d *mega.Download
err = o.fs.pacer.Call(func() (bool, error) {
d, err = o.fs.srv.NewDownload(o.info)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "open download file failed")
}
oo := &openObject{
ctx: ctx,
o: o,
d: d,
skip: offset,
@@ -1131,7 +1125,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
remote := o.Remote()
// Create the parent directory
dirNode, leaf, err := o.fs.mkdirParent(ctx, remote)
dirNode, leaf, err := o.fs.mkdirParent(remote)
if err != nil {
return errors.Wrap(err, "update make parent dir failed")
}
@@ -1139,7 +1133,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var u *mega.Upload
err = o.fs.pacer.Call(func() (bool, error) {
u, err = o.fs.srv.NewUpload(dirNode, o.fs.opt.Enc.FromStandardName(leaf), size)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "upload file failed to create session")
@@ -1160,7 +1154,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
err = o.fs.pacer.Call(func() (bool, error) {
err = u.UploadChunk(id, chunk)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "upload file failed to upload chunk")
@@ -1171,7 +1165,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var info *mega.Node
err = o.fs.pacer.Call(func() (bool, error) {
info, err = u.Finish()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "failed to finish upload")
@@ -1179,7 +1173,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// If the upload succeeded and the original object existed, then delete it
if o.info != nil {
err = o.fs.deleteNode(ctx, o.info)
err = o.fs.deleteNode(o.info)
if err != nil {
return errors.Wrap(err, "upload failed to remove old version")
}
@@ -1191,7 +1185,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
err := o.fs.deleteNode(ctx, o.info)
err := o.fs.deleteNode(o.info)
if err != nil {
return errors.Wrap(err, "Remove object failed")
}
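Nearly every call in the mega hunks above runs inside the same f.pacer.Call(...) loop that asks shouldRetry whether to try again, and the only variation between the two sides of the diff is whether the request context is threaded through. A minimal, self-contained sketch of that calling convention, assuming a fixed delay in place of rclone's real fs.Pacer backoff and a made-up transientError type:

package main

import (
	"errors"
	"fmt"
	"time"
)

// transientError marks an error as worth retrying in this sketch.
type transientError struct{ msg string }

func (e *transientError) Error() string { return e.msg }

// shouldRetry reports whether err looks transient. It mirrors the helper in
// the hunks above in shape only; the real rclone version also inspects the
// Mega/HTTP error codes and, in the newer variant, the request context.
func shouldRetry(err error) (bool, error) {
	var te *transientError
	return errors.As(err, &te), err
}

// call retries fn with a fixed delay until fn stops asking for a retry.
// rclone's fs.Pacer plays this role with exponential backoff and rate
// limiting; this loop only illustrates the calling convention.
func call(fn func() (bool, error)) error {
	for {
		retry, err := fn()
		if !retry {
			return err
		}
		time.Sleep(10 * time.Millisecond)
	}
}

func main() {
	attempts := 0
	err := call(func() (bool, error) {
		attempts++
		if attempts < 3 {
			return shouldRetry(&transientError{"temporarily unavailable"})
		}
		return false, nil
	})
	fmt.Println(attempts, err) // prints: 3 <nil>
}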

View File

@@ -361,11 +361,6 @@ This will only work if you are copying between two OneDrive *Personal* drives AN
the files to copy are already shared between them. In other cases, rclone will
fall back to normal copy (which will be slightly slower).`,
Advanced: true,
}, {
Name: "list_chunk",
Help: "Size of listing chunk.",
Default: 1000,
Advanced: true,
}, {
Name: "no_versions",
Default: false,
@@ -473,7 +468,6 @@ type Options struct {
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ListChunk int64 `config:"list_chunk"`
NoVersions bool `config:"no_versions"`
LinkScope string `config:"link_scope"`
LinkType string `config:"link_type"`
@@ -555,10 +549,7 @@ var errAsyncJobAccessDenied = errors.New("async job failed - access denied")
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(resp *http.Response, err error) (bool, error) {
retry := false
if resp != nil {
switch resp.StatusCode {
@@ -605,7 +596,7 @@ func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID s
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return info, resp, err
@@ -621,7 +612,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
opts.Path = strings.TrimSuffix(opts.Path, ":")
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return info, resp, err
}
@@ -877,7 +868,7 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -902,14 +893,14 @@ type listAllFn func(*api.Item) bool
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
// Top parameter asks for bigger pages of data
// https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := f.newOptsCall(dirID, "GET", fmt.Sprintf("/children?$top=%d", f.opt.ListChunk))
opts := f.newOptsCall(dirID, "GET", "/children?$top=1000")
OUTER:
for {
var result api.ListChildrenResponse
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
@@ -1046,7 +1037,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
}
@@ -1202,7 +1193,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &copyReq, nil)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1295,7 +1286,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
var info api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1362,7 +1353,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
var info api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return err
@@ -1388,7 +1379,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &drive)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "about failed")
@@ -1429,7 +1420,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
Password: f.opt.LinkPassword,
}
if expire < fs.DurationOff {
if expire < fs.Duration(time.Hour*24*365*100) {
expiry := time.Now().Add(time.Duration(expire))
share.Expiry = &expiry
}
@@ -1438,7 +1429,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var result api.CreateShareLinkResponse
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &share, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
fmt.Println(err)
@@ -1483,7 +1474,7 @@ func (o *Object) deleteVersions(ctx context.Context) error {
var versions api.VersionsResponse
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &versions)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return err
@@ -1510,7 +1501,7 @@ func (o *Object) deleteVersion(ctx context.Context, ID string) error {
opts.NoResponse = true
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
}
@@ -1661,7 +1652,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
var info *api.Item
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
// Remove versions if required
if o.fs.opt.NoVersions {
@@ -1703,7 +1694,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1731,7 +1722,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
err = errors.New(err.Error() + " (is it a OneNote file?)")
}
}
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return response, err
}
@@ -1746,7 +1737,7 @@ func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err er
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return 0, err
@@ -1806,11 +1797,11 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
return true, errors.Wrapf(err, "retry this chunk skipping %d bytes", skip)
}
if err != nil {
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
}
body, err = rest.ReadBody(resp)
if err != nil {
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
}
if resp.StatusCode == 200 || resp.StatusCode == 201 {
// we are done :)
@@ -1833,7 +1824,7 @@ func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return
}
@@ -1904,7 +1895,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
err = errors.New(err.Error() + " (is it a OneNote file?)")
}
}
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
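The two onedrive shouldRetry variants above differ mainly in whether a cancelled context short-circuits the retry (the newer one checks fserrors.ContextError first). A standard-library sketch of that check; isRetryableStatus is a made-up stand-in for the backend's retryErrorCodes table:

package retrysketch

import (
	"context"
	"net/http"
)

// isRetryableStatus is a stand-in for rclone's retryErrorCodes table.
func isRetryableStatus(code int) bool {
	switch code {
	case 408, 429, 500, 502, 503, 504:
		return true
	}
	return false
}

// shouldRetry gives up immediately if the context is already cancelled or
// timed out, and otherwise retries on transient HTTP status codes. The real
// helper uses fserrors.ContextError to unwrap context errors hidden inside
// other errors; ctx.Err() here only covers the direct case.
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if ctxErr := ctx.Err(); ctxErr != nil {
		return false, ctxErr
	}
	if resp != nil && isRetryableStatus(resp.StatusCode) {
		return true, err
	}
	return false, err
}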

View File

@@ -1137,7 +1137,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// opts.Body=0), so upload it as a multipart form POST with
// Content-Length set.
if size == 0 {
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf)
formReader, contentType, overhead, err := rest.MultipartUpload(in, opts.Parameters, "content", leaf)
if err != nil {
return errors.Wrap(err, "failed to make multipart upload for 0 length file")
}
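rest.MultipartUpload above wraps the source reader in a multipart form body so that even a zero-length file can be posted with Content-Length set. A rough standard-library sketch of that streaming idea using io.Pipe and mime/multipart; the helper name and signature here are illustrative, not rclone's API:

package multipartsketch

import (
	"io"
	"mime/multipart"
)

// streamForm writes a single file field into a multipart body on the fly.
// The returned reader can be used as an HTTP request body, and the returned
// string is the matching Content-Type header value.
func streamForm(in io.Reader, field, filename string) (io.Reader, string) {
	pr, pw := io.Pipe()
	w := multipart.NewWriter(pw)
	go func() {
		part, err := w.CreateFormFile(field, filename)
		if err == nil {
			_, err = io.Copy(part, in)
		}
		if cerr := w.Close(); err == nil {
			err = cerr
		}
		pw.CloseWithError(err) // readers see err, or io.EOF if err is nil
	}()
	return pr, w.FormDataContentType()
}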

View File

@@ -1399,10 +1399,7 @@ var retryErrorCodes = []int{
// S3 is pretty resilient, and the built-in retry handling is probably sufficient
// as it should notice closed connections and timeouts which are the most likely
// sort of failure modes
func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func (f *Fs) shouldRetry(err error) (bool, error) {
// If this is an awserr object, try and extract more useful information to determine if we should retry
if awsError, ok := err.(awserr.Error); ok {
// Simple case, check the original embedded error in case it's generically retryable
@@ -1414,7 +1411,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
// 301 if wrong region for bucket - can only update if running from a bucket
if f.rootBucket != "" {
if reqErr.StatusCode() == http.StatusMovedPermanently {
urfbErr := f.updateRegionForBucket(ctx, f.rootBucket)
urfbErr := f.updateRegionForBucket(f.rootBucket)
if urfbErr != nil {
fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
return false, err
@@ -1562,8 +1559,6 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
// Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
awsSessionOpts.SharedConfigState = session.SharedConfigEnable
// Set the name of the profile if supplied
awsSessionOpts.Profile = opt.Profile
}
ses, err := session.NewSessionWithOptions(awsSessionOpts)
if err != nil {
@@ -1746,7 +1741,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
}
// Gets the bucket location
func (f *Fs) getBucketLocation(ctx context.Context, bucket string) (string, error) {
func (f *Fs) getBucketLocation(bucket string) (string, error) {
req := s3.GetBucketLocationInput{
Bucket: &bucket,
}
@@ -1754,7 +1749,7 @@ func (f *Fs) getBucketLocation(ctx context.Context, bucket string) (string, erro
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.GetBucketLocation(&req)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return "", err
@@ -1764,8 +1759,8 @@ func (f *Fs) getBucketLocation(ctx context.Context, bucket string) (string, erro
// Updates the region for the bucket by reading the region from the
// bucket then updating the session.
func (f *Fs) updateRegionForBucket(ctx context.Context, bucket string) error {
region, err := f.getBucketLocation(ctx, bucket)
func (f *Fs) updateRegionForBucket(bucket string) error {
region, err := f.getBucketLocation(bucket)
if err != nil {
return errors.Wrap(err, "reading bucket location failed")
}
@@ -1859,7 +1854,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
}
}
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok {
@@ -2006,7 +2001,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
var resp *s3.ListBucketsOutput
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListBucketsWithContext(ctx, &req)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return nil, err
@@ -2121,7 +2116,7 @@ func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error) {
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.HeadBucketWithContext(ctx, &req)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err == nil {
return true, nil
@@ -2157,7 +2152,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.CreateBucketWithContext(ctx, &req)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err == nil {
fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL)
@@ -2187,7 +2182,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.DeleteBucketWithContext(ctx, &req)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err == nil {
fs.Infof(f, "Bucket %q deleted", bucket)
@@ -2247,7 +2242,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
}
return f.pacer.Call(func() (bool, error) {
_, err := f.c.CopyObjectWithContext(ctx, req)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
}
@@ -2291,7 +2286,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
if err := f.pacer.Call(func() (bool, error) {
var err error
cout, err = f.c.CreateMultipartUploadWithContext(ctx, req)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
}); err != nil {
return err
}
@@ -2307,7 +2302,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
UploadId: uid,
RequestPayer: req.RequestPayer,
})
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
})()
@@ -2330,7 +2325,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
uploadPartReq.CopySourceRange = aws.String(calculateRange(partSize, partNum-1, numParts, srcSize))
uout, err := f.c.UploadPartCopyWithContext(ctx, uploadPartReq)
if err != nil {
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
}
parts = append(parts, &s3.CompletedPart{
PartNumber: &partNum,
@@ -2352,7 +2347,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
RequestPayer: req.RequestPayer,
UploadId: uid,
})
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
}
@@ -2583,7 +2578,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
reqCopy.Key = &bucketPath
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.RestoreObject(&reqCopy)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
st.Status = err.Error()
@@ -2631,7 +2626,7 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uplo
var resp *s3.ListMultipartUploadsOutput
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListMultipartUploads(&req)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return nil, errors.Wrapf(err, "list multipart uploads bucket %q key %q", bucket, key)
@@ -2806,7 +2801,7 @@ func (o *Object) headObject(ctx context.Context) (resp *s3.HeadObjectOutput, err
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok {
@@ -2962,7 +2957,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var err error
httpReq.HTTPRequest = httpReq.HTTPRequest.WithContext(ctx)
err = httpReq.Send()
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
if err, ok := err.(awserr.RequestFailure); ok {
if err.Code() == "InvalidObjectState" {
@@ -3021,7 +3016,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
err = f.pacer.Call(func() (bool, error) {
var err error
cout, err = f.c.CreateMultipartUploadWithContext(ctx, &mReq)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "multipart upload failed to initialise")
@@ -3040,7 +3035,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
UploadId: uid,
RequestPayer: req.RequestPayer,
})
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if errCancel != nil {
fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
@@ -3116,7 +3111,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
uout, err := f.c.UploadPartWithContext(gCtx, uploadPartReq)
if err != nil {
if partNum <= int64(concurrency) {
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
}
// retry all chunks once have done the first batch
return true, err
@@ -3156,7 +3151,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
RequestPayer: req.RequestPayer,
UploadId: uid,
})
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "multipart upload failed to finalise")
@@ -3311,11 +3306,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var err error
resp, err = o.fs.srv.Do(httpReq)
if err != nil {
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
}
body, err := rest.ReadBody(resp)
if err != nil {
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
}
if resp.StatusCode >= 200 && resp.StatusCode < 299 {
return false, nil
@@ -3366,7 +3361,7 @@ func (o *Object) Remove(ctx context.Context) error {
}
err := o.fs.pacer.Call(func() (bool, error) {
_, err := o.fs.c.DeleteObjectWithContext(ctx, &req)
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
return err
}
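The multipart copy above asks S3 to copy one byte range per part via CopySourceRange, using calculateRange to build the range string. The call signature below matches the hunk; the body is an assumption about the arithmetic, not the rclone implementation:

package s3sketch

import "fmt"

// calculateRange returns the inclusive byte range for part partIndex
// (0-based) of an object of totalSize bytes split into numParts parts of
// partSize bytes each, in the "bytes=start-end" form S3 expects.
func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
	start := partIndex * partSize
	end := start + partSize - 1
	if partIndex == numParts-1 {
		end = totalSize - 1 // the last part runs to the end of the object
	}
	return fmt.Sprintf("bytes=%d-%d", start, end)
}

For example, a 10 MiB object copied in 5 MiB parts would yield "bytes=0-5242879" for the first part and "bytes=5242880-10485759" for the second.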

View File

@@ -682,7 +682,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
"need_idx_progress": {"true"},
"replace": {"1"},
}
formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename))
formReader, contentType, _, err := rest.MultipartUpload(in, parameters, "file", f.opt.Enc.FromStandardName(filename))
if err != nil {
return nil, errors.Wrap(err, "failed to make multipart upload")
}

View File

@@ -16,7 +16,6 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
@@ -287,7 +286,6 @@ type Fs struct {
drain *time.Timer // used to drain the pool when we stop using the connections
pacer *fs.Pacer // pacer for operations
savedpswd string
transfers int32 // count in use references
}
// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -350,23 +348,6 @@ func (c *conn) closed() error {
return nil
}
// Show that we are doing an upload or download
//
// Call removeTransfer() when done
func (f *Fs) addTransfer() {
atomic.AddInt32(&f.transfers, 1)
}
// Show the upload or download done
func (f *Fs) removeTransfer() {
atomic.AddInt32(&f.transfers, -1)
}
// getTransfers returns the number of transfers in progress
func (f *Fs) getTransfers() int32 {
return atomic.LoadInt32(&f.transfers)
}
// Open a new connection to the SFTP server.
func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
// Rate limit rate of new connections
@@ -414,12 +395,8 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
opts = opts[:len(opts):len(opts)] // make sure we don't overwrite the callers opts
opts = append(opts,
sftp.UseFstat(f.opt.UseFstat),
// FIXME disabled after library reversion
// sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
)
if f.opt.DisableConcurrentReads { // FIXME
fs.Errorf(f, "Ignoring disable_concurrent_reads after library reversion - see #5197")
}
return sftp.NewClientPipe(pr, pw, opts...)
}
@@ -497,13 +474,6 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
func (f *Fs) drainPool(ctx context.Context) (err error) {
f.poolMu.Lock()
defer f.poolMu.Unlock()
if transfers := f.getTransfers(); transfers != 0 {
fs.Debugf(f, "Not closing %d unused connections as %d transfers in progress", len(f.pool), transfers)
if f.opt.IdleTimeout > 0 {
f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
}
return nil
}
if f.opt.IdleTimeout > 0 {
f.drain.Stop()
}
@@ -1384,9 +1354,7 @@ func (o *Object) stat(ctx context.Context) error {
//
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if !o.fs.opt.SetModTime {
return nil
}
if o.fs.opt.SetModTime {
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return errors.Wrap(err, "SetModTime")
@@ -1396,7 +1364,8 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if err != nil {
return errors.Wrap(err, "SetModTime failed")
}
err = o.stat(ctx)
}
err := o.stat(ctx)
if err != nil {
return errors.Wrap(err, "SetModTime stat failed")
}
@@ -1410,22 +1379,18 @@ func (o *Object) Storable() bool {
// objectReader represents a file open for reading on the SFTP server
type objectReader struct {
f *Fs
sftpFile *sftp.File
pipeReader *io.PipeReader
done chan struct{}
}
func (f *Fs) newObjectReader(sftpFile *sftp.File) *objectReader {
func newObjectReader(sftpFile *sftp.File) *objectReader {
pipeReader, pipeWriter := io.Pipe()
file := &objectReader{
f: f,
sftpFile: sftpFile,
pipeReader: pipeReader,
done: make(chan struct{}),
}
// Show connection in use
f.addTransfer()
go func() {
// Use sftpFile.WriteTo to pump data so that it gets a
@@ -1455,8 +1420,6 @@ func (file *objectReader) Close() (err error) {
_ = file.pipeReader.Close()
// Wait for the background process to finish
<-file.done
// Show connection no longer in use
file.f.removeTransfer()
return err
}
@@ -1490,14 +1453,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, errors.Wrap(err, "Open Seek failed")
}
}
in = readers.NewLimitedReadCloser(o.fs.newObjectReader(sftpFile), limit)
in = readers.NewLimitedReadCloser(newObjectReader(sftpFile), limit)
return in, nil
}
// Update a remote sftp file using the data <in> and ModTime from <src>
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
o.fs.addTransfer() // Show transfer in progress
defer o.fs.removeTransfer()
// Clear the hash cache since we are about to update the object
o.md5sum = nil
o.sha1sum = nil
@@ -1535,28 +1496,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
remove()
return errors.Wrap(err, "Update Close failed")
}
// Set the mod time - this stats the object if o.fs.opt.SetModTime == true
err = o.SetModTime(ctx, src.ModTime(ctx))
if err != nil {
return errors.Wrap(err, "Update SetModTime failed")
}
// Stat the file after the upload to read its stats back if o.fs.opt.SetModTime == false
if !o.fs.opt.SetModTime {
err = o.stat(ctx)
if err == fs.ErrorObjectNotFound {
// In the specific case of o.fs.opt.SetModTime == false
// if the object wasn't found then don't return an error
fs.Debugf(o, "Not found after upload with set_modtime=false so returning best guess")
o.modTime = src.ModTime(ctx)
o.size = src.Size()
o.mode = os.FileMode(0666) // regular file
} else if err != nil {
return errors.Wrap(err, "Update stat failed")
}
}
return nil
}
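The sftp hunks above drop an atomic in-flight transfer counter that kept the idle-timeout sweep from closing pooled connections while uploads or downloads were still running. A reduced sketch of that mechanism, with the Fs type cut down to the single field the counter needs:

package sftpsketch

import "sync/atomic"

// Fs holds only the counter needed for this sketch.
type Fs struct {
	transfers int32 // uploads/downloads currently using pooled connections
}

func (f *Fs) addTransfer()        { atomic.AddInt32(&f.transfers, 1) }
func (f *Fs) removeTransfer()     { atomic.AddInt32(&f.transfers, -1) }
func (f *Fs) getTransfers() int32 { return atomic.LoadInt32(&f.transfers) }

// drainPool skips closing idle connections while any transfer is running,
// mirroring the guard shown in the drainPool hunk above.
func (f *Fs) drainPool() {
	if n := f.getTransfers(); n != 0 {
		// still busy: leave the pool alone and try again later
		return
	}
	// close idle connections here
}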

View File

@@ -13,7 +13,6 @@ import (
"strings"
"time"
"github.com/google/uuid"
"github.com/ncw/swift/v2"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
@@ -292,10 +291,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(err error) (bool, error) {
// If this is a swift.Error object extract the HTTP error code
if swiftError, ok := err.(*swift.Error); ok {
for _, e := range retryErrorCodes {
@@ -311,7 +307,7 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
// shouldRetryHeaders returns a boolean as to whether this err
// deserves to be retried. It reads the headers passed in looking for
// `Retry-After`. It returns the err as a convenience
func shouldRetryHeaders(ctx context.Context, headers swift.Headers, err error) (bool, error) {
func shouldRetryHeaders(headers swift.Headers, err error) (bool, error) {
if swiftError, ok := err.(*swift.Error); ok && swiftError.StatusCode == 429 {
if value := headers["Retry-After"]; value != "" {
retryAfter, parseErr := strconv.Atoi(value)
@@ -330,7 +326,7 @@ func shouldRetryHeaders(ctx context.Context, headers swift.Headers, err error) (
}
}
}
return shouldRetry(ctx, err)
return shouldRetry(err)
}
// parsePath parses a remote 'url'
@@ -472,7 +468,7 @@ func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
info, rxHeaders, err = f.c.Object(ctx, f.rootContainer, encodedDirectory)
return shouldRetryHeaders(ctx, rxHeaders, err)
return shouldRetryHeaders(rxHeaders, err)
})
if err == nil && info.ContentType != directoryMarkerContentType {
newRoot := path.Dir(f.root)
@@ -580,7 +576,7 @@ func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix
var err error
err = f.pacer.Call(func() (bool, error) {
objects, err = f.c.Objects(ctx, container, opts)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err == nil {
for i := range objects {
@@ -665,7 +661,7 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
var containers []swift.Container
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(ctx, nil)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "container listing failed")
@@ -757,7 +753,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var err error
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(ctx, nil)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "container listing failed")
@@ -809,7 +805,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
_, rxHeaders, err = f.c.Container(ctx, container)
return shouldRetryHeaders(ctx, rxHeaders, err)
return shouldRetryHeaders(rxHeaders, err)
})
}
if err == swift.ContainerNotFound {
@@ -819,7 +815,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
}
err = f.pacer.Call(func() (bool, error) {
err = f.c.ContainerCreate(ctx, container, headers)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err == nil {
fs.Infof(f, "Container %q created", container)
@@ -840,7 +836,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
err := f.cache.Remove(container, func() error {
err := f.pacer.Call(func() (bool, error) {
err := f.c.ContainerDelete(ctx, container)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err == nil {
fs.Infof(f, "Container %q removed", container)
@@ -906,125 +902,18 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
isLargeObject, err := srcObj.isLargeObject(ctx)
if err != nil {
return nil, err
}
if isLargeObject {
/*handle large object*/
err = copyLargeObject(ctx, f, srcObj, dstContainer, dstPath)
} else {
srcContainer, srcPath := srcObj.split()
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = f.c.ObjectCopy(ctx, srcContainer, srcPath, dstContainer, dstPath, nil)
return shouldRetryHeaders(ctx, rxHeaders, err)
return shouldRetryHeaders(rxHeaders, err)
})
}
if err != nil {
return nil, err
}
return f.NewObject(ctx, remote)
}
func copyLargeObject(ctx context.Context, f *Fs, src *Object, dstContainer string, dstPath string) error {
segmentsContainer := dstContainer + "_segments"
err := f.makeContainer(ctx, segmentsContainer)
if err != nil {
return err
}
segments, err := src.getSegmentsLargeObject(ctx)
if err != nil {
return err
}
if len(segments) == 0 {
return errors.New("could not copy object, list segments are empty")
}
nanoSeconds := time.Now().Nanosecond()
prefixSegment := fmt.Sprintf("%v/%v/%s", nanoSeconds, src.size, strings.ReplaceAll(uuid.New().String(), "-", ""))
copiedSegmentsLen := 10
for _, value := range segments {
if len(value) <= 0 {
continue
}
fragment := value[0]
if len(fragment) <= 0 {
continue
}
copiedSegmentsLen = len(value)
firstIndex := strings.Index(fragment, "/")
if firstIndex < 0 {
firstIndex = 0
} else {
firstIndex = firstIndex + 1
}
lastIndex := strings.LastIndex(fragment, "/")
if lastIndex < 0 {
lastIndex = len(fragment)
} else {
lastIndex = lastIndex - 1
}
prefixSegment = fragment[firstIndex:lastIndex]
break
}
copiedSegments := make([]string, copiedSegmentsLen)
defer handleCopyFail(ctx, f, segmentsContainer, copiedSegments, err)
for c, ss := range segments {
if len(ss) <= 0 {
continue
}
for _, s := range ss {
lastIndex := strings.LastIndex(s, "/")
if lastIndex <= 0 {
lastIndex = 0
} else {
lastIndex = lastIndex + 1
}
segmentName := dstPath + "/" + prefixSegment + "/" + s[lastIndex:]
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = f.c.ObjectCopy(ctx, c, s, segmentsContainer, segmentName, nil)
copiedSegments = append(copiedSegments, segmentName)
return shouldRetryHeaders(ctx, rxHeaders, err)
})
if err != nil {
return err
}
}
}
m := swift.Metadata{}
headers := m.ObjectHeaders()
headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s/%s", segmentsContainer, dstPath, prefixSegment))
headers["Content-Length"] = "0"
emptyReader := bytes.NewReader(nil)
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = f.c.ObjectPut(ctx, dstContainer, dstPath, emptyReader, true, "", src.contentType, headers)
return shouldRetryHeaders(ctx, rxHeaders, err)
})
return err
}
// remove copied segments when the copy process failed
func handleCopyFail(ctx context.Context, f *Fs, segmentsContainer string, segments []string, err error) {
fs.Debugf(f, "handle copy segment fail")
if err == nil {
return
}
if len(segmentsContainer) == 0 {
fs.Debugf(f, "invalid segments container")
return
}
if len(segments) == 0 {
fs.Debugf(f, "segments is empty")
return
}
fs.Debugf(f, "action delete segments what copied")
for _, v := range segments {
_ = f.c.ObjectDelete(ctx, segmentsContainer, v)
}
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
@@ -1152,7 +1041,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
container, containerPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
info, h, err = o.fs.c.Object(ctx, container, containerPath)
return shouldRetryHeaders(ctx, h, err)
return shouldRetryHeaders(h, err)
})
if err != nil {
if err == swift.ObjectNotFound {
@@ -1211,7 +1100,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
container, containerPath := o.split()
return o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectUpdate(ctx, container, containerPath, newHeaders)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
}
@@ -1232,7 +1121,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
in, rxHeaders, err = o.fs.c.ObjectOpen(ctx, container, containerPath, !isRanging, headers)
return shouldRetryHeaders(ctx, rxHeaders, err)
return shouldRetryHeaders(rxHeaders, err)
})
return
}
@@ -1322,7 +1211,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
_, rxHeaders, err = o.fs.c.Container(ctx, segmentsContainer)
return shouldRetryHeaders(ctx, rxHeaders, err)
return shouldRetryHeaders(rxHeaders, err)
})
if err == swift.ContainerNotFound {
headers := swift.Headers{}
@@ -1331,7 +1220,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
}
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ContainerCreate(ctx, segmentsContainer, headers)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
}
if err != nil {
@@ -1352,8 +1241,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
if segmentInfos == nil || len(segmentInfos) == 0 {
return
}
_ctx := context.Background()
deleteChunks(_ctx, o, segmentsContainer, segmentInfos)
deleteChunks(ctx, o, segmentsContainer, segmentInfos)
})()
for {
// can we read at least one byte?
@@ -1379,7 +1267,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
if err == nil {
segmentInfos = append(segmentInfos, segmentPath)
}
return shouldRetryHeaders(ctx, rxHeaders, err)
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
return "", err
@@ -1393,7 +1281,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, emptyReader, true, "", contentType, headers)
return shouldRetryHeaders(ctx, rxHeaders, err)
return shouldRetryHeaders(rxHeaders, err)
})
if err == nil {
@@ -1468,7 +1356,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var rxHeaders swift.Headers
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, in, true, "", contentType, headers)
return shouldRetryHeaders(ctx, rxHeaders, err)
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
return err
@@ -1526,7 +1414,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
// Remove file/manifest first
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(ctx, container, containerPath)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return err
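shouldRetryHeaders above sleeps through short Retry-After hints from Swift and turns long ones into an error the caller can surface. A standard-library sketch of that decision; the one-minute cutoff and the plain wrapped error stand in for rclone's actual threshold and fserrors.NewErrorRetryAfter:

package swiftsketch

import (
	"fmt"
	"strconv"
	"time"
)

// retryAfter decides what to do with a rate-limited response: wait out a
// short Retry-After and retry, or give up and report the wait to the caller.
func retryAfter(headers map[string]string, err error) (bool, error) {
	value := headers["Retry-After"]
	if value == "" {
		return false, err
	}
	seconds, parseErr := strconv.Atoi(value)
	if parseErr != nil {
		return false, err
	}
	wait := time.Duration(seconds) * time.Second
	if wait > time.Minute {
		return false, fmt.Errorf("rate limited, retry after %v: %w", wait, err)
	}
	time.Sleep(wait)
	return true, err
}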

View File

@@ -1,7 +1,6 @@
package swift
import (
"context"
"testing"
"time"
@@ -33,7 +32,6 @@ func TestInternalUrlEncode(t *testing.T) {
}
func TestInternalShouldRetryHeaders(t *testing.T) {
ctx := context.Background()
headers := swift.Headers{
"Content-Length": "64",
"Content-Type": "text/html; charset=UTF-8",
@@ -47,7 +45,7 @@ func TestInternalShouldRetryHeaders(t *testing.T) {
// Short sleep should just do the sleep
start := time.Now()
retry, gotErr := shouldRetryHeaders(ctx, headers, err)
retry, gotErr := shouldRetryHeaders(headers, err)
dt := time.Since(start)
assert.True(t, retry)
assert.Equal(t, err, gotErr)
@@ -56,7 +54,7 @@ func TestInternalShouldRetryHeaders(t *testing.T) {
// Long sleep should return RetryError
headers["Retry-After"] = "3600"
start = time.Now()
retry, gotErr = shouldRetryHeaders(ctx, headers, err)
retry, gotErr = shouldRetryHeaders(headers, err)
dt = time.Since(start)
assert.True(t, dt < time.Second)
assert.False(t, retry)

View File

@@ -80,7 +80,6 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("NoChunk", f.testNoChunk)
t.Run("WithChunk", f.testWithChunk)
t.Run("WithChunkFail", f.testWithChunkFail)
t.Run("CopyLargeObject", f.testCopyLargeObject)
}
func (f *Fs) testWithChunk(t *testing.T) {
@@ -155,39 +154,4 @@ func (f *Fs) testWithChunkFail(t *testing.T) {
require.Empty(t, objs)
}
func (f *Fs) testCopyLargeObject(t *testing.T) {
preConfChunkSize := f.opt.ChunkSize
preConfChunk := f.opt.NoChunk
f.opt.NoChunk = false
f.opt.ChunkSize = 1024 * fs.Byte
defer func() {
//restore old config after test
f.opt.ChunkSize = preConfChunkSize
f.opt.NoChunk = preConfChunk
}()
file := fstest.Item{
ModTime: fstest.Time("2020-12-31T04:05:06.499999999Z"),
Path: "large.txt",
Size: -1, // use unknown size during upload
}
const contentSize = 2048
contents := random.String(contentSize)
buf := bytes.NewBufferString(contents)
uploadHash := hash.NewMultiHasher()
in := io.TeeReader(buf, uploadHash)
file.Size = -1
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
ctx := context.TODO()
obj, err := f.Features().PutStream(ctx, in, obji)
require.NoError(t, err)
require.NotEmpty(t, obj)
remoteTarget := "large.txt (copy)"
objTarget, err := f.Features().Copy(ctx, obj, remoteTarget)
require.NoError(t, err)
require.NotEmpty(t, objTarget)
require.Equal(t, obj.Size(), objTarget.Size())
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -59,19 +59,9 @@ func (d *Directory) candidates() []upstream.Entry {
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
entries, err := o.fs.actionEntries(o.candidates()...)
if err == fs.ErrorPermissionDenied {
// There are no candidates in this object which can be written to
// So attempt to create a new object instead
newO, err := o.fs.put(ctx, in, src, false, options...)
if err != nil {
return err
}
// Update current object
*o = *newO.(*Object)
return nil
} else if err != nil {
return err
}
if len(entries) == 1 {
obj := entries[0].(*upstream.Object)
return obj.Update(ctx, in, src, options...)

View File

@@ -17,9 +17,7 @@ func init() {
type EpFF struct{}
func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
ch := make(chan *upstream.Fs, len(upstreams))
ctx, cancel := context.WithCancel(ctx)
defer cancel()
ch := make(chan *upstream.Fs)
for _, u := range upstreams {
u := u // Closure
go func() {
@@ -32,10 +30,16 @@ func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath stri
}()
}
var u *upstream.Fs
for range upstreams {
for i := 0; i < len(upstreams); i++ {
u = <-ch
if u != nil {
break
// close remaining goroutines
go func(num int) {
defer close(ch)
for i := 0; i < num; i++ {
<-ch
}
}(len(upstreams) - 1 - i)
}
}
if u == nil {
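The two epff variants above differ in how the losing goroutines get cleaned up: one drains the channel with helper goroutines, the other sizes the channel to the number of upstreams and cancels a shared context so every sender can finish without blocking. A self-contained sketch of the buffered-channel variant, with lookup standing in for the per-upstream NewObject call:

package unionsketch

import "context"

// first returns the first upstream for which lookup succeeds, cancelling the
// remaining lookups once a winner is found. The buffered channel guarantees
// every goroutine can send exactly once without blocking.
func first(ctx context.Context, upstreams []string, lookup func(context.Context, string) bool) string {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	ch := make(chan string, len(upstreams))
	for _, u := range upstreams {
		u := u // capture loop variable
		go func() {
			if lookup(ctx, u) {
				ch <- u
			} else {
				ch <- ""
			}
		}()
	}
	for range upstreams {
		if u := <-ch; u != "" {
			return u
		}
	}
	return "" // nothing found on any upstream
}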

View File

@@ -1,67 +0,0 @@
package union
import (
"bytes"
"context"
"testing"
"time"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func (f *Fs) TestInternalReadOnly(t *testing.T) {
if f.name != "TestUnionRO" {
t.Skip("Only on RO union")
}
dir := "TestInternalReadOnly"
ctx := context.Background()
rofs := f.upstreams[len(f.upstreams)-1]
assert.False(t, rofs.IsWritable())
// Put a file onto the read only fs
contents := random.String(50)
file1 := fstest.NewItem(dir+"/file.txt", contents, time.Now())
_, obj1 := fstests.PutTestContents(ctx, t, rofs, &file1, contents, true)
// Check read from readonly fs via union
o, err := f.NewObject(ctx, file1.Path)
require.NoError(t, err)
assert.Equal(t, int64(50), o.Size())
// Now call Update on the union Object with new data
contents2 := random.String(100)
file2 := fstest.NewItem(dir+"/file.txt", contents2, time.Now())
in := bytes.NewBufferString(contents2)
src := object.NewStaticObjectInfo(file2.Path, file2.ModTime, file2.Size, true, nil, nil)
err = o.Update(ctx, in, src)
require.NoError(t, err)
assert.Equal(t, int64(100), o.Size())
// Check we read the new object via the union
o, err = f.NewObject(ctx, file1.Path)
require.NoError(t, err)
assert.Equal(t, int64(100), o.Size())
// Remove the object
assert.NoError(t, o.Remove(ctx))
// Check we read the old object in the read only layer now
o, err = f.NewObject(ctx, file1.Path)
require.NoError(t, err)
assert.Equal(t, int64(50), o.Size())
// Remove file and dir from read only fs
assert.NoError(t, obj1.Remove(ctx))
assert.NoError(t, rofs.Rmdir(ctx, dir))
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("ReadOnly", f.TestInternalReadOnly)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -2,15 +2,13 @@
package union_test
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -26,28 +24,17 @@ func TestIntegration(t *testing.T) {
})
}
func makeTestDirs(t *testing.T, n int) (dirs []string, clean func()) {
for i := 1; i <= n; i++ {
dir, err := ioutil.TempDir("", fmt.Sprintf("rclone-union-test-%d", n))
require.NoError(t, err)
dirs = append(dirs, dir)
}
clean = func() {
for _, dir := range dirs {
err := os.RemoveAll(dir)
assert.NoError(t, err)
}
}
return dirs, clean
}
func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs, clean := makeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-standard1")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-standard2")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-standard3")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
name := "TestUnion"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
@@ -67,9 +54,13 @@ func TestRO(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs, clean := makeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + ":ro " + dirs[2] + ":ro"
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-ro1")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-ro2")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-ro3")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + ":ro " + tempdir3 + ":ro"
name := "TestUnionRO"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
@@ -89,9 +80,13 @@ func TestNC(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs, clean := makeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + ":nc " + dirs[2] + ":nc"
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-nc1")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-nc2")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-nc3")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + ":nc " + tempdir3 + ":nc"
name := "TestUnionNC"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
@@ -111,9 +106,13 @@ func TestPolicy1(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs, clean := makeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy11")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy12")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy13")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
name := "TestUnionPolicy1"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
@@ -133,9 +132,13 @@ func TestPolicy2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs, clean := makeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy21")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy22")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy23")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
name := "TestUnionPolicy2"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
@@ -155,9 +158,13 @@ func TestPolicy3(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs, clean := makeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy31")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy32")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy33")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
name := "TestUnionPolicy3"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
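
The hunks above replace per-test tempdir boilerplate with a shared `makeTestDirs` helper whose body is not part of this diff. A minimal sketch of what such a helper could look like, assuming it only needs to create `n` empty upstream directories and return a cleanup function (package name, imports and error handling are illustrative, not the committed code):

```go
package union

import (
	"fmt"
	"io/ioutil"
	"os"
	"testing"

	"github.com/stretchr/testify/require"
)

// makeTestDirs makes n empty temporary directories to use as union
// upstreams and returns a cleanup function which removes them again.
func makeTestDirs(t *testing.T, n int) (dirs []string, clean func()) {
	for i := 1; i <= n; i++ {
		dir, err := ioutil.TempDir("", fmt.Sprintf("rclone-union-test-%d", i))
		require.NoError(t, err)
		dirs = append(dirs, dir)
	}
	clean = func() {
		for _, dir := range dirs {
			require.NoError(t, os.RemoveAll(dir))
		}
	}
	return dirs, clean
}
```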

View File

@@ -14,7 +14,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/fspath"
)
var (
@@ -63,7 +62,7 @@ type Entry interface {
// New creates a new Fs based on the
// string formatted `type:root_path(:ro/:nc)`
func New(ctx context.Context, remote, root string, cacheTime time.Duration) (*Fs, error) {
configName, fsPath, err := fspath.SplitFs(remote)
_, configName, fsPath, err := fs.ParseRemote(remote)
if err != nil {
return nil, err
}
@@ -84,13 +83,15 @@ func New(ctx context.Context, remote, root string, cacheTime time.Duration) (*Fs
f.creatable = false
fsPath = fsPath[0 : len(fsPath)-3]
}
remote = configName + fsPath
rFs, err := cache.Get(ctx, remote)
if configName != "local" {
fsPath = configName + ":" + fsPath
}
rFs, err := cache.Get(ctx, fsPath)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
f.RootFs = rFs
rootString := path.Join(remote, filepath.ToSlash(root))
rootString := path.Join(fsPath, filepath.ToSlash(root))
myFs, err := cache.Get(ctx, rootString)
if err != nil && err != fs.ErrorIsFile {
return nil, err
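
To make the string handling in this hunk concrete, here is an illustrative walk-through with a made-up remote name, derived purely from the assignments visible above rather than from the package documentation:

```go
// remote == "myremote:path/to/dir" (illustrative value)

// fspath.SplitFs variant: configName keeps its trailing colon, so the
// remote is rebuilt by plain concatenation.
//   configName = "myremote:"   fsPath = "path/to/dir"
//   remote     = configName + fsPath           // "myremote:path/to/dir"

// fs.ParseRemote variant: configName is bare, so the colon has to be
// re-added, except for the implicit "local" backend.
//   configName = "myremote"    fsPath = "path/to/dir"
//   fsPath     = configName + ":" + fsPath     // "myremote:path/to/dir"
```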

View File

@@ -36,8 +36,8 @@ import (
)
const (
rcloneClientID = "1000.46MXF275FM2XV7QCHX5A7K3LGME66B"
rcloneEncryptedClientSecret = "U-2gxclZQBcOG9NPhjiXAhj-f0uQ137D0zar8YyNHXHkQZlTeSpIOQfmCb4oSpvosJp_SJLXmLLeUA"
rcloneClientID = "1000.OZNFWW075EKDSIE1R42HI9I2SUPC9A"
rcloneEncryptedClientSecret = "rn7myzbsYK3WlqO2EU6jU8wmj0ylsx7_1B5wvSaVncYbu1Wt0QxPW9FFbidjqAZtyxnBenYIWq1pcA"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
@@ -96,11 +96,6 @@ func init() {
log.Fatalf("Failed to configure token: %v", err)
}
}
if fs.GetConfig(ctx).AutoConfirm {
return
}
if err = setupRoot(ctx, name, m); err != nil {
log.Fatalf("Failed to configure root directory: %v", err)
}
@@ -166,7 +161,7 @@ type Object struct {
func setupRegion(m configmap.Mapper) {
region, ok := m.Get("region")
if !ok || region == "" {
if !ok {
log.Fatalf("No region set\n")
}
rootURL = fmt.Sprintf("https://workdrive.zoho.%s/api/v1", region)
@@ -377,7 +372,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err := configstruct.Set(m, opt); err != nil {
return nil, err
}
setupRegion(m)
root = parsePath(root)
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
@@ -652,7 +646,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
params.Set("filename", name)
params.Set("parent_id", parent)
params.Set("override-name-exist", strconv.FormatBool(true))
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
formReader, contentType, overhead, err := rest.MultipartUpload(in, nil, "content", name)
if err != nil {
return nil, errors.Wrap(err, "failed to make multipart upload")
}

View File

@@ -161,7 +161,15 @@ func buildZip(dir string) string {
// Build .deb and .rpm packages
//
// It returns a list of artifacts it has made
func buildDebAndRpm(dir, version, goarch string) []string {
func buildDebAndRpm(dir, version, goarchBuild string) []string {
goarch := stripVersion(goarchBuild)
// Base ARM build we will mark as "arm5" so nfpm puts the
// architecture in as armel not armhf
if goarchBuild == "arm" {
goarch = "arm5"
}
// Make internal version number acceptable to .deb and .rpm
pkgVersion := version[1:]
pkgVersion = strings.Replace(pkgVersion, "β", "-beta", -1)
@@ -376,7 +384,7 @@ func compileArch(version, goos, goarch, dir string) bool {
artifacts := []string{buildZip(dir)}
// build a .deb and .rpm if appropriate
if goos == "linux" {
artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
}
if *copyAs != "" {
for _, artifact := range artifacts {
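
A hedged illustration of the resulting architecture labels, assuming `stripVersion` simply drops a `-v7`-style suffix (its body is not shown in this hunk):

```go
// goarchBuild  ->  goarch handed to the packager
//   "arm"      ->  "arm5"    // packaged as armel (the point of this change)
//   "arm-v7"   ->  "arm"     // "-v7" stripped; left to the packager's default ARM mapping
//   "arm64"    ->  "arm64"   // unchanged
```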

View File

@@ -36,7 +36,6 @@ import (
"github.com/rclone/rclone/fs/rc/rcflags"
"github.com/rclone/rclone/fs/rc/rcserver"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/buildinfo"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/terminal"
"github.com/spf13/cobra"
@@ -75,24 +74,9 @@ const (
// ShowVersion prints the version to stdout
func ShowVersion() {
osVersion, osKernel := buildinfo.GetOSVersion()
if osVersion == "" {
osVersion = "unknown"
}
if osKernel == "" {
osKernel = "unknown"
}
linking, tagString := buildinfo.GetLinkingAndTags()
fmt.Printf("rclone %s\n", fs.Version)
fmt.Printf("- os/version: %s\n", osVersion)
fmt.Printf("- os/kernel: %s\n", osKernel)
fmt.Printf("- os/type: %s\n", runtime.GOOS)
fmt.Printf("- os/arch: %s\n", runtime.GOARCH)
fmt.Printf("- go/version: %s\n", runtime.Version())
fmt.Printf("- go/linking: %s\n", linking)
fmt.Printf("- go/tags: %s\n", tagString)
fmt.Printf("- os/arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
fmt.Printf("- go version: %s\n", runtime.Version())
}
// NewFsFile creates an Fs from a name but may point to a file.
@@ -100,7 +84,7 @@ func ShowVersion() {
// It returns a string with the file name if points to a file
// otherwise "".
func NewFsFile(remote string) (fs.Fs, string) {
_, fsPath, err := fspath.SplitFs(remote)
_, _, fsPath, err := fs.ParseRemote(remote)
if err != nil {
err = fs.CountError(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
@@ -564,7 +548,7 @@ func Main() {
setupRootCommand(Root)
AddBackendFlags()
if err := Root.Execute(); err != nil {
if strings.HasPrefix(err.Error(), "unknown command") && selfupdateEnabled {
if strings.HasPrefix(err.Error(), "unknown command") {
Root.PrintErrf("You could use '%s selfupdate' to get latest features.\n\n", Root.CommandPath())
}
log.Fatalf("Fatal error: %v", err)

View File

@@ -1,7 +0,0 @@
package cmount
// ProvidedBy returns true if the rclone build for the given OS
// provides support for lib/cgo-fuse
func ProvidedBy(osName string) bool {
return osName == "windows" || osName == "darwin"
}

View File

@@ -21,13 +21,12 @@ import (
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/buildinfo"
"github.com/rclone/rclone/vfs"
)
func init() {
name := "cmount"
cmountOnly := ProvidedBy(runtime.GOOS)
cmountOnly := runtime.GOOS == "windows" || runtime.GOOS == "darwin"
if cmountOnly {
name = "mount"
}
@@ -36,7 +35,6 @@ func init() {
cmd.Aliases = append(cmd.Aliases, "cmount")
}
mountlib.AddRc("cmount", mount)
buildinfo.Tags = append(buildinfo.Tags, "cmount")
}
// Find the option string in the current options

View File

@@ -3,7 +3,6 @@ package copyurl
import (
"context"
"errors"
"fmt"
"os"
"github.com/rclone/rclone/cmd"
@@ -15,7 +14,6 @@ import (
var (
autoFilename = false
printFilename = false
stdout = false
noClobber = false
)
@@ -24,7 +22,6 @@ func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &autoFilename, "auto-filename", "a", autoFilename, "Get the file name from the URL and use it for destination file path")
flags.BoolVarP(cmdFlags, &printFilename, "print-filename", "p", printFilename, "Print the resulting name from --auto-filename")
flags.BoolVarP(cmdFlags, &noClobber, "no-clobber", "", noClobber, "Prevent overwriting file with same name")
flags.BoolVarP(cmdFlags, &stdout, "stdout", "", stdout, "Write the output to stdout rather than a file")
}
@@ -36,16 +33,15 @@ var commandDefinition = &cobra.Command{
Download a URL's content and copy it to the destination without saving
it in temporary storage.
Setting ` + "`--auto-filename`" + ` will cause the file name to be retrieved from
Setting --auto-filename will cause the file name to be retrieved from
the from URL (after any redirections) and used in the destination
path. With ` + "`--print-filename`" + ` in addition, the resulting file name will
be printed.
path.
Setting ` + "`--no-clobber`" + ` will prevent overwriting file on the
Setting --no-clobber will prevent overwriting file on the
destination if there is one with the same name.
Setting ` + "`--stdout`" + ` or making the output file name ` + "`-`" + `
will cause the output to be written to standard output.
Setting --stdout or making the output file name "-" will cause the
output to be written to standard output.
`,
RunE: func(command *cobra.Command, args []string) (err error) {
cmd.CheckArgs(1, 2, command, args)
@@ -65,14 +61,10 @@ will cause the output to be written to standard output.
}
}
cmd.Run(true, true, command, func() error {
var dst fs.Object
if stdout {
err = operations.CopyURLToWriter(context.Background(), args[0], os.Stdout)
} else {
dst, err = operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, noClobber)
if printFilename && err == nil && dst != nil {
fmt.Println(dst.Remote())
}
_, err = operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, noClobber)
}
return err
})

View File

@@ -3,6 +3,7 @@ package link
import (
"context"
"fmt"
"time"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
@@ -12,7 +13,7 @@ import (
)
var (
expire = fs.DurationOff
expire = fs.Duration(time.Hour * 24 * 365 * 100)
unlink = false
)

View File

@@ -181,15 +181,6 @@ func (d *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) (err error) {
return nil
}
// Invalidate a leaf in a directory
func (d *Dir) invalidateEntry(dirNode fusefs.Node, leaf string) {
fs.Debugf(dirNode, "Invalidating %q", leaf)
err := d.fsys.server.InvalidateEntry(dirNode, leaf)
if err != nil {
fs.Debugf(dirNode, "Failed to invalidate %q: %v", leaf, err)
}
}
// Check interface satisfied
var _ fusefs.NodeRenamer = (*Dir)(nil)
@@ -206,13 +197,6 @@ func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs
return translateError(err)
}
// Invalidate the new directory entry so it gets re-read (in
// the background otherwise we cause a deadlock)
//
// See https://github.com/rclone/rclone/issues/4977 for why
go d.invalidateEntry(newDir, req.NewName)
//go d.invalidateEntry(d, req.OldName)
return nil
}

View File

@@ -22,7 +22,6 @@ type FS struct {
*vfs.VFS
f fs.Fs
opt *mountlib.Options
server *fusefs.Server
}
// Check interface satisfied

View File

@@ -91,12 +91,12 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
}
filesys := NewFS(VFS, opt)
filesys.server = fusefs.New(c, nil)
server := fusefs.New(c, nil)
// Serve the mount point in the background returning error to errChan
errChan := make(chan error, 1)
go func() {
err := filesys.server.Serve(filesys)
err := server.Serve(filesys)
closeErr := c.Close()
if err == nil {
err = closeErr

View File

@@ -334,7 +334,7 @@ metadata about files like in UNIX. One case that may arise is that other program
(incorrectly) interprets this as the file being accessible by everyone. For example
an SSH client may warn about "unprotected private key file".
WinFsp 2021 (version 1.9) introduces a new FUSE option "FileSecurity",
WinFsp 2021 (version 1.9, still in beta) introduces a new FUSE option "FileSecurity",
that allows the complete specification of file security descriptors using
[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
With this you can work around issues such as the mentioned "unprotected private key file"

View File

@@ -1,5 +1,3 @@
// +build !noselfupdate
package selfupdate
// Note: "|" will be replaced by backticks in the help string below
@@ -8,30 +6,10 @@ This command downloads the latest release of rclone and replaces
the currently running binary. The download is verified with a hashsum
and cryptographically signed signature.
If used without flags (or with implied |--stable| flag), this command
will install the latest stable release. However, some issues may be fixed
(or features added) only in the latest beta release. In such cases you should
run the command with the |--beta| flag, i.e. |rclone selfupdate --beta|.
You can check in advance what version would be installed by adding the
|--check| flag, then repeat the command without it when you are satisfied.
Sometimes the rclone team may recommend you a concrete beta or stable
rclone release to troubleshoot your issue or add a bleeding edge feature.
The |--version VER| flag, if given, will update to the concrete version
The |--version VER| flag, if given, will update to a concrete version
instead of the latest one. If you omit micro version from |VER| (for
example |1.53|), the latest matching micro version will be used.
Upon successful update rclone will print a message that contains a previous
version number. You will need it if you later decide to revert your update
for some reason. Then you'll have to note the previous version and run the
following command: |rclone selfupdate [--beta] OLDVER|.
If the old version contains only dots and digits (for example |v1.54.0|)
then it's a stable release so you won't need the |--beta| flag. Beta releases
have additional information similar to |v1.54.0-beta.5111.06f1c0c61|.
(if you are a developer and use a locally built rclone, the version number
will end with |-DEV|, you will have to rebuild it as it obviously can't
be distributed).
If you previously installed rclone via a package manager, the package may
include local documentation or configure services. You may wish to update
with the flag |--package deb| or |--package rpm| (whichever is correct for
@@ -39,15 +17,6 @@ your OS) to update these too. This command with the default |--package zip|
will update only the rclone executable so the local manual may become
inaccurate after it.
The |rclone mount| command (https://rclone.org/commands/rclone_mount/) may
or may not support extended FUSE options depending on the build and OS.
|selfupdate| will refuse to update if the capability would be discarded.
Note: Windows forbids deletion of a currently running executable so this
command will rename the old executable to 'rclone.old.exe' upon success.
Please note that this command was not available before rclone version 1.55.
If it fails for you with the message |unknown command "selfupdate"| then
you will need to update manually following the install instructions located
at https://rclone.org/install/
`

View File

@@ -1,11 +0,0 @@
// +build noselfupdate
package selfupdate
import (
"github.com/rclone/rclone/lib/buildinfo"
)
func init() {
buildinfo.Tags = append(buildinfo.Tags, "noselfupdate")
}

View File

@@ -1,5 +1,3 @@
// +build !noselfupdate
package selfupdate
import (
@@ -23,11 +21,9 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/cmount"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/buildinfo"
"github.com/rclone/rclone/lib/random"
"github.com/spf13/cobra"
@@ -145,19 +141,20 @@ func InstallUpdate(ctx context.Context, opt *Options) error {
return errors.New("--stable and --beta are mutually exclusive")
}
// The `cmount` tag is added by cmd/cmount/mount.go only if build is static.
_, tags := buildinfo.GetLinkingAndTags()
if strings.Contains(" "+tags+" ", " cmount ") && !cmount.ProvidedBy(runtime.GOOS) {
return errors.New("updating would discard the mount FUSE capability, aborting")
}
newVersion, siteURL, err := GetVersion(ctx, opt.Beta, opt.Version)
if err != nil {
return errors.Wrap(err, "unable to detect new version")
}
oldVersion := fs.Version
if newVersion == oldVersion {
if newVersion == "" {
var err error
_, newVersion, _, err = versionCmd.GetVersion(siteURL + "/version.txt")
if err != nil {
return errors.Wrap(err, "unable to detect new version")
}
}
if newVersion == fs.Version {
fmt.Println("rclone is up to date")
return nil
}
@@ -169,7 +166,7 @@ func InstallUpdate(ctx context.Context, opt *Options) error {
} else {
err := installPackage(ctx, opt.Beta, newVersion, siteURL, opt.Package)
if err == nil {
fmt.Printf("Successfully updated rclone package from version %s to version %s\n", oldVersion, newVersion)
fmt.Printf("Successfully updated rclone package to version %s\n", newVersion)
}
return err
}
@@ -221,7 +218,7 @@ func InstallUpdate(ctx context.Context, opt *Options) error {
err = replaceExecutable(targetFile, newFile, savedFile)
if err == nil {
fmt.Printf("Successfully updated rclone from version %s to version %s\n", oldVersion, newVersion)
fmt.Printf("Successfully updated rclone to version %s\n", newVersion)
}
return err
}

View File

@@ -1,5 +1,3 @@
// +build !noselfupdate
package selfupdate
import (

View File

@@ -1,5 +1,3 @@
// +build !noselfupdate
package selfupdate
import (

View File

@@ -1,5 +1,4 @@
// +build !windows,!plan9,!js
// +build !noselfupdate
package selfupdate

View File

@@ -1,5 +1,4 @@
// +build plan9 js
// +build !noselfupdate
package selfupdate

View File

@@ -1,5 +1,4 @@
// +build windows
// +build !noselfupdate
package selfupdate

View File

@@ -1,5 +0,0 @@
// +build noselfupdate
package cmd
const selfupdateEnabled = false

View File

@@ -1,7 +0,0 @@
// +build !noselfupdate
package cmd
// This constant must be in the `cmd` package rather than `cmd/selfupdate`
// to prevent build failure due to dependency loop.
const selfupdateEnabled = true

View File

@@ -29,24 +29,14 @@ var commandDefinition = &cobra.Command{
Use: "version",
Short: `Show the version number.`,
Long: `
Show the rclone version number, the go version, the build target
OS and architecture, the runtime OS and kernel version and bitness,
build tags and the type of executable (static or dynamic).
Show the version number, the go version and the architecture.
For example:
Eg
$ rclone version
rclone v1.55.0
- os/version: ubuntu 18.04 (64 bit)
- os/kernel: 4.15.0-136-generic (x86_64)
- os/type: linux
- os/arch: amd64
- go/version: go1.16
- go/linking: static
- go/tags: none
Note: before rclone version 1.55 the os/type and os/arch lines were merged,
and the "go/version" line was tagged as "go version".
rclone v1.41
- os/arch: linux/amd64
- go version: go1.10
If you supply the --check flag, then it will do an online check to
compare your version with the latest release and the latest beta.
@@ -99,7 +89,9 @@ func GetVersion(url string) (v *semver.Version, vs string, date time.Time, err e
return v, vs, date, err
}
vs = strings.TrimSpace(string(bodyBytes))
vs = strings.TrimPrefix(vs, "rclone ")
if strings.HasPrefix(vs, "rclone ") {
vs = vs[7:]
}
vs = strings.TrimRight(vs, "β")
date, err = http.ParseTime(resp.Header.Get("Last-Modified"))
if err != nil {
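
As a worked example of the trimming above (the input string is hypothetical; a real version.txt body may differ):

```go
vs := strings.TrimSpace("rclone v1.55.0β\n") // "rclone v1.55.0β"
vs = strings.TrimPrefix(vs, "rclone ")       // "v1.55.0β"
vs = strings.TrimRight(vs, "β")              // "v1.55.0"
```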

View File

@@ -475,7 +475,3 @@ put them back in again.` >}}
* Maxwell Calman <mcalman@MacBook-Pro.local>
* Naveen Honest Raj <naveendurai19@gmail.com>
* Lucas Messenger <lmesseng@cisco.com>
* Manish Kumar <krmanish260@gmail.com>
* x0b <x0bdev@gmail.com>
* CERN through the CS3MESH4EOSC Project
* Nick Gaya <nicholasgaya+github@gmail.com>

View File

@@ -392,22 +392,6 @@ See: the [encoding section in the overview](/overview/#encoding) for more info.
- Type: MultiEncoder
- Default: Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8
#### --azureblob-public-access
Public access level of a container: blob, container.
- Config: public_access
- Env Var: RCLONE_AZUREBLOB_PUBLIC_ACCESS
- Type: string
- Default: ""
- Examples:
- ""
- The container and its blobs can be accessed only with an authorized request. It's a default value
- "blob"
- Blob data within this container can be read via anonymous request.
- "container"
- Allow full public read access for container and blob data.
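
For example (the command and container name are illustrative; the environment variable is the one listed above), a container whose blobs should be anonymously readable could be created with:

    RCLONE_AZUREBLOB_PUBLIC_ACCESS=blob rclone mkdir azureblob:mycontainer
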
{{< rem autogenerated options stop >}}
### Limitations ###

View File

@@ -5,160 +5,6 @@ description: "Rclone Changelog"
# Changelog
## v1.55.0 - 2021-03-31
[See commits](https://github.com/rclone/rclone/compare/v1.54.0...v1.55.0)
* New commands
* [selfupdate](/commands/rclone_selfupdate/) (Ivan Andreev)
* Allows rclone to update itself in-place or via a package (using `--package` flag)
* Reads cryptographically signed signatures for non beta releases
* Works on all OSes.
* [test](/commands/rclone_test/) - these are test commands - use with care!
* `histogram` - Makes a histogram of file name characters.
* `info` - Discovers file name or other limitations for paths.
* `makefiles` - Make a random file hierarchy for testing.
* `memory` - Load all the objects at remote:path into memory and report memory stats.
* New Features
* [Connection strings](/docs/#connection-strings)
* Config parameters can now be passed as part of the remote name as a connection string.
* For example to do the equivalent of `--drive-shared-with-me` use `drive,shared_with_me:`
* Make sure we don't save on the fly remote config to the config file (Nick Craig-Wood)
* Make sure backends with additional config have a different name for caching (Nick Craig-Wood)
* This work was sponsored by CERN, through the [CS3MESH4EOSC Project](https://cs3mesh4eosc.eu/).
* CS3MESH4EOSC has received funding from the European Union's Horizon 2020 research and innovation programme under Grant Agreement no. 863353.
* build
* Update go build version to go1.16 and raise minimum go version to go1.13 (Nick Craig-Wood)
* Make a macOS ARM64 build to support Apple Silicon (Nick Craig-Wood)
* Install macfuse 4.x instead of osxfuse 3.x (Nick Craig-Wood)
* Use `GO386=softfloat` instead of deprecated `GO386=387` for 386 builds (Nick Craig-Wood)
* Disable IOS builds for the time being (Nick Craig-Wood)
* Android builds made with up to date NDK (x0b)
* Add an rclone user to the Docker image but don't use it by default (cynthia kwok)
* dedupe: Make largest directory primary to minimize data moved (Saksham Khanna)
* config
* Wrap config library in an interface (Fionera)
* Make config file system pluggable (Nick Craig-Wood)
* `--config ""` or `"/notfound"` for in memory config only (Nick Craig-Wood)
* Clear fs cache of stale entries when altering config (Nick Craig-Wood)
* copyurl: Add option to print resulting auto-filename (albertony)
* delete: Make `--rmdirs` obey the filters (Nick Craig-Wood)
* docs - many fixes and reworks from edwardxml, albertony, pvalls, Ivan Andreev, Evan Harris, buengese, Alexey Tabakman
* encoder/filename - add SCSU as tables (Klaus Post)
* Add multiple paths support to `--compare-dest` and `--copy-dest` flag (K265)
* filter: Make `--exclude "dir/"` equivalent to `--exclude "dir/**"` (Nick Craig-Wood)
* fshttp: Add DSCP support with `--dscp` for QoS with differentiated services (Max Sum)
* lib/cache: Add Delete and DeletePrefix methods (Nick Craig-Wood)
* lib/file
* Make pre-allocate detect disk full errors and return them (Nick Craig-Wood)
* Don't run preallocate concurrently (Nick Craig-Wood)
* Retry preallocate on EINTR (Nick Craig-Wood)
* operations: Made copy and sync operations obey a RetryAfterError (Ankur Gupta)
* rc
* Add string alternatives for setting options over the rc (Nick Craig-Wood)
* Add `options/local` to see the options configured in the context (Nick Craig-Wood)
* Add `_config` parameter to set global config for just this rc call (Nick Craig-Wood)
* Implement passing filter config with `_filter` parameter (Nick Craig-Wood)
* Add `fscache/clear` and `fscache/entries` to control the fs cache (Nick Craig-Wood)
* Avoid +Inf value for speed in `core/stats` (albertony)
* Add a full set of stats to `core/stats` (Nick Craig-Wood)
* Allow `fs=` params to be a JSON blob (Nick Craig-Wood)
* rcd: Added systemd notification during the `rclone rcd` command. (Naveen Honest Raj)
* rmdirs: Make `--rmdirs` obey the filters (Nick Craig-Wood)
* version: Show build tags and type of executable (Ivan Andreev)
* Bug Fixes
* install.sh: make it fail on download errors (Ivan Andreev)
* Fix excessive retries missing `--max-duration` timeout (Nick Craig-Wood)
* Fix crash when `--low-level-retries=0` (Nick Craig-Wood)
* Fix failed token refresh on mounts created via the rc (Nick Craig-Wood)
* fshttp: Fix bandwidth limiting after bad merge (Nick Craig-Wood)
* lib/atexit
* Unregister interrupt handler once it has fired so users can interrupt again (Nick Craig-Wood)
* Fix occasional failure to unmount with CTRL-C (Nick Craig-Wood)
* Fix deadlock calling Finalise while Run is running (Nick Craig-Wood)
* lib/rest: Fix multipart uploads not stopping on context cancel (Nick Craig-Wood)
* Mount
* Allow mounting to root directory on windows (albertony)
* Improved handling of relative paths on windows (albertony)
* Fix unicode issues with accented characters on macOS (Nick Craig-Wood)
* Docs: document the new FileSecurity option in WinFsp 2021 (albertony)
* Docs: add note about volume path syntax on windows (albertony)
* Fix caching of old directories after renaming them (Nick Craig-Wood)
* Update cgofuse to the latest version to bring in macfuse 4 fix (Nick Craig-Wood)
* VFS
* `--vfs-used-is-size` to report used space using recursive scan (tYYGH)
* Don't set modification time if it was already correct (Nick Craig-Wood)
* Fix Create causing windows explorer to truncate files on CTRL-C CTRL-V (Nick Craig-Wood)
* Fix modtimes not updating when writing via cache (Nick Craig-Wood)
* Fix modtimes changing by fractional seconds after upload (Nick Craig-Wood)
* Fix modtime set if `--vfs-cache-mode writes`/`full` and no write (Nick Craig-Wood)
* Rename files in cache and cancel uploads on directory rename (Nick Craig-Wood)
* Fix directory renaming by renaming dirs cached in memory (Nick Craig-Wood)
* Local
* Add flag `--local-no-preallocate` (David Sze)
* Make `nounc` an advanced option except on Windows (albertony)
* Don't ignore preallocate disk full errors (Nick Craig-Wood)
* Cache
* Add `--fs-cache-expire-duration` to control the fs cache (Nick Craig-Wood)
* Crypt
* Add option to not encrypt data (Vesnyx)
* Log hash ok on upload (albertony)
* Azure Blob
* Add container public access level support. (Manish Kumar)
* B2
* Fix HTML files downloaded via cloudflare (Nick Craig-Wood)
* Box
* Fix transfers getting stuck on token expiry after API change (Nick Craig-Wood)
* Chunker
* Partially implement no-rename transactions (Maxwell Calman)
* Drive
* Don't stop server side copy if couldn't read description (Nick Craig-Wood)
* Pass context on to drive SDK - to help with cancellation (Nick Craig-Wood)
* Dropbox
* Add polling for changes support (Robert Thomas)
* Make `--timeout 0` work properly (Nick Craig-Wood)
* Raise priority of rate limited message to INFO to make it more noticeable (Nick Craig-Wood)
* Fichier
* Implement copy & move (buengese)
* Implement public link (buengese)
* FTP
* Implement Shutdown method (Nick Craig-Wood)
* Close idle connections after `--ftp-idle-timeout` (1m by default) (Nick Craig-Wood)
* Make `--timeout 0` work properly (Nick Craig-Wood)
* Add `--ftp-close-timeout` flag for use with awkward ftp servers (Nick Craig-Wood)
* Retry connections and logins on 421 errors (Nick Craig-Wood)
* Hdfs
* Fix permissions for when directory is created (Lucas Messenger)
* Onedrive
* Make `--timeout 0` work properly (Nick Craig-Wood)
* S3
* Fix `--s3-profile` which wasn't working (Nick Craig-Wood)
* SFTP
* Close idle connections after `--sftp-idle-timeout` (1m by default) (Nick Craig-Wood)
* Fix "file not found" errors for read once servers (Nick Craig-Wood)
* Fix SetModTime stat failed: object not found with `--sftp-set-modtime=false` (Nick Craig-Wood)
* Swift
* Update github.com/ncw/swift to v2.0.0 (Nick Craig-Wood)
* Implement copying large objects (nguyenhuuluan434)
* Union
* Fix crash when using epff policy (Nick Craig-Wood)
* Fix union attempting to update files on a read only file system (Nick Craig-Wood)
* Refactor to use fspath.SplitFs instead of fs.ParseRemote (Nick Craig-Wood)
* Fix initialisation broken in refactor (Nick Craig-Wood)
* WebDAV
* Add support for sharepoint with NTLM authentication (Rauno Ots)
* Make sharepoint-ntlm docs more consistent (Alex Chen)
* Improve terminology in sharepoint-ntlm docs (Ivan Andreev)
* Disable HTTP/2 for NTLM authentication (georne)
* Fix sharepoint-ntlm error 401 for parallel actions (Ivan Andreev)
* Check that purged directory really exists (Ivan Andreev)
* Yandex
* Make `--timeout 0` work properly (Nick Craig-Wood)
* Zoho
* Replace client id - you will need to `rclone config reconnect` after this (buengese)
* Add forgotten setupRegion() to NewFs - this finally fixes regions other than EU (buengese)
## v1.54.1 - 2021-03-08
[See commits](https://github.com/rclone/rclone/compare/v1.54.0...v1.54.1)

View File

@@ -416,27 +416,4 @@ Choose how chunker should handle files with missing or invalid chunks.
- "false"
- Warn user, skip incomplete file and proceed.
#### --chunker-transactions
Choose how chunker should handle temporary files during transactions.
- Config: transactions
- Env Var: RCLONE_CHUNKER_TRANSACTIONS
- Type: string
- Default: "rename"
- Examples:
- "rename"
- Rename temporary files after a successful transaction.
- "norename"
- Leave temporary file names and write transaction ID to metadata file.
- Metadata is required for no rename transactions (meta format cannot be "none").
- If you are using norename transactions you should be careful not to downgrade Rclone
- as older versions of Rclone don't support this transaction style and will misinterpret
- files manipulated by norename transactions.
- This method is EXPERIMENTAL, don't use on production systems.
- "auto"
- Rename or norename will be used depending on capabilities of the backend.
- If meta format is set to "none", rename transactions will always be used.
- This method is EXPERIMENTAL, don't use on production systems.
{{< rem autogenerated options stop >}}

View File

@@ -72,13 +72,11 @@ See the [global flags page](/flags/) for global options not listed here.
* [rclone rcd](/commands/rclone_rcd/) - Run rclone listening to remote control commands only.
* [rclone rmdir](/commands/rclone_rmdir/) - Remove the empty directory at path.
* [rclone rmdirs](/commands/rclone_rmdirs/) - Remove empty directories under the path.
* [rclone selfupdate](/commands/rclone_selfupdate/) - Update the rclone binary.
* [rclone serve](/commands/rclone_serve/) - Serve a remote over a protocol.
* [rclone settier](/commands/rclone_settier/) - Changes storage class/tier of objects in remote.
* [rclone sha1sum](/commands/rclone_sha1sum/) - Produces an sha1sum file for all the objects in the path.
* [rclone size](/commands/rclone_size/) - Prints the total size and number of objects in remote:path.
* [rclone sync](/commands/rclone_sync/) - Make source and dest identical, modifying destination only.
* [rclone test](/commands/rclone_test/) - Run a test command
* [rclone touch](/commands/rclone_touch/) - Create new file or change file modification time.
* [rclone tree](/commands/rclone_tree/) - List the contents of the remote in a tree like fashion.
* [rclone version](/commands/rclone_version/) - Show the version number.

View File

@@ -15,16 +15,15 @@ Copy url content to dest.
Download a URL's content and copy it to the destination without saving
it in temporary storage.
Setting `--auto-filename` will cause the file name to be retrieved from
Setting --auto-filename will cause the file name to be retrieved from
the from URL (after any redirections) and used in the destination
path. With `--print-filename` in addition, the resulting file name will
be printed.
path.
Setting `--no-clobber` will prevent overwriting file on the
Setting --no-clobber will prevent overwriting file on the
destination if there is one with the same name.
Setting `--stdout` or making the output file name `-`
will cause the output to be written to standard output.
Setting --stdout or making the output file name "-" will cause the
output to be written to standard output.
```
@@ -37,7 +36,6 @@ rclone copyurl https://example.com dest:path [flags]
-a, --auto-filename Get the file name from the URL and use it for destination file path
-h, --help help for copyurl
--no-clobber Prevent overwriting file with same name
-p, --print-filename Print the resulting name from --auto-filename
--stdout Write the output to stdout rather than a file
```
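
A couple of hedged example invocations of the modes described above (URL and paths are illustrative):

    rclone copyurl -a https://example.com/file.zip remote:dir
    rclone copyurl --stdout https://example.com/file.zip > file.zip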

View File

@@ -17,8 +17,8 @@ By default `dedupe` interactively finds files with duplicate
names and offers to delete all but one or rename them to be
different. This is known as deduping by name.
Deduping by name is only useful with a small group of backends (e.g. Google Drive,
Opendrive) that can have duplicate file names. It can be run on wrapping backends
Deduping by name is only useful with backends like Google Drive which
can have duplicate file names. It can be run on wrapping backends
(e.g. crypt) if they wrap a backend which supports duplicate file
names.

View File

@@ -29,15 +29,15 @@ is an **empty** **existing** directory:
On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
for details. The following examples will mount to an automatically assigned drive,
to specific drive letter `X:`, to path `C:\path\parent\mount`
(where parent directory or drive must exist, and mount must **not** exist,
to specific drive letter `X:`, to path `C:\path\to\nonexistent\directory`
(which must be **non-existent** subdirectory of an **existing** parent directory or drive,
and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
the last example will mount as network share `\\cloud\remote` and map it to an
automatically assigned drive:
rclone mount remote:path/to/files *
rclone mount remote:path/to/files X:
rclone mount remote:path/to/files C:\path\parent\mount
rclone mount remote:path/to/files C:\path\to\nonexistent\directory
rclone mount remote:path/to/files \\cloud\remote
When the program ends while in foreground mode, either via Ctrl+C or receiving
@@ -91,14 +91,14 @@ and experience unexpected program errors, freezes or other issues, consider moun
as a network drive instead.
When mounting as a fixed disk drive you can either mount to an unused drive letter,
or to a path representing a **non-existent** subdirectory of an **existing** parent
or to a path - which must be **non-existent** subdirectory of an **existing** parent
directory or drive. Using the special value `*` will tell rclone to
automatically assign the next available drive letter, starting with Z: and moving backward.
Examples:
rclone mount remote:path/to/files *
rclone mount remote:path/to/files X:
rclone mount remote:path/to/files C:\path\parent\mount
rclone mount remote:path/to/files C:\path\to\nonexistent\directory
rclone mount remote:path/to/files X:
Option `--volname` can be used to set a custom volume name for the mounted
@@ -171,24 +171,10 @@ Note that the mapping of permissions is not always trivial, and the result
you see in Windows Explorer may not be exactly like you expected.
For example, when setting a value that includes write access, this will be
mapped to individual permissions "write attributes", "write data" and "append data",
but not "write extended attributes". Windows will then show this as basic
permission "Special" instead of "Write", because "Write" includes the
"write extended attributes" permission.
If you set POSIX permissions for only allowing access to the owner, using
`--file-perms 0600 --dir-perms 0700`, the user group and the built-in "Everyone"
group will still be given some special permissions, such as "read attributes"
and "read permissions", in Windows. This is done for compatibility reasons,
e.g. to allow users without additional permissions to be able to read basic
metadata about files like in UNIX. One case that may arise is that other programs
(incorrectly) interprets this as the file being accessible by everyone. For example
an SSH client may warn about "unprotected private key file".
WinFsp 2021 (version 1.9, still in beta) introduces a new FUSE option "FileSecurity",
that allows the complete specification of file security descriptors using
[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
With this you can work around issues such as the mentioned "unprotected private key file"
by specifying `-o FileSecurity="D:P(A;;FA;;;OW)"`, for file all access (FA) to the owner (OW).
but not "write extended attributes" (WinFsp does not support extended attributes,
see [this](https://github.com/billziss-gh/winfsp/wiki/NTFS-Compatibility)).
Windows will then show this as basic permission "Special" instead of "Write",
because "Write" includes the "write extended attributes" permission.
### Windows caveats
@@ -392,13 +378,6 @@ for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.
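
For example (mount points and cache paths are illustrative), two simultaneous mounts of overlapping remotes can each be given their own cache hierarchy:

    rclone mount remote: /mnt/a --vfs-cache-mode writes --cache-dir ~/.cache/rclone-a
    rclone mount remote:sub /mnt/b --vfs-cache-mode writes --cache-dir ~/.cache/rclone-b
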
### --vfs-cache-mode off
In this mode (the default) the cache will read directly from the remote and write
@@ -542,19 +521,6 @@ If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
## Alternate report of used bytes
Some backends, most notably S3, do not report the amount of bytes used.
If you need this information to be available when running `df` on the
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
With this flag set, instead of relying on the backend to report this
information, rclone will scan the whole remote similar to `rclone size`
and compute the total used space itself.
_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.
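
As a usage illustration (the mount point is illustrative), the flag is simply added to the mount command and `df` then reports the scanned total:

    rclone mount remote: /mnt/remote --vfs-used-is-size
    df -h /mnt/remote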
```
rclone mount remote:path /path/to/mountpoint [flags]
@@ -599,7 +565,6 @@ rclone mount remote:path /path/to/mountpoint [flags]
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.
--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
--volname string Set the volume name. Supported on Windows and OSX only.

View File

@@ -1,84 +0,0 @@
---
title: "rclone selfupdate"
description: "Update the rclone binary."
slug: rclone_selfupdate
url: /commands/rclone_selfupdate/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/selfupdate/ and as part of making a release run "make commanddocs"
---
# rclone selfupdate
Update the rclone binary.
## Synopsis
This command downloads the latest release of rclone and replaces
the currently running binary. The download is verified with a hashsum
and cryptographically signed signature.
If used without flags (or with implied `--stable` flag), this command
will install the latest stable release. However, some issues may be fixed
(or features added) only in the latest beta release. In such cases you should
run the command with the `--beta` flag, i.e. `rclone selfupdate --beta`.
You can check in advance what version would be installed by adding the
`--check` flag, then repeat the command without it when you are satisfied.
Sometimes the rclone team may recommend you a concrete beta or stable
rclone release to troubleshoot your issue or add a bleeding edge feature.
The `--version VER` flag, if given, will update to the concrete version
instead of the latest one. If you omit micro version from `VER` (for
example `1.53`), the latest matching micro version will be used.
Upon successful update rclone will print a message that contains a previous
version number. You will need it if you later decide to revert your update
for some reason. Then you'll have to note the previous version and run the
following command: `rclone selfupdate [--beta] OLDVER`.
If the old version contains only dots and digits (for example `v1.54.0`)
then it's a stable release so you won't need the `--beta` flag. Beta releases
have additional information similar to `v1.54.0-beta.5111.06f1c0c61`.
(if you are a developer and use a locally built rclone, the version number
will end with `-DEV`, you will have to rebuild it as it obviously can't
be distributed).
If you previously installed rclone via a package manager, the package may
include local documentation or configure services. You may wish to update
with the flag `--package deb` or `--package rpm` (whichever is correct for
your OS) to update these too. This command with the default `--package zip`
will update only the rclone executable so the local manual may become
inaccurate after it.
The `rclone mount` command (https://rclone.org/commands/rclone_mount/) may
or may not support extended FUSE options depending on the build and OS.
`selfupdate` will refuse to update if the capability would be discarded.
Note: Windows forbids deletion of a currently running executable so this
command will rename the old executable to 'rclone.old.exe' upon success.
Please note that this command was not available before rclone version 1.55.
If it fails for you with the message `unknown command "selfupdate"` then
you will need to update manually following the install instructions located
at https://rclone.org/install/
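
Some example invocations of the workflow described above (all flags are documented below):

    rclone selfupdate --check
    rclone selfupdate --beta
    rclone selfupdate --version 1.53
    rclone selfupdate --package deb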
```
rclone selfupdate [flags]
```
## Options
```
--beta Install beta release.
--check Check for latest release, do not download.
-h, --help help for selfupdate
--output string Save the downloaded binary at a given path (default: replace running binary)
--package string Package format: zip|deb|rpm (default: zip)
--stable Install stable release (this is the default)
--version string Install the given rclone version (default: latest)
```
See the [global flags page](/flags/) for global options not listed here.
## SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

View File

@@ -134,13 +134,6 @@ for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.
### --vfs-cache-mode off
In this mode (the default) the cache will read directly from the remote and write
@@ -284,19 +277,6 @@ If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
## Alternate report of used bytes
Some backends, most notably S3, do not report the amount of bytes used.
If you need this information to be available when running `df` on the
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
With this flag set, instead of relying on the backend to report this
information, rclone will scan the whole remote similar to `rclone size`
and compute the total used space itself.
_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.
```
rclone serve dlna remote:path [flags]
@@ -329,7 +309,6 @@ rclone serve dlna remote:path [flags]
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.
--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
```

View File

@@ -133,13 +133,6 @@ for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.
### --vfs-cache-mode off
In this mode (the default) the cache will read directly from the remote and write
@@ -283,19 +276,6 @@ If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
## Alternate report of used bytes
Some backends, most notably S3, do not report the amount of bytes used.
If you need this information to be available when running `df` on the
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
With this flag set, instead of relying on the backend to report this
information, rclone will scan the whole remote similar to `rclone size`
and compute the total used space itself.
_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.
## Auth Proxy
If you supply the parameter `--auth-proxy /path/to/program` then
@@ -414,7 +394,6 @@ rclone serve ftp remote:path [flags]
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.
--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
```

View File

@@ -205,13 +205,6 @@ for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.
### --vfs-cache-mode off
In this mode (the default) the cache will read directly from the remote and write
@@ -355,19 +348,6 @@ If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
## Alternate report of used bytes
Some backends, most notably S3, do not report the amount of bytes used.
If you need this information to be available when running `df` on the
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
With this flag set, instead of relying on the backend to report this
information, rclone will scan the whole remote similar to `rclone size`
and compute the total used space itself.
_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.
```
rclone serve http remote:path [flags]
@@ -410,7 +390,6 @@ rclone serve http remote:path [flags]
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.
--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
```

View File

@@ -144,13 +144,6 @@ for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.
### --vfs-cache-mode off
In this mode (the default) the cache will read directly from the remote and write
@@ -294,19 +287,6 @@ If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
## Alternate report of used bytes
Some backends, most notably S3, do not report the amount of bytes used.
If you need this information to be available when running `df` on the
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
With this flag set, instead of relying on the backend to report this
information, rclone will scan the whole remote similar to `rclone size`
and compute the total used space itself.
_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.
## Auth Proxy
If you supply the parameter `--auth-proxy /path/to/program` then
@@ -424,7 +404,6 @@ rclone serve sftp remote:path [flags]
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.
--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
```

View File

@@ -213,13 +213,6 @@ for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.
### --vfs-cache-mode off
In this mode (the default) the cache will read directly from the remote and write
@@ -363,19 +356,6 @@ If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
## Alternate report of used bytes
Some backends, most notably S3, do not report the amount of bytes used.
If you need this information to be available when running `df` on the
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
With this flag set, instead of relying on the backend to report this
information, rclone will scan the whole remote similar to `rclone size`
and compute the total used space itself.
_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.
## Auth Proxy
If you supply the parameter `--auth-proxy /path/to/program` then
@@ -502,7 +482,6 @@ rclone serve webdav remote:path [flags]
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.
--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
```

View File

@@ -15,8 +15,7 @@ Make source and dest identical, modifying destination only.
Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary (except duplicate
objects, see below).
source, including deleting files if necessary.
**Important**: Since this can cause data loss, test first with the
`--dry-run` or the `--interactive`/`-i` flag.
@@ -24,8 +23,7 @@ objects, see below).
rclone sync -i SOURCE remote:DESTINATION
Note that files in the destination won't be deleted if there were any
errors at any point. Duplicate objects (files with the same name, on
those providers that support it) are also not yet handled.
errors at any point.
It is always the contents of the directory that is synced, not the
directory so when source:path is a directory, it's the contents of
@@ -37,9 +35,6 @@ go there.
**Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics
**Note**: Use the `rclone dedupe` command to deal with "Duplicate object/directory found in source/destination - ignoring" errors.
See [this forum post](https://forum.rclone.org/t/sync-not-clearing-duplicates/14372) for more info.
```
rclone sync source:path dest:path [flags]

View File

@@ -1,41 +0,0 @@
---
title: "rclone test"
description: "Run a test command"
slug: rclone_test
url: /commands/rclone_test/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/ and as part of making a release run "make commanddocs"
---
# rclone test
Run a test command
## Synopsis
Rclone test is used to run test commands.
Select which test command you want with the subcommand, eg
rclone test memory remote:
Each subcommand has its own options which you can see in their help.
**NB** Be careful running these commands, they may do strange things
so reading their documentation first is recommended.
## Options
```
-h, --help help for test
```
See the [global flags page](/flags/) for global options not listed here.
## SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
* [rclone test histogram](/commands/rclone_test_histogram/) - Makes a histogram of file name characters.
* [rclone test info](/commands/rclone_test_info/) - Discovers file name or other limitations for paths.
* [rclone test makefiles](/commands/rclone_test_makefiles/) - Make a random file hierarchy in <dir>
* [rclone test memory](/commands/rclone_test_memory/) - Load all the objects at remote:path into memory and report memory stats.

View File

@@ -1,36 +0,0 @@
---
title: "rclone test histogram"
description: "Makes a histogram of file name characters."
slug: rclone_test_histogram
url: /commands/rclone_test_histogram/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/histogram/ and as part of making a release run "make commanddocs"
---
# rclone test histogram
Makes a histogram of file name characters.
## Synopsis
This command outputs JSON which shows the histogram of characters used
in filenames in the remote:path specified.
The data doesn't contain any identifying information but is useful for
the rclone developers when developing filename compression.
```
rclone test histogram [remote:path] [flags]
```
## Options
```
-h, --help help for histogram
```
See the [global flags page](/flags/) for global options not listed here.
## SEE ALSO
* [rclone test](/commands/rclone_test/) - Run a test command

View File

@@ -1,44 +0,0 @@
---
title: "rclone test info"
description: "Discovers file name or other limitations for paths."
slug: rclone_test_info
url: /commands/rclone_test_info/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/info/ and as part of making a release run "make commanddocs"
---
# rclone test info
Discovers file name or other limitations for paths.
## Synopsis
rclone info discovers what filenames and upload methods are possible
to write to the paths passed in and how long they can be. It can take some
time. It will write test files into the remote:path passed in. It outputs
a bit of go code for each one.
**NB** this can create undeletable files and other hazards - use with care
```
rclone test info [remote:path]+ [flags]
```
## Options
```
--all Run all tests.
--check-control Check control characters.
--check-length Check max filename length.
--check-normalization Check UTF-8 Normalization.
--check-streaming Check uploads with indeterminate file size.
-h, --help help for info
--upload-wait duration Wait after writing a file.
--write-json string Write results to file.
```
See the [global flags page](/flags/) for global options not listed here.
## SEE ALSO
* [rclone test](/commands/rclone_test/) - Run a test command

View File

@@ -1,33 +0,0 @@
---
title: "rclone test makefiles"
description: "Make a random file hierarchy in <dir>"
slug: rclone_test_makefiles
url: /commands/rclone_test_makefiles/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/makefiles/ and as part of making a release run "make commanddocs"
---
# rclone test makefiles
Make a random file hierarchy in <dir>
```
rclone test makefiles <dir> [flags]
```
## Options
```
--files int Number of files to create (default 1000)
--files-per-directory int Average number of files per directory (default 10)
-h, --help help for makefiles
--max-file-size SizeSuffix Maximum size of files to create (default 100)
--max-name-length int Maximum size of file names (default 12)
--min-file-size SizeSuffix Minimum size of file to create
--min-name-length int Minimum size of file names (default 4)
```
See the [global flags page](/flags/) for global options not listed here.
## SEE ALSO
* [rclone test](/commands/rclone_test/) - Run a test command

View File

@@ -1,27 +0,0 @@
---
title: "rclone test memory"
description: "Load all the objects at remote:path into memory and report memory stats."
slug: rclone_test_memory
url: /commands/rclone_test_memory/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/memory/ and as part of making a release run "make commanddocs"
---
# rclone test memory
Load all the objects at remote:path into memory and report memory stats.
```
rclone test memory remote:path [flags]
```
## Options
```
-h, --help help for memory
```
See the [global flags page](/flags/) for global options not listed here.
## SEE ALSO
* [rclone test](/commands/rclone_test/) - Run a test command

View File

@@ -12,21 +12,14 @@ Show the version number.
## Synopsis
Show the rclone version number, the go version, the build target OS and
architecture, build tags and the type of executable (static or dynamic).
Show the version number, the go version and the architecture.
For example:
Eg
$ rclone version
rclone v1.54
- os/type: linux
- os/arch: amd64
- go/version: go1.16
- go/linking: static
- go/tags: none
Note: before rclone version 1.55 the os/type and os/arch lines were merged,
and the "go/version" line was tagged as "go version".
rclone v1.41
- os/arch: linux/amd64
- go version: go1.10
If you supply the --check flag, then it will do an online check to
compare your version with the latest release and the latest beta.

View File

@@ -517,20 +517,6 @@ names, or for debugging purposes.
- Type: bool
- Default: false
#### --crypt-no-data-encryption
Option to either encrypt file data or leave it unencrypted.
- Config: no_data_encryption
- Env Var: RCLONE_CRYPT_NO_DATA_ENCRYPTION
- Type: bool
- Default: false
- Examples:
- "true"
- Don't encrypt file data, leave it unencrypted.
- "false"
- Encrypt file data.
### Backend commands
Here are the commands specific to the crypt backend.

View File

@@ -8,8 +8,8 @@ Configure
First, you'll need to configure rclone. As the object storage systems
have quite complicated authentication these are kept in a config file.
(See the [`--config`](#config-config-file) entry for how to find the config
file and choose its location.)
(See the `--config` entry for how to find the config file and choose
its location.)
The easiest way to make the config is to run rclone with the config
option:
@@ -208,108 +208,6 @@ To copy files and directories in `https://example.com/path/to/dir` to `/tmp/dir`
To copy files and directories from `example.com` in the relative
directory `path/to/dir` to `/tmp/dir` using sftp.
### Connection strings {#connection-strings}
The above examples can also be written using a connection string
syntax, so instead of providing the arguments as command line
parameters `--http-url https://pub.rclone.org` they are provided as
part of the remote specification as a kind of connection string.
rclone lsd ":http,url='https://pub.rclone.org':"
rclone lsf ":http,url='https://example.com':path/to/dir"
rclone copy ":http,url='https://example.com':path/to/dir" /tmp/dir
rclone copy :sftp,host=example.com:path/to/dir /tmp/dir
These can apply to modify existing remotes as well as create new
remotes with the on the fly syntax. This example is equivalent to
adding the `--drive-shared-with-me` parameter to the remote `gdrive:`.
rclone lsf "gdrive,shared_with_me:path/to/dir"
The major advantage to using the connection string style syntax is
that it only applies to the remote, not to all the remotes of that
type on the command line. A common confusion is this attempt to copy a
file shared on google drive to the normal drive which **does not
work** because the `--drive-shared-with-me` flag applies to both the
source and the destination.
rclone copy --drive-shared-with-me gdrive:shared-file.txt gdrive:
However using the connection string syntax, this does work.
rclone copy "gdrive,shared_with_me:shared-file.txt" gdrive:
The connection strings have the following syntax
remote,parameter=value,parameter2=value2:path/to/dir
:backend,parameter=value,parameter2=value2:path/to/dir
If the `parameter` has a `:` or `,` then it must be placed in quotes `"` or
`'`, so
remote,parameter="colon:value",parameter2="comma,value":path/to/dir
:backend,parameter='colon:value',parameter2='comma,value':path/to/dir
If a quoted value needs to include that quote, then it should be
doubled, so
remote,parameter="with""quote",parameter2='with''quote':path/to/dir
This will make `parameter` be `with"quote` and `parameter2` be
`with'quote`.
If you leave off the `=parameter` then rclone will substitute `=true`
which works very well with flags. For example to use s3 configured in
the environment you could use:
rclone lsd :s3,env_auth:
Which is equivalent to
rclone lsd :s3,env_auth=true:
Note that on the command line you might need to surround these
connection strings with `"` or `'` to stop the shell interpreting any
special characters within them.
If you are a shell master then you'll know which strings are OK and
which aren't, but if you aren't sure then enclose them in `"` and use
`'` as the inside quote. This syntax works on all OSes.
rclone copy ":http,url='https://example.com':path/to/dir" /tmp/dir
On Linux/macOS some characters are still interpreted inside `"`
strings in the shell (notably `\` and `$` and `"`) so if your strings
contain those you can swap the roles of `"` and `'` thus. (This syntax
does not work on Windows.)
rclone copy ':http,url="https://example.com":path/to/dir' /tmp/dir
#### Connection strings, config and logging
If you supply extra configuration to a backend by command line flag,
environment variable or connection string then rclone will add a
suffix based on the hash of the config to the name of the remote, eg
rclone -vv lsf --s3-chunk-size 20M s3:
Has the log message
DEBUG : s3: detected overridden config - adding "{Srj1p}" suffix to name
This is so rclone can tell the modified remote apart from the
unmodified remote when caching the backends.
This should only be noticeable in the logs.
This means that on the fly backends such as
rclone -vv lsf :s3,env_auth:
Will get their own names
DEBUG : :s3: detected overridden config - adding "{YTu53}" suffix to name
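As a rough illustration of the shape of these strings (and emphatically not rclone's actual parser), the following Go sketch splits a connection string of the form `remote,parameter=value,parameter2=value2:path` into its parts. It ignores the quoting rules described above and does not handle the leading-colon on the fly form.

```
package main

import (
	"fmt"
	"strings"
)

// splitConnectionString breaks "remote,param=value,...:path" into the remote
// name, its parameters and the path. Quoting of ':' and ',' in values (see
// above) is deliberately not handled in this sketch.
func splitConnectionString(s string) (name string, params map[string]string, path string) {
	pieces := strings.SplitN(s, ":", 2)
	head := pieces[0]
	if len(pieces) == 2 {
		path = pieces[1]
	}
	parts := strings.Split(head, ",")
	name = parts[0]
	params = make(map[string]string)
	for _, p := range parts[1:] {
		kv := strings.SplitN(p, "=", 2)
		if len(kv) == 1 {
			params[kv[0]] = "true" // bare parameter, e.g. env_auth
		} else {
			params[kv[0]] = kv[1]
		}
	}
	return name, params, path
}

func main() {
	name, params, path := splitConnectionString("gdrive,shared_with_me:path/to/dir")
	fmt.Println(name, params, path)
	// gdrive map[shared_with_me:true] path/to/dir
}
```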
### Valid remote names
- Remote names may only contain 0-9, A-Z, a-z, _, - and space.
@@ -639,7 +537,7 @@ See `--copy-dest` and `--backup-dir`.
### --config=CONFIG_FILE ###
Specify the location of the rclone configuration file.
Specify the location of the rclone config file.
Normally the config file is in your home directory as a file called
`.config/rclone/rclone.conf` (or `.rclone.conf` if created with an
@@ -654,46 +552,10 @@ If you run `rclone config file` you will see where the default
location is for you.
Use this flag to override the config location, e.g. `rclone
--config=".myconfig" config`.
--config=".myconfig" .config`.
If the location is set to empty string `""` or the special value
`/notfound`, or the os null device represented by value `NUL` on
Windows and `/dev/null` on Unix systems, then rclone will keep the
config file in memory only.
The file format is basic [INI](https://en.wikipedia.org/wiki/INI_file#Format):
Sections of text, led by a `[section]` header and followed by
`key=value` entries on separate lines. In rclone each remote is
represented by its own section, where the section name defines the
name of the remote. Options are specified as the `key=value` entries,
where the key is the option name without the `--backend-` prefix,
in lowercase and with `_` instead of `-`. E.g. option `--mega-hard-delete`
corresponds to key `hard_delete`. Only backend options can be specified.
A special, and required, key `type` identifies the [storage system](/overview/),
where the value is the internal lowercase name as returned by command
`rclone help backends`. Comments are indicated by `;` or `#` at the
beginning of a line.
Example:
[megaremote]
type = mega
user = you@example.com
pass = PDPcQVVjVtzFY-GTdDFozqBhTdsPg3qH
Note that passwords are in [obscured](/commands/rclone_obscure/)
form. Also, many storage systems use token-based authentication instead
of passwords, and this requires additional steps. It is easier, and safer,
to use the interactive command `rclone config` instead of manually
editing the configuration file.
The configuration file will typically contain login information, and
should therefore have restricted permissions so that only the current user
can read it. Rclone tries to ensure this when it writes the file.
You may also choose to [encrypt](#configuration-encryption) the file.
When token-based authentication is used, the configuration file
must be writable, because rclone needs to update the tokens inside it.
If this is set to empty string or the special value `/notfound` then
rclone will keep the config file in memory only.
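For illustration only, the `[section]` / `key=value` layout described above can be read with a few lines of Go. This is a simplified sketch, not rclone's config loader: it ignores encrypted config files and treats obscured passwords as opaque strings.

```
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// readConfig parses a minimal INI-style file into map[section]map[key]value.
// Blank lines and comments starting with ';' or '#' are skipped.
func readConfig(path string) (map[string]map[string]string, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	conf := make(map[string]map[string]string)
	section := ""
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		switch {
		case line == "" || strings.HasPrefix(line, ";") || strings.HasPrefix(line, "#"):
			continue
		case strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]"):
			section = strings.Trim(line, "[]")
			conf[section] = make(map[string]string)
		default:
			if kv := strings.SplitN(line, "=", 2); len(kv) == 2 && section != "" {
				conf[section][strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
			}
		}
	}
	return conf, scanner.Err()
}

func main() {
	conf, err := readConfig(os.ExpandEnv("$HOME/.config/rclone/rclone.conf"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	for remote, options := range conf {
		fmt.Println(remote, "->", options["type"])
	}
}
```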
### --contimeout=TIME ###
@@ -787,27 +649,6 @@ triggering follow-on actions if data was copied, or skipping if not.
NB: Enabling this option turns a usually non-fatal error into a potentially
fatal one - please check and adjust your scripts accordingly!
### --fs-cache-expire-duration=TIME
When using rclone via the API rclone caches created remotes for 5
minutes by default in the "fs cache". This means that if you do
repeated actions on the same remote then rclone won't have to build it
again from scratch, which makes it more efficient.
This flag sets the time that the remotes are cached for. If you set it
to `0` (or negative) then rclone won't cache the remotes at all.
Note that if you use some flags, eg `--backup-dir`, and if this is set
to `0` rclone may build two remotes (one for the source or destination
and one for the `--backup-dir`) where it may have only built one
before.
### --fs-cache-expire-interval=TIME
This controls how often rclone checks for cached remotes to expire.
See the `--fs-cache-expire-duration` documentation above for more
info. The default is 60s, set to 0 to disable expiry.
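To make these two flags concrete, here is a minimal sketch of the mechanism they control: cached entries expire after an expiry duration, and a background loop checks for expired entries at a fixed interval. The type and names below are illustrative only, not rclone's internal fs cache.

```
package main

import (
	"fmt"
	"sync"
	"time"
)

// expiringCache is an illustrative cache whose entries expire after
// expireDuration; a background loop checks every expireInterval.
type expiringCache struct {
	mu             sync.Mutex
	entries        map[string]cacheEntry
	expireDuration time.Duration // --fs-cache-expire-duration
	expireInterval time.Duration // --fs-cache-expire-interval
}

type cacheEntry struct {
	value    interface{}
	lastUsed time.Time
}

func newExpiringCache(duration, interval time.Duration) *expiringCache {
	c := &expiringCache{
		entries:        make(map[string]cacheEntry),
		expireDuration: duration,
		expireInterval: interval,
	}
	go c.expireLoop()
	return c
}

// Put stores (or refreshes) an entry.
func (c *expiringCache) Put(key string, value interface{}) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries[key] = cacheEntry{value: value, lastUsed: time.Now()}
}

// expireLoop drops entries which haven't been used for expireDuration.
func (c *expiringCache) expireLoop() {
	for range time.Tick(c.expireInterval) {
		c.mu.Lock()
		for key, e := range c.entries {
			if time.Since(e.lastUsed) > c.expireDuration {
				delete(c.entries, key)
			}
		}
		c.mu.Unlock()
	}
}

func main() {
	c := newExpiringCache(5*time.Minute, time.Minute) // the defaults described above
	c.Put("s3:bucket", "a created backend would be cached here")
	fmt.Println("cached for", c.expireDuration, "checked every", c.expireInterval)
}
```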
### --header ###
Add an HTTP header for all transactions. The flag can be repeated to
@@ -1779,7 +1620,7 @@ Configuration Encryption
------------------------
Your configuration file contains information for logging in to
your cloud services. This means that you should keep your
`rclone.conf` file in a secure location.
`.rclone.conf` file in a secure location.
If you are in an environment where that isn't possible, you can
add a password to your configuration. This means that you will
@@ -1885,16 +1726,6 @@ password prompts. To do that, pass the parameter
of asking for a password if `RCLONE_CONFIG_PASS` doesn't contain
a valid password, and `--password-command` has not been supplied.
Some rclone commands, such as `genautocomplete`, do not require configuration.
Nevertheless, rclone will read any configuration file found
according to the rules described [above](https://rclone.org/docs/#config-config-file).
If an encrypted configuration file is found, this means you will be prompted for
password (unless using `--password-command`). To avoid this, you can bypass
the loading of the configuration file by overriding the location with an empty
string `""` or the special value `/notfound`, or the os null device represented
by value `NUL` on Windows and `/dev/null` on Unix systems (before rclone
version 1.55 only this null device alternative was supported).
E.g. `rclone --config="" genautocomplete bash`.
Developer options
-----------------
@@ -2096,8 +1927,11 @@ so they take exactly the same form.
### Config file ###
You can set defaults for values in the config file on an individual
remote basis. The names of the config items are documented in the page
for each backend.
remote basis. If you want to use this feature, you will need to
discover the name of the config items that you want. The easiest way
is to run through `rclone config` by hand, then look in the config
file to see what the values are (the config file can be found by
looking at the help for `--config` in `rclone help`).
To find the name of the environment variable that you need to set, take
`RCLONE_CONFIG_` + name of remote + `_` + name of config file option
@@ -2119,11 +1953,6 @@ mys3:
Note that if you want to create a remote using environment variables
you must create the `..._TYPE` variable as above.
Note also that now rclone has [connection strings](#connection-strings),
it is probably easier to use those instead which makes the above example
rclone lsd :s3,access_key_id=XXX,secret_access_key=XXX:
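Putting the naming rule above into code, a trivial sketch (the uppercasing matches rclone's documented examples, but verify against the docs for your version):

```
package main

import (
	"fmt"
	"strings"
)

// remoteEnvVar builds the environment variable name for a config item of a
// remote, following the RCLONE_CONFIG_<REMOTE>_<OPTION> pattern described
// above. Uppercasing is assumed here.
func remoteEnvVar(remote, option string) string {
	return "RCLONE_CONFIG_" + strings.ToUpper(remote) + "_" + strings.ToUpper(option)
}

func main() {
	fmt.Println(remoteEnvVar("mys3", "access_key_id"))
	// Output: RCLONE_CONFIG_MYS3_ACCESS_KEY_ID
}
```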
### Precedence
The various different methods of backend configuration are read in

View File

@@ -197,21 +197,6 @@ memory. It can be set smaller if you are tight on memory.
Impersonate this user when using a business account.
Note that if you want to use impersonate, you should make sure this
flag is set when running "rclone config" as this will cause rclone to
request the "members.read" scope which it won't normally. This is
needed to look up a member's email address and convert it to the internal ID that
dropbox uses in the API.
Using the "members.read" scope will require a Dropbox Team Admin
to approve during the OAuth flow.
You will have to use your own App (setting your own client_id and
client_secret) to use this option as currently rclone's default set of
permissions doesn't include "members.read". This can be added once
v1.55 or later is in use everywhere.
- Config: impersonate
- Env Var: RCLONE_DROPBOX_IMPERSONATE
- Type: string
@@ -285,12 +270,6 @@ dropbox:dir` will return the error `Failed to purge: There are too
many files involved in this operation`. As a work-around do an
`rclone delete dropbox:dir` followed by an `rclone rmdir dropbox:dir`.
When using `rclone link` you'll need to set `--expire` if using a
non-personal account otherwise the visibility may not be correct.
(Note that `--expire` isn't supported on personal accounts). See the
[forum discussion](https://forum.rclone.org/t/rclone-link-dropbox-permissions/23211) and the
[dropbox SDK issue](https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75).
### Get your own Dropbox App ID ###
When you use rclone with Dropbox in its default configuration you are using rclone's App ID. This is shared between all the rclone users.

View File

@@ -236,28 +236,6 @@ Option `exclude-if-present` creates a directory exclude rule based
on the presence of a file in a directory and takes precedence over
other rclone directory filter rules.
When using pattern list syntax, if a pattern item contains either
`/` or `**`, then rclone will not be able to imply a directory filter rule
from this pattern list.
E.g. for an include rule
{dir1/**,dir2/**}
Rclone will match files below directories `dir1` or `dir2` only,
but will not be able to use this filter to exclude a directory `dir3`
from being traversed.
Directory recursion optimisation may affect performance, but normally
not the result. One exception to this is sync operations with option
`--create-empty-src-dirs`, where any traversed empty directories will
be created. With the pattern list example `{dir1/**,dir2/**}` above,
this would create an empty directory `dir3` on destination (when it exists
on source). Changing the filter to `{dir1,dir2}/**`, or splitting it into
two include rules `--include dir1/** --include dir2/**`, will match the
same files while also filtering directories, with the result that an empty
directory `dir3` will no longer be created.
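As a rough illustration of why the shape of the rule matters for directory pruning, the toy walk below skips whole directories when a prefix rule permits it. The two hard-coded prefixes stand in for the include rule `{dir1,dir2}/**`; this is not rclone's filter engine.

```
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
	"strings"
)

// wanted reports whether a directory (or a file inside it) can match the
// stand-in rule {dir1,dir2}/**.
func wanted(rel string) bool {
	return strings.HasPrefix(rel+"/", "dir1/") || strings.HasPrefix(rel+"/", "dir2/")
}

func main() {
	root := "."
	_ = filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		rel, _ := filepath.Rel(root, path)
		rel = filepath.ToSlash(rel)
		if rel == "." {
			return nil
		}
		if d.IsDir() {
			// Because the rule names whole directories, a directory such as
			// dir3 can be skipped without descending into it -- the
			// optimisation described above.
			if !wanted(rel) {
				return filepath.SkipDir
			}
			return nil
		}
		if wanted(rel) {
			fmt.Println("would transfer:", rel)
		}
		return nil
	})
}
```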
### `--exclude` - Exclude files matching pattern
Excludes path/file names from an rclone command based on a single exclude
@@ -418,7 +396,7 @@ processed in.
Arrange the order of filter rules with the most restrictive first and
work down.
E.g. for `filter-file.txt`:
E.g. For `filter-file.txt`:
# a sample filter rule file
- secret*.jpg

View File

@@ -27,10 +27,10 @@ These flags are available for every command.
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
--client-cert string Client SSL certificate (PEM) for mutual TLS auth
--client-key string Client SSL private key (PEM) for mutual TLS auth
--compare-dest stringArray Include additional comma separated server-side paths during comparison.
--compare-dest string Include additional server-side path during comparison.
--config string Config file. (default "$HOME/.config/rclone/rclone.conf")
--contimeout duration Connect timeout (default 1m0s)
--copy-dest stringArray Implies --compare-dest but also copies files from paths into destination.
--copy-dest string Implies --compare-dest but also copies files from path into destination.
--cpuprofile string Write cpu profile to file
--cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default "HARD")
--delete-after When synchronizing, delete files on destination after transferring (default)
@@ -39,10 +39,10 @@ These flags are available for every command.
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
-n, --dry-run Do a trial run with no permanent changes
--dscp string Set DSCP value to connections. Can be value or names, eg. CS1, LE, DF, AF21.
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP headers - may contain sensitive info
--dscp DSCP Name or Value (default 0)
--error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read exclude patterns from file (use - to read from stdin)
@@ -53,8 +53,6 @@ These flags are available for every command.
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
-f, --filter stringArray Add a file-filtering rule
--filter-from stringArray Read filtering patterns from a file (use - to read from stdin)
--fs-cache-expire-duration duration cache remotes for this long (0 to disable caching) (default 5m0s)
--fs-cache-expire-interval duration interval to check for expired remotes (default 1m0s)
--header stringArray Set HTTP header for all transactions
--header-download stringArray Set HTTP header for download transactions
--header-upload stringArray Set HTTP header for upload transactions
@@ -153,7 +151,7 @@ These flags are available for every command.
--use-json-log Use json log format.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.55.0")
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.54.0")
-v, --verbose count Print lots more stuff (repeat for more)
```
@@ -186,7 +184,6 @@ and may be set in the config file.
--azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.
--azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_object_id specified.
--azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.
--azureblob-public-access string Public access level of a container: blob, container.
--azureblob-sas-url string SAS URL for container level access only
--azureblob-service-principal-file string Path to file containing credentials for use with a service principal.
--azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256MB). (Deprecated)
@@ -250,7 +247,6 @@ and may be set in the config file.
-L, --copy-links Follow symlinks and copy the pointed to item.
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted.
--crypt-password string Password or pass phrase for encryption. (obscured)
--crypt-password2 string Password or pass phrase for salt. Optional but recommended. (obscured)
--crypt-remote string Remote to encrypt/decrypt.
@@ -286,7 +282,7 @@ and may be set in the config file.
--drive-starred-only Only show files that are starred.
--drive-stop-on-download-limit Make download limit errors be fatal
--drive-stop-on-upload-limit Make upload limit errors be fatal
--drive-team-drive string ID of the Shared Drive (Team Drive)
--drive-team-drive string ID of the Team Drive
--drive-token string OAuth Access Token as a JSON blob.
--drive-token-url string Token server url.
--drive-trashed-only Only show files that are in the trash.
@@ -315,14 +311,12 @@ and may be set in the config file.
--filefabric-token-expiry string Token expiry time
--filefabric-url string URL of the Enterprise File Fabric to connect to
--filefabric-version string Version read from the file fabric
--ftp-close-timeout Duration Maximum time to wait for a response to close. (default 1m0s)
--ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
--ftp-disable-epsv Disable using EPSV even if server advertises support
--ftp-disable-mlsd Disable using MLSD even if server advertises support
--ftp-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Del,Ctl,RightSpace,Dot)
--ftp-explicit-tls Use Explicit FTPS (FTP over TLS)
--ftp-host string FTP host to connect to
--ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
--ftp-no-check-certificate Do not verify the TLS certificate of the server
--ftp-pass string FTP password (obscured)
--ftp-port string FTP port, leave blank to use default (21)
@@ -384,7 +378,6 @@ and may be set in the config file.
--local-case-sensitive Force the filesystem to report itself as case sensitive.
--local-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Dot)
--local-no-check-updated Don't check to see if the files change during upload
--local-no-preallocate Disable preallocation of disk space for transferred files
--local-no-set-modtime Disable setting modtime
--local-no-sparse Disable sparse files for multi-thread downloads
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
@@ -415,7 +408,6 @@ and may be set in the config file.
--onedrive-link-password string Set the password for links created by the link command.
--onedrive-link-scope string Set the scope of the links created by the link command. (default "anonymous")
--onedrive-link-type string Set the type of the links created by the link command. (default "view")
--onedrive-list-chunk int Size of listing chunk. (default 1000)
--onedrive-no-versions Remove all versions on modifying operations
--onedrive-region string Choose national cloud region for OneDrive. (default "global")
--onedrive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different onedrive configs.
@@ -490,10 +482,8 @@ and may be set in the config file.
--seafile-url string URL of seafile host to connect to
--seafile-user string User name (usually email address)
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-concurrent-reads If set don't use concurrent reads
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file. (obscured)
--sftp-key-pem string Raw PEM-encoded private key, If specified, will override key_file parameter.
@@ -563,10 +553,9 @@ and may be set in the config file.
--union-upstreams string List of space separated upstreams.
--webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
--webdav-bearer-token-command string Command to run to get a bearer token
--webdav-encoding string This sets the encoding for the backend.
--webdav-pass string Password. (obscured)
--webdav-url string URL of http host to connect to
--webdav-user string User name. In case NTLM authentication is used, the username should be in the format 'Domain\User'.
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-auth-url string Auth server URL.
--yandex-client-id string OAuth Client Id
@@ -574,11 +563,6 @@ and may be set in the config file.
--yandex-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Del,Ctl,InvalidUtf8,Dot)
--yandex-token string OAuth Access Token as a JSON blob.
--yandex-token-url string Token server url.
--zoho-auth-url string Auth server URL.
--zoho-client-id string OAuth Client Id
--zoho-client-secret string OAuth Client Secret
--zoho-encoding MultiEncoder This sets the encoding for the backend. (default Del,Ctl,InvalidUtf8)
--zoho-region string Zoho region to connect to. You'll have to use the region you organization is registered in.
--zoho-token string OAuth Access Token as a JSON blob.
--zoho-token-url string Token server url.
```

View File

@@ -223,30 +223,6 @@ Disable using MLSD even if server advertises support
- Type: bool
- Default: false
#### --ftp-idle-timeout
Max time before closing idle connections
If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.
Set to 0 to keep connections indefinitely.
- Config: idle_timeout
- Env Var: RCLONE_FTP_IDLE_TIMEOUT
- Type: Duration
- Default: 1m0s
#### --ftp-close-timeout
Maximum time to wait for a response to close.
- Config: close_timeout
- Env Var: RCLONE_FTP_CLOSE_TIMEOUT
- Type: Duration
- Default: 1m0s
#### --ftp-encoding
This sets the encoding for the backend.

View File

@@ -25,14 +25,13 @@ fi
#create tmp directory and move to it with macOS compatibility fallback
tmp_dir=$(mktemp -d 2>/dev/null || mktemp -d -t 'rclone-install.XXXXXXXXXX')
cd "$tmp_dir"
tmp_dir=`mktemp -d 2>/dev/null || mktemp -d -t 'rclone-install.XXXXXXXXXX'`; cd $tmp_dir
#make sure unzip tool is available and choose one to work with
set +e
for tool in ${unzip_tools_list[*]}; do
trash=$(hash "$tool" 2>>errors)
trash=`hash $tool 2>>errors`
if [ "$?" -eq 0 ]; then
unzip_tool="$tool"
break
@@ -41,7 +40,7 @@ done
set -e
# exit if no unzip tools available
if [ -z "$unzip_tool" ]; then
if [ -z "${unzip_tool}" ]; then
printf "\nNone of the supported tools for extracting zip archives (${unzip_tools_list[*]}) were found. "
printf "Please install one of them and try again.\n\n"
exit 4
@@ -51,11 +50,11 @@ fi
export XDG_CONFIG_HOME=config
#check installed version of rclone to determine if update is necessary
version=$(rclone --version 2>>errors | head -n 1)
if [ -z "$install_beta" ]; then
current_version=$(curl -f https://downloads.rclone.org/version.txt)
version=`rclone --version 2>>errors | head -n 1`
if [ -z "${install_beta}" ]; then
current_version=`curl https://downloads.rclone.org/version.txt`
else
current_version=$(curl -f https://beta.rclone.org/version.txt)
current_version=`curl https://beta.rclone.org/version.txt`
fi
if [ "$version" = "$current_version" ]; then
@@ -64,8 +63,9 @@ if [ "$version" = "$current_version" ]; then
fi
#detect the platform
OS="$(uname)"
OS="`uname`"
case $OS in
Linux)
OS='linux'
@@ -93,20 +93,20 @@ case $OS in
;;
esac
OS_type="$(uname -m)"
case "$OS_type" in
OS_type="`uname -m`"
case $OS_type in
x86_64|amd64)
OS_type='amd64'
;;
i?86|x86)
OS_type='386'
;;
aarch64|arm64)
OS_type='arm64'
;;
arm*)
OS_type='arm'
;;
aarch64)
OS_type='arm64'
;;
*)
echo 'OS type not supported'
exit 2
@@ -115,42 +115,43 @@ esac
#download and unzip
if [ -z "$install_beta" ]; then
download_link="https://downloads.rclone.org/rclone-current-${OS}-${OS_type}.zip"
rclone_zip="rclone-current-${OS}-${OS_type}.zip"
if [ -z "${install_beta}" ]; then
download_link="https://downloads.rclone.org/rclone-current-$OS-$OS_type.zip"
rclone_zip="rclone-current-$OS-$OS_type.zip"
else
download_link="https://beta.rclone.org/rclone-beta-latest-${OS}-${OS_type}.zip"
rclone_zip="rclone-beta-latest-${OS}-${OS_type}.zip"
download_link="https://beta.rclone.org/rclone-beta-latest-$OS-$OS_type.zip"
rclone_zip="rclone-beta-latest-$OS-$OS_type.zip"
fi
curl -Of "$download_link"
curl -O $download_link
unzip_dir="tmp_unzip_dir_for_rclone"
# there should be an entry in this switch for each element of unzip_tools_list
case "$unzip_tool" in
case $unzip_tool in
'unzip')
unzip -a "$rclone_zip" -d "$unzip_dir"
unzip -a $rclone_zip -d $unzip_dir
;;
'7z')
7z x "$rclone_zip" "-o$unzip_dir"
7z x $rclone_zip -o$unzip_dir
;;
'busybox')
mkdir -p "$unzip_dir"
busybox unzip "$rclone_zip" -d "$unzip_dir"
mkdir -p $unzip_dir
busybox unzip $rclone_zip -d $unzip_dir
;;
esac
cd $unzip_dir/*
#mounting rclone to environment
case "$OS" in
case $OS in
'linux')
#binary
cp rclone /usr/bin/rclone.new
chmod 755 /usr/bin/rclone.new
chown root:root /usr/bin/rclone.new
mv /usr/bin/rclone.new /usr/bin/rclone
#manual
#manuals
if ! [ -x "$(command -v mandb)" ]; then
echo 'mandb not found. The rclone man docs will not be installed.'
else
@@ -160,11 +161,11 @@ case "$OS" in
fi
;;
'freebsd'|'openbsd'|'netbsd')
#binary
#bin
cp rclone /usr/bin/rclone.new
chown root:wheel /usr/bin/rclone.new
mv /usr/bin/rclone.new /usr/bin/rclone
#manual
#man
mkdir -p /usr/local/man/man1
cp rclone.1 /usr/local/man/man1/
makewhatis
@@ -185,7 +186,7 @@ esac
#update version variable post install
version=$(rclone --version 2>>errors | head -n 1)
version=`rclone --version 2>>errors | head -n 1`
printf "\n${version} has successfully installed."
printf '\nNow run "rclone config" for setup. Check https://rclone.org/docs/ for more details.\n\n'

View File

@@ -320,9 +320,9 @@ filesystem.
where it isn't supported (e.g. Windows) it will be ignored.
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/local/local.go then run make backenddocs" >}}
### Advanced Options
### Standard Options
Here are the advanced options specific to local (Local Disk).
Here are the standard options specific to local (Local Disk).
#### --local-nounc
@@ -336,6 +336,10 @@ Disable UNC (long path names) conversion on Windows
- "true"
- Disables long file names
### Advanced Options
Here are the advanced options specific to local (Local Disk).
#### --copy-links / -L
Follow symlinks and copy the pointed to item.

View File

@@ -325,15 +325,6 @@ fall back to normal copy (which will be slightly slower).
- Type: bool
- Default: false
#### --onedrive-list-chunk
Size of listing chunk.
- Config: list_chunk
- Env Var: RCLONE_ONEDRIVE_LIST_CHUNK
- Type: int
- Default: 1000
#### --onedrive-no-versions
Remove all versions on modifying operations

View File

@@ -378,55 +378,6 @@ call and taken by the [options/set](#options-set) calls as well as the
- `BandwidthSpec` - this will be set and returned as a string, eg
"1M".
## Specifying remotes to work on
Remotes are specified with the `fs=`, `srcFs=`, `dstFs=`
parameters depending on the command being used.
The parameters can be a string as per the rest of rclone, eg
`s3:bucket/path` or `:sftp:/my/dir`. They can also be specified as
JSON blobs.
If specifying a JSON blob it should be an object mapping strings to
strings. These values will be used to configure the remote. There are
3 special values which may be set:
- `type` - set to `type` to specify a remote called `:type:`
- `_name` - set to `name` to specify a remote called `name:`
- `_root` - sets the root of the remote - may be empty
One of `_name` or `type` should normally be set. If the `local`
backend is desired then `type` should be set to `local`. If `_root`
isn't specified then it defaults to the root of the remote.
For example this JSON is equivalent to `remote:/tmp`
```
{
"_name": "remote",
"_path": "/tmp"
}
```
And this is equivalent to `:sftp,host='example.com':/tmp`
```
{
"type": "sftp",
"host": "example.com",
"_path": "/tmp"
}
```
And this is equivalent to `/tmp/dir`
```
{
type = "local",
_ path = "/tmp/dir"
}
```
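For example, here is a sketch of making an rc call over HTTP with one of the JSON blobs above as the `fs` parameter. It assumes an rc server started with `rclone rcd --rc-no-auth` on the default port and uses the `operations/list` call (which takes `fs` and `remote`); adjust both to your setup.

```
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// The equivalent of :sftp,host='example.com':/tmp expressed as a JSON
	// blob, as described above.
	request := map[string]interface{}{
		"fs": map[string]string{
			"type":  "sftp",
			"host":  "example.com",
			"_root": "/tmp",
		},
		"remote": "",
	}
	body, _ := json.Marshal(request)

	// Assumes `rclone rcd --rc-no-auth` is listening on the default port.
	resp, err := http.Post("http://localhost:5572/operations/list", "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer resp.Body.Close()

	var result map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println(result["list"])
}
```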
## Supported commands
{{< rem autogenerated start "- run make rcdocs - don't edit here" >}}
### backend/command: Runs a backend command. {#backend-command}
@@ -765,22 +716,18 @@ Returns the following values:
```
{
"bytes": total transferred bytes since the start of the group,
"checks": number of files checked,
"deletes" : number of files deleted,
"elapsedTime": time in floating point seconds since rclone was started,
"speed": average speed in bytes/sec since start of the process,
"bytes": total transferred bytes since the start of the process,
"errors": number of errors,
"eta": estimated time in seconds until the group completes,
"fatalError": boolean whether there has been at least one fatal error,
"lastError": last error string,
"renames" : number of files renamed,
"retryError": boolean showing whether there has been at least one non-NoRetryError,
"speed": average speed in bytes/sec since start of the group,
"totalBytes": total number of bytes in the group,
"totalChecks": total number of checks in the group,
"totalTransfers": total number of transfers in the group,
"transferTime" : total time spent on running jobs,
"fatalError": whether there has been at least one FatalError,
"retryError": whether there has been at least one non-NoRetryError,
"checks": number of checked files,
"transfers": number of transferred files,
"deletes" : number of deleted files,
"renames" : number of renamed files,
"transferTime" : total time spent on running jobs,
"elapsedTime": time in seconds since the start of the process,
"lastError": last occurred error,
"transferring": an array of currently active file transfers:
[
{
@@ -861,8 +808,6 @@ This shows the current version of go and the go runtime
- os - OS in use as according to Go
- arch - cpu architecture in use according to Go
- goVersion - version of Go runtime in use
- linking - type of rclone executable (static or dynamic)
- goTags - space separated build tags or "none"
### debug/set-block-profile-rate: Set runtime.SetBlockProfileRate for blocking profiling. {#debug-set-block-profile-rate}
@@ -902,26 +847,6 @@ Results
- previousRate - int
### fscache/clear: Clear the Fs cache. {#fscache-clear}
This clears the fs cache. This is where remotes created from backends
are cached for a short while to make repeated rc calls more efficient.
If you change the parameters of a backend then you may want to call
this to clear an existing remote out of the cache before re-creating
it.
**Authentication is required for this call.**
### fscache/entries: Returns the number of entries in the fs cache. {#fscache-entries}
This returns the number of entries in the fs cache.
Returns
- entries - number of items in the cache
**Authentication is required for this call.**
### job/list: Lists the IDs of the running jobs {#job-list}
Parameters - None
@@ -1282,7 +1207,6 @@ This takes the following parameters
- fs - a remote name string e.g. "drive:"
- remote - a path within that remote e.g. "dir"
- each part in body represents a file to be uploaded
See the [uploadfile command](/commands/rclone_uploadfile/) command for more information on the above.
**Authentication is required for this call.**
@@ -1291,31 +1215,11 @@ See the [uploadfile command](/commands/rclone_uploadfile/) command for more info
Returns
- options - a list of the options block names
### options/get: Get all the global options {#options-get}
### options/get: Get all the options {#options-get}
Returns an object where keys are option block names and values are an
object with the current option values in.
Note that these are the global options which are unaffected by use of
the _config and _filter parameters. If you wish to read the parameters
set in _config then use options/config and for _filter use options/filter.
This shows the internal names of the option within rclone which should
map to the external options very easily with a few exceptions.
### options/local: Get the currently active config for this call {#options-local}
Returns an object with the keys "config" and "filter".
The "config" key contains the local config and the "filter" key contains
the local filters.
Note that these are the local options specific to this rc call. If
_config was not supplied then they will be the global options.
Likewise with "_filter".
This call is mostly useful for seeing if _config and _filter passing
is working.
This shows the internal names of the option within rclone which should
map to the external options very easily with a few exceptions.
@@ -1468,7 +1372,6 @@ This takes the following parameters
- srcFs - a remote name string e.g. "drive:src" for the source
- dstFs - a remote name string e.g. "drive:dst" for the destination
- createEmptySrcDirs - create empty src directories on destination if set
See the [copy command](/commands/rclone_copy/) command for more information on the above.
@@ -1481,7 +1384,6 @@ This takes the following parameters
- srcFs - a remote name string e.g. "drive:src" for the source
- dstFs - a remote name string e.g. "drive:dst" for the destination
- createEmptySrcDirs - create empty src directories on destination if set
- deleteEmptySrcDirs - delete empty src directories if set
@@ -1495,7 +1397,6 @@ This takes the following parameters
- srcFs - a remote name string e.g. "drive:src" for the source
- dstFs - a remote name string e.g. "drive:dst" for the destination
- createEmptySrcDirs - create empty src directories on destination if set
See the [sync command](/commands/rclone_sync/) command for more information on the above.

View File

@@ -496,44 +496,6 @@ any given time.
- Type: bool
- Default: false
#### --sftp-disable-concurrent-reads
If set don't use concurrent reads
Normally concurrent reads are safe to use and not using them will
degrade performance, so this option is disabled by default.
Some servers limit the number of times a file can be
downloaded. Using concurrent reads can trigger this limit, so if you
have a server which returns
Failed to copy: file does not exist
Then you may need to enable this flag.
If concurrent reads are disabled, the use_fstat option is ignored.
- Config: disable_concurrent_reads
- Env Var: RCLONE_SFTP_DISABLE_CONCURRENT_READS
- Type: bool
- Default: false
#### --sftp-idle-timeout
Max time before closing idle connections
If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.
Set to 0 to keep connections indefinitely.
- Config: idle_timeout
- Env Var: RCLONE_SFTP_IDLE_TIMEOUT
- Type: Duration
- Default: 1m0s
{{< rem autogenerated options stop >}}
### Limitations ###

View File

@@ -137,21 +137,23 @@ Name of the Webdav site/service/software you are using
- "owncloud"
- Owncloud
- "sharepoint"
- Sharepoint Online, authenticated by Microsoft account.
- Sharepoint
- "sharepoint-ntlm"
- Sharepoint with NTLM authentication. Usually self-hosted or on-premises.
- Sharepoint with NTLM authentication
- "other"
- Other site/service or software
#### --webdav-user
User name. In case NTLM authentication is used, the username should be in the format 'Domain\User'.
User name
- Config: user
- Env Var: RCLONE_WEBDAV_USER
- Type: string
- Default: ""
In case vendor mode `sharepoint-ntlm` is used, the user name is in the form `DOMAIN\user`
#### --webdav-pass
Password.
@@ -185,19 +187,6 @@ Command to run to get a bearer token
- Type: string
- Default: ""
#### --webdav-encoding
This sets the encoding for the backend.
See: the [encoding section in the overview](/overview/#encoding) for more info.
Default encoding is Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8 for sharepoint-ntlm or identity otherwise.
- Config: encoding
- Env Var: RCLONE_WEBDAV_ENCODING
- Type: string
- Default: ""
{{< rem autogenerated options stop >}}
## Provider notes ##

View File

@@ -128,26 +128,6 @@ from filenames during upload.
Here are the standard options specific to zoho (Zoho).
#### --zoho-client-id
OAuth Client Id
Leave blank normally.
- Config: client_id
- Env Var: RCLONE_ZOHO_CLIENT_ID
- Type: string
- Default: ""
#### --zoho-client-secret
OAuth Client Secret
Leave blank normally.
- Config: client_secret
- Env Var: RCLONE_ZOHO_CLIENT_SECRET
- Type: string
- Default: ""
#### --zoho-region
Zoho region to connect to. You'll have to use the region your organization is registered in.
@@ -170,35 +150,6 @@ Zoho region to connect to. You'll have to use the region you organization is reg
Here are the advanced options specific to zoho (Zoho).
#### --zoho-token
OAuth Access Token as a JSON blob.
- Config: token
- Env Var: RCLONE_ZOHO_TOKEN
- Type: string
- Default: ""
#### --zoho-auth-url
Auth server URL.
Leave blank to use the provider defaults.
- Config: auth_url
- Env Var: RCLONE_ZOHO_AUTH_URL
- Type: string
- Default: ""
#### --zoho-token-url
Token server url.
Leave blank to use the provider defaults.
- Config: token_url
- Env Var: RCLONE_ZOHO_TOKEN_URL
- Type: string
- Default: ""
#### --zoho-encoding
This sets the encoding for the backend.

View File

@@ -1 +1 @@
v1.56.0
v1.55.0

View File

@@ -214,10 +214,7 @@ func (acc *Account) averageLoop() {
acc.values.mu.Lock()
// Add average of last second.
elapsed := now.Sub(acc.values.lpTime).Seconds()
avg := 0.0
if elapsed > 0 {
avg = float64(acc.values.lpBytes) / elapsed
}
avg := float64(acc.values.lpBytes) / elapsed
// Soft start the moving average
if period < averagePeriod {
period++
@@ -445,11 +442,7 @@ func (acc *Account) speed() (bps, current float64) {
}
// Calculate speed from first read.
total := float64(time.Now().Sub(acc.values.start)) / float64(time.Second)
if total > 0 {
bps = float64(acc.values.bytes) / total
} else {
bps = 0.0
}
current = acc.values.avg
return
}
@@ -527,11 +520,14 @@ func (acc *Account) rcStats() (out rc.Params) {
out["speed"] = spd
out["speedAvg"] = cur
eta, etaOK := acc.eta()
if etaOK {
eta, etaok := acc.eta()
out["eta"] = nil
if etaok {
if eta > 0 {
out["eta"] = eta.Seconds()
} else {
out["eta"] = nil
out["eta"] = 0
}
}
out["name"] = acc.name

View File

@@ -90,7 +90,7 @@ func (c *RcloneCollector) Collect(ch chan<- prometheus.Metric) {
s.mu.RLock()
ch <- prometheus.MustNewConstMetric(c.bytesTransferred, prometheus.CounterValue, float64(s.bytes))
ch <- prometheus.MustNewConstMetric(c.transferSpeed, prometheus.GaugeValue, s.speed())
ch <- prometheus.MustNewConstMetric(c.transferSpeed, prometheus.GaugeValue, s.Speed())
ch <- prometheus.MustNewConstMetric(c.numOfErrors, prometheus.CounterValue, float64(s.errors))
ch <- prometheus.MustNewConstMetric(c.numOfCheckFiles, prometheus.CounterValue, float64(s.checks))
ch <- prometheus.MustNewConstMetric(c.transferredFiles, prometheus.CounterValue, float64(s.transfers))

View File

@@ -65,19 +65,9 @@ func NewStats(ctx context.Context) *StatsInfo {
// RemoteStats returns stats for rc
func (s *StatsInfo) RemoteStats() (out rc.Params, err error) {
// NB if adding values here - make sure you update the docs in
// stats_groups.go
out = make(rc.Params)
ts := s.calculateTransferStats()
out["totalChecks"] = ts.totalChecks
out["totalTransfers"] = ts.totalTransfers
out["totalBytes"] = ts.totalBytes
out["transferTime"] = ts.transferTime
out["speed"] = ts.speed
s.mu.RLock()
out["speed"] = s.Speed()
out["bytes"] = s.bytes
out["errors"] = s.errors
out["fatalError"] = s.fatalError
@@ -87,15 +77,9 @@ func (s *StatsInfo) RemoteStats() (out rc.Params, err error) {
out["deletes"] = s.deletes
out["deletedDirs"] = s.deletedDirs
out["renames"] = s.renames
out["transferTime"] = s.totalDuration().Seconds()
out["elapsedTime"] = time.Since(startTime).Seconds()
eta, etaOK := eta(s.bytes, ts.totalBytes, ts.speed)
if etaOK {
out["eta"] = eta.Seconds()
} else {
out["eta"] = nil
}
s.mu.RUnlock()
if !s.checking.empty() {
out["checking"] = s.checking.remotes()
}
@@ -105,14 +89,11 @@ func (s *StatsInfo) RemoteStats() (out rc.Params, err error) {
if s.errors > 0 {
out["lastError"] = s.lastError.Error()
}
return out, nil
}
// Speed returns the average speed of the transfer in bytes/second
//
// Call with lock held
func (s *StatsInfo) speed() float64 {
func (s *StatsInfo) Speed() float64 {
dt := s.totalDuration()
dtSeconds := dt.Seconds()
speed := 0.0
@@ -221,9 +202,6 @@ func eta(size, total int64, rate float64) (eta time.Duration, ok bool) {
return 0, false
}
seconds := float64(remaining) / rate
if seconds < 0 {
seconds = 0
}
return time.Second * time.Duration(seconds), true
}
@@ -249,60 +227,36 @@ func percent(a int64, b int64) string {
return fmt.Sprintf("%d%%", int(float64(a)*100/float64(b)+0.5))
}
// returned from calculateTransferStats
type transferStats struct {
totalChecks int64
totalTransfers int64
totalBytes int64
transferTime float64
speed float64
}
// calculateTransferStats calculates some additional transfer stats not
// stored directly in StatsInfo
func (s *StatsInfo) calculateTransferStats() (ts transferStats) {
// String convert the StatsInfo to a string for printing
func (s *StatsInfo) String() string {
// checking and transferring have their own locking so read
// here before lock to prevent deadlock on GetBytes
transferring, checking := s.transferring.count(), s.checking.count()
transferringBytesDone, transferringBytesTotal := s.transferring.progress(s)
s.mu.RLock()
defer s.mu.RUnlock()
ts.totalChecks = int64(s.checkQueue) + s.checks + int64(checking)
ts.totalTransfers = int64(s.transferQueue) + s.transfers + int64(transferring)
// note that s.bytes already includes transferringBytesDone so
// we take it off here to avoid double counting
ts.totalBytes = s.transferQueueSize + s.bytes + transferringBytesTotal - transferringBytesDone
dt := s.totalDuration()
ts.transferTime = dt.Seconds()
ts.speed = 0.0
if dt > 0 {
ts.speed = float64(s.bytes) / ts.transferTime
}
return ts
}
// String convert the StatsInfo to a string for printing
func (s *StatsInfo) String() string {
// NB if adding more stats in here, remember to add them into
// RemoteStats() too.
ts := s.calculateTransferStats()
s.mu.RLock()
elapsedTime := time.Since(startTime)
elapsedTimeSecondsOnly := elapsedTime.Truncate(time.Second/10) % time.Minute
dt := s.totalDuration()
dtSeconds := dt.Seconds()
speed := 0.0
if dt > 0 {
speed = float64(s.bytes) / dtSeconds
}
displaySpeed := ts.speed
displaySpeed := speed
if s.ci.DataRateUnit == "bits" {
displaySpeed *= 8
}
var (
totalChecks = int64(s.checkQueue) + s.checks + int64(checking)
totalTransfer = int64(s.transferQueue) + s.transfers + int64(transferring)
// note that s.bytes already includes transferringBytesDone so
// we take it off here to avoid double counting
totalSize = s.transferQueueSize + s.bytes + transferringBytesTotal - transferringBytesDone
currentSize = s.bytes
buf = &bytes.Buffer{}
xfrchkString = ""
dateString = ""
@@ -312,11 +266,11 @@ func (s *StatsInfo) String() string {
_, _ = fmt.Fprintf(buf, "\nTransferred: ")
} else {
xfrchk := []string{}
if ts.totalTransfers > 0 && s.transferQueue > 0 {
xfrchk = append(xfrchk, fmt.Sprintf("xfr#%d/%d", s.transfers, ts.totalTransfers))
if totalTransfer > 0 && s.transferQueue > 0 {
xfrchk = append(xfrchk, fmt.Sprintf("xfr#%d/%d", s.transfers, totalTransfer))
}
if ts.totalChecks > 0 && s.checkQueue > 0 {
xfrchk = append(xfrchk, fmt.Sprintf("chk#%d/%d", s.checks, ts.totalChecks))
if totalChecks > 0 && s.checkQueue > 0 {
xfrchk = append(xfrchk, fmt.Sprintf("chk#%d/%d", s.checks, totalChecks))
}
if len(xfrchk) > 0 {
xfrchkString = fmt.Sprintf(" (%s)", strings.Join(xfrchk, ", "))
@@ -330,16 +284,16 @@ func (s *StatsInfo) String() string {
_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s",
dateString,
fs.SizeSuffix(s.bytes),
fs.SizeSuffix(ts.totalBytes).Unit("Bytes"),
percent(s.bytes, ts.totalBytes),
fs.SizeSuffix(totalSize).Unit("Bytes"),
percent(s.bytes, totalSize),
fs.SizeSuffix(displaySpeed).Unit(strings.Title(s.ci.DataRateUnit)+"/s"),
etaString(s.bytes, ts.totalBytes, ts.speed),
etaString(currentSize, totalSize, speed),
xfrchkString,
)
if s.ci.ProgressTerminalTitle {
// Writes ETA to the terminal title
terminal.WriteTerminalTitle("ETA: " + etaString(s.bytes, ts.totalBytes, ts.speed))
terminal.WriteTerminalTitle("ETA: " + etaString(currentSize, totalSize, speed))
}
if !s.ci.StatsOneLine {
@@ -360,9 +314,9 @@ func (s *StatsInfo) String() string {
_, _ = fmt.Fprintf(buf, "Errors: %10d%s\n",
s.errors, errorDetails)
}
if s.checks != 0 || ts.totalChecks != 0 {
if s.checks != 0 || totalChecks != 0 {
_, _ = fmt.Fprintf(buf, "Checks: %10d / %d, %s\n",
s.checks, ts.totalChecks, percent(s.checks, ts.totalChecks))
s.checks, totalChecks, percent(s.checks, totalChecks))
}
if s.deletes != 0 || s.deletedDirs != 0 {
_, _ = fmt.Fprintf(buf, "Deleted: %10d (files), %d (dirs)\n", s.deletes, s.deletedDirs)
@@ -370,9 +324,9 @@ func (s *StatsInfo) String() string {
if s.renames != 0 {
_, _ = fmt.Fprintf(buf, "Renamed: %10d\n", s.renames)
}
if s.transfers != 0 || ts.totalTransfers != 0 {
if s.transfers != 0 || totalTransfer != 0 {
_, _ = fmt.Fprintf(buf, "Transferred: %10d / %d, %s\n",
s.transfers, ts.totalTransfers, percent(s.transfers, ts.totalTransfers))
s.transfers, totalTransfer, percent(s.transfers, totalTransfer))
}
_, _ = fmt.Fprintf(buf, "Elapsed time: %10ss\n", strings.TrimRight(elapsedTime.Truncate(time.Minute).String(), "0s")+fmt.Sprintf("%.1f", elapsedTimeSecondsOnly.Seconds()))
}

View File

@@ -86,22 +86,18 @@ Returns the following values:
` + "```" + `
{
"bytes": total transferred bytes since the start of the group,
"checks": number of files checked,
"deletes" : number of files deleted,
"elapsedTime": time in floating point seconds since rclone was started,
"speed": average speed in bytes/sec since start of the process,
"bytes": total transferred bytes since the start of the process,
"errors": number of errors,
"eta": estimated time in seconds until the group completes,
"fatalError": boolean whether there has been at least one fatal error,
"lastError": last error string,
"renames" : number of files renamed,
"retryError": boolean showing whether there has been at least one non-NoRetryError,
"speed": average speed in bytes/sec since start of the group,
"totalBytes": total number of bytes in the group,
"totalChecks": total number of checks in the group,
"totalTransfers": total number of transfers in the group,
"transferTime" : total time spent on running jobs,
"fatalError": whether there has been at least one FatalError,
"retryError": whether there has been at least one non-NoRetryError,
"checks": number of checked files,
"transfers": number of transferred files,
"deletes" : number of deleted files,
"renames" : number of renamed files,
"transferTime" : total time spent on running jobs,
"elapsedTime": time in seconds since the start of the process,
"lastError": last occurred error,
"transferring": an array of currently active file transfers:
[
{

fs/cache/cache.go vendored
View File

@@ -7,31 +7,18 @@ import (
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/lib/cache"
)
var (
once sync.Once // creation
c *cache.Cache
c = cache.New()
mu sync.Mutex // mutex to protect remap
remap = map[string]string{} // map user supplied names to canonical names
)
// Create the cache just once
func createOnFirstUse() {
once.Do(func() {
ci := fs.GetConfig(context.Background())
c = cache.New()
c.SetExpireDuration(ci.FsCacheExpireDuration)
c.SetExpireInterval(ci.FsCacheExpireInterval)
})
}
// Canonicalize looks up fsString in the mapping from user supplied
// names to canonical names and return the canonical form
func Canonicalize(fsString string) string {
createOnFirstUse()
mu.Lock()
canonicalName, ok := remap[fsString]
mu.Unlock()
@@ -55,11 +42,10 @@ func addMapping(fsString, canonicalName string) {
// GetFn gets an fs.Fs named fsString either from the cache or creates
// it afresh with the create function
func GetFn(ctx context.Context, fsString string, create func(ctx context.Context, fsString string) (fs.Fs, error)) (f fs.Fs, err error) {
createOnFirstUse()
canonicalFsString := Canonicalize(fsString)
fsString = Canonicalize(fsString)
created := false
value, err := c.Get(canonicalFsString, func(canonicalFsString string) (f interface{}, ok bool, err error) {
f, err = create(ctx, fsString) // always create the backend with the original non-canonicalised string
value, err := c.Get(fsString, func(fsString string) (f interface{}, ok bool, err error) {
f, err = create(ctx, fsString)
ok = err == nil || err == fs.ErrorIsFile
created = ok
return f, ok, err
@@ -71,19 +57,19 @@ func GetFn(ctx context.Context, fsString string, create func(ctx context.Context
// Check we stored the Fs at the canonical name
if created {
canonicalName := fs.ConfigString(f)
if canonicalName != canonicalFsString {
if canonicalName != fsString {
// Note that if err == fs.ErrorIsFile at this moment
// then we can't rename the remote as it will have the
// wrong error status, we need to add a new one.
if err == nil {
fs.Debugf(nil, "fs cache: renaming cache item %q to be canonical %q", canonicalFsString, canonicalName)
value, found := c.Rename(canonicalFsString, canonicalName)
fs.Debugf(nil, "fs cache: renaming cache item %q to be canonical %q", fsString, canonicalName)
value, found := c.Rename(fsString, canonicalName)
if found {
f = value.(fs.Fs)
}
addMapping(canonicalFsString, canonicalName)
addMapping(fsString, canonicalName)
} else {
fs.Debugf(nil, "fs cache: adding new entry for parent of %q, %q", canonicalFsString, canonicalName)
fs.Debugf(nil, "fs cache: adding new entry for parent of %q, %q", fsString, canonicalName)
Put(canonicalName, f)
}
}
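From a caller's point of view the effect of this canonicalisation is that differently spelled but equivalent fsStrings share a single cache entry. A rough sketch, assuming a remote named "mydrive" is already configured and that both spellings below canonicalise to the same remote:

package main

import (
    "context"
    "fmt"

    _ "github.com/rclone/rclone/backend/all" // register backends so fs.NewFs can build them
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/cache"
)

func main() {
    ctx := context.Background()

    // First call creates the backend and stores it under its canonical name.
    f1, err := cache.GetFn(ctx, "mydrive:path", fs.NewFs)
    if err != nil {
        panic(err)
    }

    // A different spelling of the same remote; if it canonicalises to the
    // same name it is served straight from the cache.
    f2, err := cache.GetFn(ctx, "mydrive:path/", fs.NewFs)
    if err != nil {
        panic(err)
    }

    fmt.Println("same backend:", f1 == f2, "entries:", cache.Entries())
}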
@@ -93,7 +79,6 @@ func GetFn(ctx context.Context, fsString string, create func(ctx context.Context
// Pin f into the cache until Unpin is called
func Pin(f fs.Fs) {
createOnFirstUse()
c.Pin(fs.ConfigString(f))
}
@@ -111,20 +96,12 @@ func PinUntilFinalized(f fs.Fs, x interface{}) {
// Unpin f from the cache
func Unpin(f fs.Fs) {
createOnFirstUse()
c.Pin(fs.ConfigString(f))
}
// Get gets an fs.Fs named fsString either from the cache or creates it afresh
func Get(ctx context.Context, fsString string) (f fs.Fs, err error) {
// If we are making a long lived backend which lives longer
// than this request, we want to disconnect it from the
// current context and in particular any WithCancel contexts,
// but we want to preserve the config embedded in the context.
newCtx := context.Background()
newCtx = fs.CopyConfig(newCtx, ctx)
newCtx = filter.CopyConfig(newCtx, ctx)
return GetFn(newCtx, fsString, fs.NewFs)
return GetFn(ctx, fsString, fs.NewFs)
}
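The longer variant of Get in this hunk shows a pattern worth calling out: a backend that is cached and may outlive the current request should not keep a request-scoped context (which may carry a WithCancel), yet its configuration should still be honoured. A sketch of the idea, reusing the CopyConfig helpers visible in the diff; detach is an invented name, not rclone API:

import (
    "context"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/filter"
)

// detach returns a context suitable for a long-lived backend: it drops any
// deadline or cancellation attached to reqCtx while keeping the fs config
// and filter rules stored in it.
func detach(reqCtx context.Context) context.Context {
    newCtx := context.Background()             // no deadline, no cancellation
    newCtx = fs.CopyConfig(newCtx, reqCtx)     // carry over config such as --checkers and --transfers
    newCtx = filter.CopyConfig(newCtx, reqCtx) // carry over --include/--exclude rules
    return newCtx
}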
// GetArr gets []fs.Fs from []fsStrings either from the cache or creates it afresh
@@ -142,28 +119,12 @@ func GetArr(ctx context.Context, fsStrings []string) (f []fs.Fs, err error) {
// Put puts an fs.Fs named fsString into the cache
func Put(fsString string, f fs.Fs) {
createOnFirstUse()
canonicalName := fs.ConfigString(f)
c.Put(canonicalName, f)
addMapping(fsString, canonicalName)
}
// ClearConfig deletes all entries which were based on the config name passed in
//
// Returns number of entries deleted
func ClearConfig(name string) (deleted int) {
createOnFirstUse()
return c.DeletePrefix(name + ":")
}
// Clear removes everything from the cache
func Clear() {
createOnFirstUse()
c.Clear()
}
// Entries returns the number of entries in the cache
func Entries() int {
createOnFirstUse()
return c.Entries()
}
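To round off this file, the sketch below shows how ClearConfig and Entries might be driven when a remote's configuration is deleted. forgetRemote and the log wording are invented for illustration, and ClearConfig appears on only one side of this diff:

import (
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/cache"
)

// forgetRemote evicts every cached backend built from the named config
// section, for example after "rclone config delete mydrive".
func forgetRemote(name string) {
    deleted := cache.ClearConfig(name) // removes every key with the "name:" prefix
    fs.Debugf(nil, "fs cache: dropped %d entries for %q, %d still cached", deleted, name, cache.Entries())
}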

fs/cache/cache_test.go

@@ -33,7 +33,7 @@ func mockNewFs(t *testing.T) (func(), func(ctx context.Context, path string) (fs
panic("unreachable")
}
cleanup := func() {
Clear()
c.Clear()
}
return cleanup, create
}
@@ -42,12 +42,12 @@ func TestGet(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, Entries())
assert.Equal(t, 0, c.Entries())
f, err := GetFn(context.Background(), "mock:/", create)
require.NoError(t, err)
assert.Equal(t, 1, Entries())
assert.Equal(t, 1, c.Entries())
f2, err := GetFn(context.Background(), "mock:/", create)
require.NoError(t, err)
@@ -59,13 +59,13 @@ func TestGetFile(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, Entries())
assert.Equal(t, 0, c.Entries())
f, err := GetFn(context.Background(), "mock:/file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
require.NotNil(t, f)
assert.Equal(t, 2, Entries())
assert.Equal(t, 2, c.Entries())
f2, err := GetFn(context.Background(), "mock:/file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
@@ -85,13 +85,13 @@ func TestGetFile2(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, Entries())
assert.Equal(t, 0, c.Entries())
f, err := GetFn(context.Background(), "mock:file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
require.NotNil(t, f)
assert.Equal(t, 2, Entries())
assert.Equal(t, 2, c.Entries())
f2, err := GetFn(context.Background(), "mock:file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
@@ -111,13 +111,13 @@ func TestGetError(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, Entries())
assert.Equal(t, 0, c.Entries())
f, err := GetFn(context.Background(), "mock:/error", create)
require.Equal(t, errSentinel, err)
require.Equal(t, nil, f)
assert.Equal(t, 0, Entries())
assert.Equal(t, 0, c.Entries())
}
func TestPut(t *testing.T) {
@@ -126,17 +126,17 @@ func TestPut(t *testing.T) {
f := mockfs.NewFs(context.Background(), "mock", "/alien")
assert.Equal(t, 0, Entries())
assert.Equal(t, 0, c.Entries())
Put("mock:/alien", f)
assert.Equal(t, 1, Entries())
assert.Equal(t, 1, c.Entries())
fNew, err := GetFn(context.Background(), "mock:/alien", create)
require.NoError(t, err)
require.Equal(t, f, fNew)
assert.Equal(t, 1, Entries())
assert.Equal(t, 1, c.Entries())
// Check canonicalisation
@@ -146,7 +146,7 @@ func TestPut(t *testing.T) {
require.NoError(t, err)
require.Equal(t, f, fNew)
assert.Equal(t, 1, Entries())
assert.Equal(t, 1, c.Entries())
}
@@ -166,22 +166,6 @@ func TestPin(t *testing.T) {
Unpin(f2)
}
func TestClearConfig(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, Entries())
_, err := GetFn(context.Background(), "mock:/file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
assert.Equal(t, 2, Entries()) // file + parent
assert.Equal(t, 2, ClearConfig("mock"))
assert.Equal(t, 0, Entries())
}
func TestClear(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
@@ -190,22 +174,9 @@ func TestClear(t *testing.T) {
_, err := GetFn(context.Background(), "mock:/", create)
require.NoError(t, err)
assert.Equal(t, 1, Entries())
assert.Equal(t, 1, c.Entries())
Clear()
assert.Equal(t, 0, Entries())
}
func TestEntries(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, Entries())
// Create something
_, err := GetFn(context.Background(), "mock:/", create)
require.NoError(t, err)
assert.Equal(t, 1, Entries())
assert.Equal(t, 0, c.Entries())
}
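To make the assertions above easier to follow without opening the full test file, here is a rough reconstruction of the create helper returned by mockNewFs; errSentinel and the exact path handling are inferred from the assertions rather than copied from the source:

import (
    "context"
    "errors"
    "strings"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fstest/mockfs"
)

var errSentinel = errors.New("an error")

// create stands in for fs.NewFs in the tests: directory paths succeed, paths
// ending in file.txt report fs.ErrorIsFile (which is why those tests expect
// two cache entries, the file and its parent), and the error path fails with
// errSentinel.
func create(ctx context.Context, path string) (fs.Fs, error) {
    switch {
    case strings.HasSuffix(path, "file.txt"):
        return mockfs.NewFs(ctx, "mock", "/"), fs.ErrorIsFile
    case strings.HasSuffix(path, "error"):
        return nil, errSentinel
    default:
        return mockfs.NewFs(ctx, "mock", "/"), nil
    }
}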

Some files were not shown because too many files have changed in this diff.