Mirror of https://github.com/rclone/rclone.git
Synced 2025-12-24 12:13:19 +00:00

Compare commits: v1.53.0...pr-4625-sf (43 commits)
| SHA1 |
|---|
| f11255a801 |
| 433b73a5a8 |
| ab88a3341f |
| 181da3ce9b |
| b14a58c9b8 |
| 60cc2cba1f |
| c797494d88 |
| e2a57182be |
| 8928441466 |
| 0e8965060f |
| f3cf6fcdd7 |
| 18ccf0f871 |
| 313647bcf3 |
| 61fe068c90 |
| 5c49096e11 |
| a73c78545d |
| e0fd560711 |
| 6a56ac1032 |
| 96299629b4 |
| 75de30cfa8 |
| 233bed6a73 |
| b3964efe4d |
| 575f061629 |
| 640d7d3b4e |
| e92294b482 |
| 22937e8982 |
| c3d1474eb9 |
| e2426ea87b |
| e58a61175f |
| 05bea46c3e |
| c8a719ae0d |
| c3884aafd9 |
| 0a9785a4ff |
| 8140f67092 |
| 4a001b8a02 |
| 525433e6dd |
| f71f6c57d7 |
| e35623c72e |
| 344bce7e2a |
| 3a4322a7ba |
| 27b9ae4fc3 |
| 7e2488af10 |
| 41ecb586c4 |
Makefile (18 changes)

@@ -8,7 +8,8 @@ VERSION := $(shell cat VERSION)
# Last tag on this branch
LAST_TAG := $(shell git describe --tags --abbrev=0)
# Next version
NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}')
NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2,$$3+1}')
# If we are working on a release, override branch to master
ifdef RELEASE_TAG
BRANCH := master

@@ -164,6 +165,11 @@ validate_website: website
tarball:
git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)

vendorball:
go mod vendor
tar -zcf build/rclone-$(TAG)-vendor.tar.gz vendor
rm -rf vendor

sign_upload:
cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS

@@ -239,7 +245,15 @@ startdev:
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git commit -m "Start $(VERSION)-DEV development" fs/version.go
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html

startstable:
@echo "Version is $(VERSION)"
@echo "Next stable version is $(NEXT_PATCH_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_PATCH_VERSION)" > VERSION
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html

winzip:
zip -9 rclone-$(TAG).zip rclone.exe
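The two awk expressions in the Makefile hunk above replace the old perl one-liner: they derive the next minor version (v1.53.0 -> v1.54.0) and the next patch version (v1.53.0 -> v1.53.1) from the contents of VERSION. A minimal Go sketch of the same arithmetic follows; the function and variable names are illustrative and not part of the repository.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// bump splits a version like "v1.53.0" into its three numeric fields and
// returns the next minor version ("v1.54.0") and the next patch version
// ("v1.53.1"), mirroring what the awk one-liners in the Makefile compute.
func bump(version string) (nextMinor, nextPatch string, err error) {
	parts := strings.Split(strings.TrimPrefix(strings.TrimSpace(version), "v"), ".")
	if len(parts) != 3 {
		return "", "", fmt.Errorf("unexpected version %q", version)
	}
	n := make([]int, 3)
	for i, p := range parts {
		if n[i], err = strconv.Atoi(p); err != nil {
			return "", "", err
		}
	}
	nextMinor = fmt.Sprintf("v%d.%d.0", n[0], n[1]+1)
	nextPatch = fmt.Sprintf("v%d.%d.%d", n[0], n[1], n[2]+1)
	return nextMinor, nextPatch, nil
}

func main() {
	minor, patch, _ := bump("v1.53.0")
	fmt.Println(minor, patch) // v1.54.0 v1.53.1
}
```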
@@ -64,6 +64,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
RELEASE.md (58 changes)

@@ -9,7 +9,7 @@ This file describes how to make the various kinds of releases

## Making a release

* git checkout master
* git checkout master # see below for stable branch
* git pull
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green

@@ -21,16 +21,17 @@ This file describes how to make the various kinds of releases
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push --tags origin master
* git push --follow-tags origin
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
* make tarball
* make vendorball
* make sign_upload
* make check_sign
* make upload
* make upload_website
* make upload_github
* make startdev
* make startdev # make startstable for stable branch
* # announce with forum post, twitter post, patreon post

Early in the next release cycle update the dependencies

@@ -41,62 +42,35 @@ Early in the next release cycle update the dependencies
* git add new files
* git commit -a -v

If `make update` fails with errors like this:

```
# github.com/cpuguy83/go-md2man/md2man
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
```

Can be fixed with

* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
* GO111MODULE=on go mod tidy

## Making a point release

If rclone needs a point release due to some horrendous bug:

First make the release branch. If this is a second point release then
this will be done already.
Set vars

* BASE_TAG=v1.XX # eg v1.52
* NEW_TAG=${BASE_TAG}.Y # eg v1.52.1
* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1

First make the release branch. If this is a second point release then
this will be done already.

* git branch ${BASE_TAG} ${BASE_TAG}-stable
* git co ${BASE_TAG}-stable
* make startstable

Now

* FIXME this is now broken with new semver layout - needs fixing
* FIXME the TAG=${NEW_TAG} shouldn't be necessary any more
* git co ${BASE_TAG}-stable
* git cherry-pick any fixes
* Test (see above)
* make NEXT_VERSION=${NEW_TAG} tag
* edit docs/content/changelog.md
* make TAG=${NEW_TAG} doc
* git commit -a -v -m "Version ${NEW_TAG}"
* git tag -d ${NEW_TAG}
* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
* git push --tags -u origin ${BASE_TAG}-stable
* Wait for builds to complete
* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
* make TAG=${NEW_TAG} tarball
* make TAG=${NEW_TAG} sign_upload
* make TAG=${NEW_TAG} check_sign
* make TAG=${NEW_TAG} upload
* make TAG=${NEW_TAG} upload_website
* make TAG=${NEW_TAG} upload_github
* Do the steps as above
* make startstable
* NB this overwrites the current beta so we need to do this
* git co master
* make VERSION=${NEW_TAG} startdev
* # cherry pick the changes to the changelog and VERSION
* git checkout ${BASE_TAG}-stable VERSION docs/content/changelog.md
* git commit --amend
* # cherry pick the changes to the changelog
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push
* Announce!

## Making a manual build of docker
@@ -958,6 +958,8 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
}
info := f.wrapInfo(src, chunkRemote, size)

// Refill chunkLimit and let basePut repeatedly call chunkingReader.Read()
c.chunkLimit = c.chunkSize
// TODO: handle range/limit options
chunk, errChunk := basePut(ctx, wrapIn, info, options...)
if errChunk != nil {

@@ -1166,10 +1168,14 @@ func (c *chunkingReader) updateHashes() {
func (c *chunkingReader) Read(buf []byte) (bytesRead int, err error) {
if c.chunkLimit <= 0 {
// Chunk complete - switch to next one.
// Note #1:
// We might not get here because some remotes (eg. box multi-uploader)
// read the specified size exactly and skip the concluding EOF Read.
// Then a check in the put loop will kick in.
c.chunkLimit = c.chunkSize
// Note #2:
// The crypt backend after receiving EOF here will call Read again
// and we must insist on returning EOF, so we postpone refilling
// chunkLimit to the main loop.
return 0, io.EOF
}
if int64(len(buf)) > c.chunkLimit {
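The two notes in the hunk above describe the chunk-boundary contract: the reader keeps answering io.EOF once a chunk's byte budget is spent, and it is the put loop (not the reader) that refills the budget for the next chunk. Below is a simplified, self-contained sketch of that behaviour; it is not the real chunker code and the type and field names are made up for illustration.

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// chunkLimitedReader hands out at most `limit` bytes per chunk and then
// signals io.EOF until the caller resets the limit for the next chunk.
type chunkLimitedReader struct {
	src   io.Reader
	limit int64 // bytes left in the current chunk; <= 0 means chunk complete
}

func (r *chunkLimitedReader) Read(p []byte) (int, error) {
	if r.limit <= 0 {
		// Chunk complete - keep returning EOF until the caller refills limit.
		return 0, io.EOF
	}
	if int64(len(p)) > r.limit {
		p = p[:r.limit]
	}
	n, err := r.src.Read(p)
	r.limit -= int64(n)
	return n, err
}

func main() {
	r := &chunkLimitedReader{src: strings.NewReader("0123456789"), limit: 4}
	for chunk := 0; chunk < 3; chunk++ {
		data, _ := io.ReadAll(r) // reads until the per-chunk EOF
		fmt.Printf("chunk %d: %q\n", chunk, data)
		r.limit = 4 // the put loop refills the budget before the next chunk
	}
}
```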
@@ -157,6 +157,17 @@ func driveScopesContainsAppFolder(scopes []string) bool {
return false
}

func driveOAuthOptions() []fs.Option {
opts := []fs.Option{}
for _, opt := range oauthutil.SharedOptions {
if opt.Name == config.ConfigClientID {
opt.Help = "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance."
}
opts = append(opts, opt)
}
return opts
}

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{

@@ -192,7 +203,7 @@ func init() {
log.Fatalf("Failed to configure team drive: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: append(driveOAuthOptions(), []fs.Option{{
Name: "scope",
Help: "Scope that rclone should use when requesting access from drive.",
Examples: []fs.OptionExample{{
@@ -142,6 +142,31 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Help: "Impersonate this user when using a business account.",
Default: "",
Advanced: true,
}, {
Name: "shared_files",
Help: `Instructs rclone to work on individual shared files.

In this mode rclone's features are extremely limited - only list (ls, lsl, etc.)
operations and read operations (e.g. downloading) are supported in this mode.
All other operations will be disabled.`,
Default: false,
Advanced: true,
}, {
Name: "shared_folders",
Help: `Instructs rclone to work on shared folders.

When this flag is used with no path only the List operation is supported and
all available shared folders will be listed. If you specify a path the first part
will be interpreted as the name of shared folder. Rclone will then try to mount this
shared to the root namespace. On success shared folder rclone proceeds normally.
The shared folder is now pretty much a normal folder and all normal operations
are supported.

Note that we don't unmount the shared folder afterwards so the
--dropbox-shared-folders can be omitted after the first use of a particular
shared folder.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,

@@ -161,9 +186,11 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),

// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote dropbox server

@@ -186,7 +213,9 @@ type Fs struct {
//
// Dropbox Objects always have full metadata
type Object struct {
fs *Fs // what this object is part of
fs *Fs // what this object is part of
id string
url string
remote string // The remote path
bytes int64 // size of the object
modTime time.Time // time it was last modified

@@ -332,8 +361,60 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
CaseInsensitive: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
f.setRoot(root)
})

// do not fill features yet
if f.opt.SharedFiles {
f.setRoot(root)
if f.root == "" {
return f, nil
}
_, err := f.findSharedFile(f.root)
f.root = ""
if err == nil {
return f, fs.ErrorIsFile
}
return f, nil
}

if f.opt.SharedFolders {
f.setRoot(root)
if f.root == "" {
return f, nil // our root it empty so we probably want to list shared folders
}

dir := path.Dir(f.root)
if dir == "." {
dir = f.root
}

// root is not empty so we have find the right shared folder if it exists
id, err := f.findSharedFolder(dir)
if err != nil {
// if we didn't find the specified shared folder we have to bail out here
return nil, err
}
// we found the specified shared folder so let's mount it
// this will add it to the users normal root namespace and allows us
// to actually perform operations on it using the normal api endpoints.
err = f.mountSharedFolder(id)
if err != nil {
switch e := err.(type) {
case sharing.MountFolderAPIError:
if e.EndpointError == nil || (e.EndpointError != nil && e.EndpointError.Tag != sharing.MountFolderErrorAlreadyMounted) {
return nil, err
}
default:
return nil, err
}
// if the moint failed we have to abort here
}
// if the mount succeeded it's now a normal folder in the users root namespace
// we disable shared folder mode and proceed normally
f.opt.SharedFolders = false
}

f.features.Fill(f)

// If root starts with / then use the actual root
if strings.HasPrefix(root, "/") {

@@ -355,6 +436,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
fs.Debugf(f, "Using root namespace %q", f.ns)
}
f.setRoot(root)

// See if the root is actually an object
_, err = f.getFileMetadata(f.slashRoot)

@@ -465,9 +547,150 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if f.opt.SharedFiles {
return f.findSharedFile(remote)
}
return f.newObjectWithInfo(remote, nil)
}

// listSharedFoldersApi lists all available shared folders mounted and not mounted
// we'll need the id later so we have to return them in original format
func (f *Fs) listSharedFolders() (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFoldersResult
for {
if !started {
arg := sharing.ListFoldersArgs{
Limit: 100,
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListFolders(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
started = true
} else {
arg := sharing.ListFoldersContinueArg{
Cursor: res.Cursor,
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListFoldersContinue(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
}
}
for _, entry := range res.Entries {
leaf := f.opt.Enc.ToStandardName(entry.Name)
d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
entries = append(entries, d)
if err != nil {
return nil, err
}
}
if res.Cursor == "" {
break
}
}

return entries, nil
}

// findSharedFolder find the id for a given shared folder name
// somewhat annoyingly there is no endpoint to query a shared folder by it's name
// so our only option is to iterate over all shared folders
func (f *Fs) findSharedFolder(name string) (id string, err error) {
entries, err := f.listSharedFolders()
if err != nil {
return "", err
}
for _, entry := range entries {
if entry.(*fs.Dir).Remote() == name {
return entry.(*fs.Dir).ID(), nil
}
}
return "", fs.ErrorDirNotFound
}

// mountSharedFolders mount a shared folder to the root namespace
func (f *Fs) mountSharedFolder(id string) error {
arg := sharing.MountFolderArg{
SharedFolderId: id,
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.sharing.MountFolder(&arg)
return shouldRetry(err)
})
return err
}

// listSharedFolders lists shared the user as access to (note this means individual
// files not files contained in shared folders)
func (f *Fs) listReceivedFiles() (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFilesResult
for {
if !started {
arg := sharing.ListFilesArg{
Limit: 100,
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListReceivedFiles(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
started = true
} else {
arg := sharing.ListFilesContinueArg{
Cursor: res.Cursor,
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListReceivedFilesContinue(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
}
}
for _, entry := range res.Entries {
fmt.Printf("%+v\n", entry)
entryPath := entry.Name
o := &Object{
fs: f,
url: entry.PreviewUrl,
remote: entryPath,
modTime: entry.TimeInvited,
}
if err != nil {
return nil, err
}
entries = append(entries, o)
}
if res.Cursor == "" {
break
}
}
return entries, nil
}

func (f *Fs) findSharedFile(name string) (o *Object, err error) {
files, err := f.listReceivedFiles()
if err != nil {
return nil, err
}
for _, entry := range files {
if entry.(*Object).remote == name {
return entry.(*Object), nil
}
}
return nil, fs.ErrorObjectNotFound
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.

@@ -478,6 +701,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.opt.SharedFiles {
return f.listReceivedFiles()
}
if f.opt.SharedFolders {
return f.listSharedFolders()
}

root := f.slashRoot
if dir != "" {
root += "/" + dir

@@ -541,7 +771,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
remote := path.Join(dir, leaf)
if folderInfo != nil {
d := fs.NewDir(remote, time.Now())
d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
entries = append(entries, d)
} else if fileInfo != nil {
o, err := f.newObjectWithInfo(remote, fileInfo)

@@ -564,6 +794,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if f.opt.SharedFiles || f.opt.SharedFolders {
return nil, fserrors.NoRetryError(errors.New("not support in shared files mode"))
}
// Temporary Object under construction
o := &Object{
fs: f,

@@ -579,6 +812,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
if f.opt.SharedFiles || f.opt.SharedFolders {
return fserrors.NoRetryError(errors.New("not support in shared files mode"))
}
root := path.Join(f.slashRoot, dir)

// can't create or run metadata on root

@@ -656,6 +892,9 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
if f.opt.SharedFiles || f.opt.SharedFolders {
return fserrors.NoRetryError(errors.New("not support in shared files mode"))
}
return f.purgeCheck(ctx, dir, true)
}

@@ -927,8 +1166,16 @@ func (o *Object) Remote() string {
return o.remote
}

// ID returns the object id
func (o *Object) ID() string {
return o.id
}

// Hash returns the dropbox special hash
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
return "", fserrors.NoRetryError(errors.New("not support in shared files mode"))
}
if t != DbHashType {
return "", hash.ErrUnsupported
}

@@ -948,6 +1195,7 @@ func (o *Object) Size() int64 {
//
// This isn't a complete set of metadata and has an inacurate date
func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
o.id = info.Id
o.bytes = int64(info.Size)
o.modTime = info.ClientModified
o.hash = info.ContentHash

@@ -1016,10 +1264,27 @@ func (o *Object) Storable() bool {

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.SharedFiles {
if len(options) != 0 {
return nil, errors.New("OpenOptions not supported for shared files")
}
arg := sharing.GetSharedLinkMetadataArg{
Url: o.url,
}
err = o.fs.pacer.Call(func() (bool, error) {
_, in, err = o.fs.sharing.GetSharedLinkFile(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
return
}

fs.FixRangeOption(options, o.bytes)
headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
Path: o.id,
ExtraHeaders: headers,
}
err = o.fs.pacer.Call(func() (bool, error) {

@@ -1153,6 +1418,9 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
return fserrors.NoRetryError(errors.New("not support in shared files mode"))
}
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))

@@ -1181,6 +1449,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
return fserrors.NoRetryError(errors.New("not support in shared files mode"))
}
err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),

@@ -1201,4 +1472,5 @@ var (
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)
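The shared-folder branch added to NewFs above boils down to: resolve the first path segment to a shared folder id, mount that folder into the user's root namespace (treating "already mounted" as success), and then fall back to normal operation. The following condensed Go sketch shows just that control flow under stated assumptions — the sharedFolderAPI interface and all names here are stand-ins, not part of rclone or the Dropbox SDK.

```go
package main

import (
	"errors"
	"fmt"
)

// sharedFolderAPI is a hypothetical stand-in for the Dropbox sharing client.
type sharedFolderAPI interface {
	ListSharedFolders() (map[string]string, error) // name -> id
	MountFolder(id string) error
}

var errAlreadyMounted = errors.New("already mounted")

// mountSharedRoot finds the shared folder named `name`, mounts it into the
// root namespace, and reports whether normal operations can proceed.
func mountSharedRoot(api sharedFolderAPI, name string) error {
	folders, err := api.ListSharedFolders()
	if err != nil {
		return err
	}
	id, ok := folders[name]
	if !ok {
		return fmt.Errorf("shared folder %q not found", name)
	}
	if err := api.MountFolder(id); err != nil && !errors.Is(err, errAlreadyMounted) {
		return err // any error other than "already mounted" is fatal
	}
	// From here on the folder behaves like a normal folder in the root
	// namespace, so shared-folder mode can be switched off again.
	return nil
}

// fakeAPI is a toy implementation so the sketch runs end to end.
type fakeAPI struct{ mounted map[string]bool }

func (f *fakeAPI) ListSharedFolders() (map[string]string, error) {
	return map[string]string{"reports": "id-123"}, nil
}

func (f *fakeAPI) MountFolder(id string) error {
	if f.mounted[id] {
		return errAlreadyMounted
	}
	f.mounted[id] = true
	return nil
}

func main() {
	api := &fakeAPI{mounted: map[string]bool{}}
	fmt.Println(mountSharedRoot(api, "reports")) // <nil>
	fmt.Println(mountSharedRoot(api, "reports")) // <nil> ("already mounted" is ignored)
}
```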
@@ -1213,7 +1213,7 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
// Set the file to be a sparse file (important on Windows)
err = file.SetSparse(out)
if err != nil {
fs.Debugf(o, "Failed to set sparse: %v", err)
fs.Errorf(o, "Failed to set sparse: %v", err)
}
}
@@ -159,71 +159,6 @@ type FolderInfoResponse struct {
Email string `json:"email"`
}

// ShardInfoResponse ...
type ShardInfoResponse struct {
Email string `json:"email"`
Body struct {
Video []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"video"`
ViewDirect []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view_direct"`
WeblinkView []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_view"`
WeblinkVideo []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_video"`
WeblinkGet []struct {
Count int `json:"count"`
URL string `json:"url"`
} `json:"weblink_get"`
Stock []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"stock"`
WeblinkThumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_thumbnails"`
PublicUpload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"public_upload"`
Auth []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"auth"`
Web []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"web"`
View []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view"`
Upload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"upload"`
Get []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"get"`
Thumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"thumbnails"`
} `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}

// CleanupResponse ...
type CleanupResponse struct {
Email string `json:"email"`
@@ -37,6 +37,7 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"

"github.com/pkg/errors"

@@ -1861,30 +1862,30 @@ func (f *Fs) uploadShard(ctx context.Context) (string, error) {
return f.shardURL, nil
}

token, err := f.accessToken()
if err != nil {
return "", err
}

opts := rest.Opts{
Method: "GET",
Path: "/api/m1/dispatcher",
Parameters: url.Values{
"client_id": {api.OAuthClientID},
"access_token": {token},
},
RootURL: api.DispatchServerURL,
Method: "GET",
Path: "/u",
}

var info api.ShardInfoResponse
var (
res *http.Response
url string
err error
)
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(res, err, f, &opts)
res, err = f.srv.Call(ctx, &opts)
if err == nil {
url, err = readBodyWord(res)
}
return fserrors.ShouldRetry(err), err
})
if err != nil {
closeBody(res)
return "", err
}

f.shardURL = info.Body.Upload[0].URL
f.shardURL = url
f.shardExpiry = time.Now().Add(shardExpirySec * time.Second)
fs.Debugf(f, "new upload shard: %s", f.shardURL)

@@ -2116,7 +2117,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}

start, end, partial := getTransferRange(o.size, options...)
start, end, partialRequest := getTransferRange(o.size, options...)

headers := map[string]string{
"Accept": "*/*",
"Content-Type": "application/octet-stream",
}
if partialRequest {
rangeStr := fmt.Sprintf("bytes=%d-%d", start, end-1)
headers["Range"] = rangeStr
// headers["Content-Range"] = rangeStr
headers["Accept-Ranges"] = "bytes"
}

// TODO: set custom timeouts
opts := rest.Opts{

@@ -2127,10 +2139,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
"client_id": {api.OAuthClientID},
"token": {token},
},
ExtraHeaders: map[string]string{
"Accept": "*/*",
"Range": fmt.Sprintf("bytes=%d-%d", start, end-1),
},
ExtraHeaders: headers,
}

var res *http.Response

@@ -2151,18 +2160,36 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}

var hasher gohash.Hash
if !partial {
// Server should respond with Status 206 and Content-Range header to a range
// request. Status 200 (and no Content-Range) means a full-content response.
partialResponse := res.StatusCode == 206

var (
hasher gohash.Hash
wrapStream io.ReadCloser
)
if !partialResponse {
// Cannot check hash of partial download
hasher = mrhash.New()
}
wrapStream := &endHandler{
wrapStream = &endHandler{
ctx: ctx,
stream: res.Body,
hasher: hasher,
o: o,
server: server,
}
if partialRequest && !partialResponse {
fs.Debugf(o, "Server returned full content instead of range")
if start > 0 {
// Discard the beginning of the data
_, err = io.CopyN(ioutil.Discard, wrapStream, start)
if err != nil {
return nil, err
}
}
wrapStream = readers.NewLimitedReadCloser(wrapStream, end-start)
}
return wrapStream, nil
}
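The comments in the hunk above capture the key HTTP detail: a 206 status means the server honoured the Range header, while a 200 means it sent the whole object, in which case the code discards the first `start` bytes and caps the stream at `end-start`. Here is a small, self-contained Go sketch of that fallback under the same assumptions; the function name and parameters are illustrative, not rclone's API.

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// rangeFromFull turns a full-content stream into the requested [start, end)
// window: skip the leading bytes, then limit what remains.
func rangeFromFull(full io.Reader, start, end int64) (io.Reader, error) {
	if start > 0 {
		// Discard the beginning of the data
		if _, err := io.CopyN(io.Discard, full, start); err != nil {
			return nil, err
		}
	}
	return io.LimitReader(full, end-start), nil
}

func main() {
	body := strings.NewReader("0123456789") // pretend the server sent everything (HTTP 200)
	r, _ := rangeFromFull(body, 3, 7)        // but we only asked for bytes 3..6
	data, _ := io.ReadAll(r)
	fmt.Printf("%q\n", data) // "3456"
}
```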
@@ -646,7 +646,6 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
400, // Bad request (seen in "Next token is expired")
401, // Unauthorized (seen in "Token has expired")
408, // Request Timeout
423, // Locked - get this on folders sometimes
backend/s3/s3.go (106 changes)

@@ -58,7 +58,7 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{

@@ -94,6 +94,9 @@ func init() {
}, {
Value: "StackPath",
Help: "StackPath Object Storage",
}, {
Value: "TencentCOS",
Help: "Tencent Cloud Object Storage (COS)",
}, {
Value: "Wasabi",
Help: "Wasabi Object Storage",

@@ -166,7 +169,7 @@ func init() {
Help: "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
}, {
Value: "ap-east-1",
Help: "Asia Patific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
Help: "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",

@@ -185,7 +188,7 @@ func init() {
}, {
Name: "region",
Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,Scaleway",
Provider: "!AWS,Alibaba,Scaleway,TencentCOS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure. Will use v4 signatures and an empty region.",

@@ -476,10 +479,73 @@ func init() {
Value: "s3.eu-central-1.stackpathstorage.com",
Help: "EU Endpoint",
}},
}, {
// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
Name: "endpoint",
Help: "Endpoint for Tencent COS API.",
Provider: "TencentCOS",
Examples: []fs.OptionExample{{
Value: "cos.ap-beijing.myqcloud.com",
Help: "Beijing Region.",
}, {
Value: "cos.ap-nanjing.myqcloud.com",
Help: "Nanjing Region.",
}, {
Value: "cos.ap-shanghai.myqcloud.com",
Help: "Shanghai Region.",
}, {
Value: "cos.ap-guangzhou.myqcloud.com",
Help: "Guangzhou Region.",
}, {
Value: "cos.ap-nanjing.myqcloud.com",
Help: "Nanjing Region.",
}, {
Value: "cos.ap-chengdu.myqcloud.com",
Help: "Chengdu Region.",
}, {
Value: "cos.ap-chongqing.myqcloud.com",
Help: "Chongqing Region.",
}, {
Value: "cos.ap-hongkong.myqcloud.com",
Help: "Hong Kong (China) Region.",
}, {
Value: "cos.ap-singapore.myqcloud.com",
Help: "Singapore Region.",
}, {
Value: "cos.ap-mumbai.myqcloud.com",
Help: "Mumbai Region.",
}, {
Value: "cos.ap-seoul.myqcloud.com",
Help: "Seoul Region.",
}, {
Value: "cos.ap-bangkok.myqcloud.com",
Help: "Bangkok Region.",
}, {
Value: "cos.ap-tokyo.myqcloud.com",
Help: "Tokyo Region.",
}, {
Value: "cos.na-siliconvalley.myqcloud.com",
Help: "Silicon Valley Region.",
}, {
Value: "cos.na-ashburn.myqcloud.com",
Help: "Virginia Region.",
}, {
Value: "cos.na-toronto.myqcloud.com",
Help: "Toronto Region.",
}, {
Value: "cos.eu-frankfurt.myqcloud.com",
Help: "Frankfurt Region.",
}, {
Value: "cos.eu-moscow.myqcloud.com",
Help: "Moscow Region.",
}, {
Value: "cos.accelerate.myqcloud.com",
Help: "Use Tencent COS Accelerate Endpoint.",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",

@@ -666,7 +732,7 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath,TencentCOS",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.

@@ -678,9 +744,13 @@ For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview
Note that this ACL is applied when server side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
Examples: []fs.OptionExample{{
Value: "default",
Help: "Owner gets Full_CONTROL. No one else has access rights (default).",
Provider: "TencentCOS",
}, {
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
Provider: "!IBMCOS",
Provider: "!IBMCOS,TencentCOS",
}, {
Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",

@@ -842,6 +912,24 @@ isn't set then "acl" is used instead.`,
Value: "STANDARD_IA",
Help: "Infrequent access storage mode.",
}},
}, {
// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
Name: "storage_class",
Help: "The storage class to use when storing new objects in Tencent COS.",
Provider: "TencentCOS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Default",
}, {
Value: "STANDARD",
Help: "Standard storage class",
}, {
Value: "ARCHIVE",
Help: "Archive storage mode.",
}, {
Value: "STANDARD_IA",
Help: "Infrequent access storage mode.",
}},
}, {
// Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes
Name: "storage_class",

@@ -975,7 +1063,7 @@ if false then rclone will use virtual path style. See [the AWS S3
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
for more info.

Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
Some providers (eg AWS, Aliyun OSS, Netease COS or Tencent COS) require this set to
false - rclone will do this automatically based on the provider
setting.`,
Default: true,

@@ -1305,7 +1393,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
if opt.Region == "" {
opt.Region = "us-east-1"
}
if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.UseAccelerateEndpoint {
if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.Provider == "TencentCOS" || opt.UseAccelerateEndpoint {
opt.ForcePathStyle = false
}
if opt.Provider == "Scaleway" && opt.MaxUploadParts > 1000 {

@@ -1587,7 +1675,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
//
// So we enable only on providers we know supports it properly, all others can retry when a
// XML Syntax error is detected.
var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio")
var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio" || f.opt.Provider == "TencentCOS")
for {
// FIXME need to implement ALL loop
req := s3.ListObjectsInput{
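The TencentCOS additions above follow the backend's usual pattern: each provider gets defaults for addressing style and for URL-encoded listings based on what it is known to support. The sketch below pulls those two decisions out into a standalone function so the pattern is easier to see; it is illustrative only — the real backend keeps these checks inline in s3Connection and list, and the struct here is not rclone's.

```go
package main

import "fmt"

// providerDefaults captures two per-provider choices made in the diff above.
type providerDefaults struct {
	forcePathStyle    bool // path-style vs virtual-host-style bucket addressing
	urlEncodeListings bool // ask the server to URL-encode list responses
}

func defaultsFor(provider string) providerDefaults {
	d := providerDefaults{forcePathStyle: true}
	switch provider {
	case "AWS", "Alibaba", "Netease", "Scaleway", "TencentCOS":
		d.forcePathStyle = false // these providers want virtual-host style
	}
	switch provider {
	case "AWS", "Wasabi", "Alibaba", "Minio", "TencentCOS":
		d.urlEncodeListings = true // known to implement encoded listings correctly
	}
	return d
}

func main() {
	fmt.Printf("%+v\n", defaultsFor("TencentCOS")) // {forcePathStyle:false urlEncodeListings:true}
	fmt.Printf("%+v\n", defaultsFor("Ceph"))       // {forcePathStyle:true urlEncodeListings:false}
}
```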
@@ -46,7 +46,7 @@ type Library struct {
Encrypted bool `json:"encrypted"`
Owner string `json:"owner"`
ID string `json:"id"`
Size int `json:"size"`
Size int64 `json:"size"`
Name string `json:"name"`
Modified int64 `json:"mtime"`
}
@@ -1004,7 +1004,7 @@ func (f *Fs) listLibraries(ctx context.Context) (entries fs.DirEntries, err erro

for _, library := range libraries {
d := fs.NewDir(library.Name, time.Unix(library.Modified, 0))
d.SetSize(int64(library.Size))
d.SetSize(library.Size)
entries = append(entries, d)
}
@@ -82,6 +82,9 @@ func init() {
Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
in the new OpenSSH format can't be used.`,
IsPassword: true,
}, {
Name: "pubkey_file",
Help: "Optional path to public key file; set this if you have a signed certificate you want to use for authentication." + env.ShellExpandHelp,
}, {
Name: "key_use_agent",
Help: `When set forces the usage of the ssh-agent.

@@ -190,6 +193,7 @@ type Options struct {
KeyPem string `config:"key_pem"`
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
PubKeyFile string `config:"pubkey_file"`
KeyUseAgent bool `config:"key_use_agent"`
UseInsecureCipher bool `config:"use_insecure_cipher"`
DisableHashCheck bool `config:"disable_hashcheck"`

@@ -438,6 +442,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

keyFile := env.ShellExpand(opt.KeyFile)
pubkeyFile := env.ShellExpand(opt.PubKeyFile)
//keyPem := env.ShellExpand(opt.KeyPem)
// Add ssh agent-auth if no password or file or key PEM specified
if (opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {

@@ -507,7 +512,29 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "failed to parse private key file")
}
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))

// If a public key has been specified then use that
if pubkeyFile != "" {
certfile, err := ioutil.ReadFile(pubkeyFile)
if err != nil {
return nil, errors.Wrap(err, "unable to read cert file")
}

pk, _, _, _, err := ssh.ParseAuthorizedKey(certfile)
if err != nil {
return nil, errors.Wrap(err, "unable to parse cert file")
}

// And the signer for this, which includes the private key signer
// This is what we'll pass to the ssh client.
pubsigner, err := ssh.NewCertSigner(pk.(*ssh.Certificate), signer)
if err != nil {
return nil, errors.Wrap(err, "error generating cert signer")
}
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(pubsigner))
} else {
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))
}
}

// Auth from password if specified

@@ -1087,7 +1114,7 @@ func shellEscape(str string) string {
func parseHash(bytes []byte) string {
// For strings with backslash *sum writes a leading \
// https://unix.stackexchange.com/q/313733/94054
return strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0] // Split at hash / filename separator
return strings.ToLower(strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0]) // Split at hash / filename separator / all convert to lowercase
}

// Parses the byte array output from the SSH session
@@ -280,7 +280,7 @@ func stripVersion(goarch string) string {

// build the binary in dir returning success or failure
func compileArch(version, goos, goarch, dir string) bool {
log.Printf("Compiling %s/%s", goos, goarch)
log.Printf("Compiling %s/%s into %s", goos, goarch, dir)
output := filepath.Join(dir, "rclone")
if goos == "windows" {
output += ".exe"

@@ -298,7 +298,6 @@ func compileArch(version, goos, goarch, dir string) bool {
"go", "build",
"--ldflags", "-s -X github.com/rclone/rclone/fs.Version=" + version,
"-trimpath",
"-i",
"-o", output,
"-tags", *tags,
"..",

@@ -325,7 +324,7 @@ func compileArch(version, goos, goarch, dir string) bool {
artifacts := []string{buildZip(dir)}
// build a .deb and .rpm if appropriate
if goos == "linux" {
artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
}
if *copyAs != "" {
for _, artifact := range artifacts {
@@ -10,6 +10,8 @@ import (
"os"
"os/exec"
"regexp"

"github.com/coreos/go-semver/semver"
)

// version=$(sed <VERSION -e 's/\.[0-9]+*$//g')

@@ -28,7 +30,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
cmd := exec.Command("git", "log", "--oneline", from+".."+to)
out, err := cmd.Output()
if err != nil {
log.Fatalf("failed to run git log: %v", err)
log.Fatalf("failed to run git log %s: %v", from+".."+to, err)
}
logMap = map[string]string{}
logs = []string{}

@@ -53,15 +55,20 @@ func main() {
if len(args) != 0 {
log.Fatalf("Syntax: %s", os.Args[0])
}
// v1.54.0
versionBytes, err := ioutil.ReadFile("VERSION")
if err != nil {
log.Fatalf("Failed to read version: %v", err)
}
i := bytes.LastIndexByte(versionBytes, '.')
version := string(versionBytes[:i])
log.Printf("Finding commits not in stable %s", version)
masterMap, masterLogs := readCommits(version+".0", "master")
stableMap, _ := readCommits(version+".0", version+"-stable")
if versionBytes[0] == 'v' {
versionBytes = versionBytes[1:]
}
versionBytes = bytes.TrimSpace(versionBytes)
semver := semver.New(string(versionBytes))
stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
log.Printf("Finding commits in %v not in stable %s", semver, stable)
masterMap, masterLogs := readCommits(stable+".0", "master")
stableMap, _ := readCommits(stable+".0", stable+"-stable")
for _, logMessage := range masterLogs {
// Commit found in stable already
if _, found := stableMap[logMessage]; found {
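The rewritten logic above derives the previous stable branch from the development version in VERSION by decrementing the minor component. A tiny worked example of that arithmetic, using the same go-semver dependency the diff adds (the surrounding program here is illustrative only):

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	v := semver.New("1.54.0") // VERSION contents with the leading "v" stripped
	stable := fmt.Sprintf("v%d.%d", v.Major, v.Minor-1)
	fmt.Println(stable)                              // v1.53
	fmt.Println(stable + ".0 .. master")             // commit range scanned on master
	fmt.Println(stable + ".0 .. " + stable + "-stable") // commit range scanned on the stable branch
}
```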
@@ -29,6 +29,7 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.")
AddFlags(cmdFlags)
}

@@ -50,7 +51,7 @@ the source match the files in the destination, not the other way
around. This means that extra files in the destination that are not in
the source will not be detected.

The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--src-only|
The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--match|
and |--error| flags write paths, one per line, to the file name (or
stdout if it is |-|) supplied. What they write is described in the
help below. For example |--differ| will write all paths which are
@@ -14,7 +14,7 @@ func init() {

var commandDefinition = &cobra.Command{
Use: "cleanup remote:path",
Short: `Clean up the remote if possible`,
Short: `Clean up the remote if possible.`,
Long: `
Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.
@@ -22,7 +22,7 @@ func init() {

var commandDefinition = &cobra.Command{
Use: "copy source:path dest:path",
Short: `Copy files from source to dest, skipping already copied`,
Short: `Copy files from source to dest, skipping already copied.`,
Long: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or
@@ -15,7 +15,7 @@ func init() {

var commandDefinition = &cobra.Command{
Use: "copyto source:path dest:path",
Short: `Copy files from source to dest, skipping already copied`,
Short: `Copy files from source to dest, skipping already copied.`,
Long: `
If source:path is a file or directory then it copies it to a file or
directory named dest:path.
@@ -44,7 +44,7 @@ func init() {

var commandDefinition = &cobra.Command{
Use: "lsf remote:path",
Short: `List directories and objects in remote:path formatted for parsing`,
Short: `List directories and objects in remote:path formatted for parsing.`,
Long: `
List the contents of the source path (directories and objects) to
standard output in a form which is easy to parse by scripts. By
@@ -67,8 +67,8 @@ func setAttr(node vfs.Node, attr *fuse.Attr) {
modTime := node.ModTime()
// set attributes
vfs := node.VFS()
attr.Owner.Gid = vfs.Opt.UID
attr.Owner.Uid = vfs.Opt.GID
attr.Owner.Gid = vfs.Opt.GID
attr.Owner.Uid = vfs.Opt.UID
attr.Mode = getMode(node)
attr.Size = Size
attr.Nlink = 1
@@ -192,6 +192,9 @@ Stopping the mount manually:
# OS X
umount /path/to/local/mount

**Note**: As of ` + "`rclone` 1.52.2, `rclone mount`" + ` now requires Go version 1.13
or newer on some platforms depending on the underlying FUSE library in use.

### Installing on Windows

To run rclone ` + commandName + ` on Windows, you will need to
@@ -17,7 +17,7 @@ func init() {

var commandDefinition = &cobra.Command{
Use: "obscure password",
Short: `Obscure password for use in the rclone config file`,
Short: `Obscure password for use in the rclone config file.`,
Long: `In the rclone config file, human readable passwords are
obscured. Obscuring them is done by encrypting them and writing them
out in base64. This is **not** a secure way of encrypting these
@@ -148,6 +148,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
{{< provider name="Tardigrade" home="https://tardigrade.io/" config="/tardigrade/" >}}
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
{{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}}
{{< provider name="Yandex Disk" home="https://disk.yandex.com/" config="/yandex/" >}}
@@ -409,3 +409,7 @@ put them back in again.` >}}
* Lucas Kanashiro <lucas.kanashiro@canonical.com>
* WarpedPixel <WarpedPixel@users.noreply.github.com>
* Sam Edwards <sam@samedwards.ca>
* wjielai <gouki0123@gmail.com>
* Muffin King <jinxz_k@live.com>
* Christopher Stewart <6573710+1f47a@users.noreply.github.com>
* Russell Cattelan <cattelan@digitalelves.com>
@@ -5,6 +5,36 @@ description: "Rclone Changelog"

# Changelog

## v1.53.1 - 2020-09-13

[See commits](https://github.com/rclone/rclone/compare/v1.53.0...v1.53.1)

* Bug Fixes
    * accounting: Remove new line from end of --stats-one-line display (Nick Craig-Wood)
    * check
        * Add back missing --download flag (Nick Craig-Wood)
        * Fix docs (Nick Craig-Wood)
    * docs
        * Note --log-file does append (Nick Craig-Wood)
        * Add full stops for consistency in rclone --help (edwardxml)
        * Add Tencent COS to s3 provider list (wjielai)
        * Updated mount command to reflect that it requires Go 1.13 or newer (Evan Harris)
        * jottacloud: Mention that uploads from local disk will not need to cache files to disk for md5 calculation (albertony)
        * Fix formatting of rc docs page (Nick Craig-Wood)
    * build
        * Include vendor tar ball in release and fix startdev (Nick Craig-Wood)
        * Fix "Illegal instruction" error for ARMv6 builds (Nick Craig-Wood)
        * Fix architecture name in ARMv7 build (Nick Craig-Wood)
* VFS
    * Fix spurious error "vfs cache: failed to _ensure cache EOF" (Nick Craig-Wood)
    * Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
* Local
    * Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
* Drive
    * Re-adds special oauth help text (Tim Gallant)
* Opendrive
    * Do not retry 400 errors (Evan Harris)

## v1.53.0 - 2020-09-02

[See commits](https://github.com/rclone/rclone/compare/v1.52.0...v1.53.0)
@@ -757,6 +757,8 @@ This can be useful for tracking down problems with syncs in
combination with the `-v` flag. See the [Logging section](#logging)
for more info.

If FILE exists then rclone will append to it.

Note that if you are using the `logrotate` program to manage rclone's
logs, then you should use the `copytruncate` option as rclone doesn't
have a signal to rotate logs.
@@ -202,6 +202,39 @@ Impersonate this user when using a business account.
- Type: string
- Default: ""

#### --dropbox-shared-files

Instructs rclone to work on individual shared files.

In this mode rclone's features are extremely limited - only list (ls, lsl, etc.)
operations and read operations (e.g. downloading) are supported in this mode.
All other operations will be disabled.

- Config: shared_files
- Env Var: RCLONE_DROPBOX_SHARED_FILES
- Type: bool
- Default: false

#### --dropbox-shared-folders

Instructs rclone to work on shared folders.

When this flag is used with no path only the List operation is supported and
all available shared folders will be listed. If you specify a path the first part
will be interpreted as the name of shared folder. Rclone will then try to mount this
shared to the root namespace. On success shared folder rclone proceeds normally.
The shared folder is now pretty much a normal folder and all normal operations
are supported.

Note that we don't unmount the shared folder afterwards so the
--dropbox-shared-folders can be omitted after the first use of a particular
shared folder.

- Config: shared_folders
- Env Var: RCLONE_DROPBOX_SHARED_FOLDERS
- Type: bool
- Default: false

#### --dropbox-encoding

This sets the encoding for the backend.
@@ -148,8 +148,13 @@ flag.
Note that Jottacloud requires the MD5 hash before upload so if the
source does not have an MD5 checksum then the file will be cached
temporarily on disk (wherever the `TMPDIR` environment variable points
to) before it is uploaded. Small files will be cached in memory - see
to) before it is uploaded. Small files will be cached in memory - see
the [--jottacloud-md5-memory-limit](#jottacloud-md5-memory-limit) flag.
When uploading from local disk the source checksum is always available,
so this does not apply. Starting with rclone version 1.52 the same is
true for crypted remotes (in older versions the crypt backend would not
calculate hashes for uploads from local disk, so the Jottacloud
backend had to do it as described above).

#### Restricted filename characters
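The paragraph above describes the buffering strategy when the source cannot supply an MD5 up front: small files are hashed in memory, larger ones are spooled to a temp file under TMPDIR and hashed while being written. The Go sketch below illustrates that idea under stated assumptions; the function name, signature and threshold parameter are made up for this example and are not rclone's API.

```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// md5Source returns the hex MD5 of r plus a replayable reader for the upload.
// Small inputs are kept in memory; larger ones are spooled to a temp file.
func md5Source(r io.Reader, size, memoryLimit int64) (string, io.Reader, func(), error) {
	hasher := md5.New()
	cleanup := func() {}
	if size <= memoryLimit {
		var buf bytes.Buffer
		if _, err := io.Copy(io.MultiWriter(hasher, &buf), r); err != nil {
			return "", nil, cleanup, err
		}
		return hex.EncodeToString(hasher.Sum(nil)), &buf, cleanup, nil
	}
	tmp, err := os.CreateTemp("", "upload-") // honours TMPDIR
	if err != nil {
		return "", nil, cleanup, err
	}
	cleanup = func() { tmp.Close(); os.Remove(tmp.Name()) }
	if _, err := io.Copy(io.MultiWriter(hasher, tmp), r); err != nil {
		return "", nil, cleanup, err
	}
	if _, err := tmp.Seek(0, io.SeekStart); err != nil {
		return "", nil, cleanup, err
	}
	return hex.EncodeToString(hasher.Sum(nil)), tmp, cleanup, nil
}

func main() {
	sum, body, cleanup, _ := md5Source(strings.NewReader("hello"), 5, 1<<20)
	defer cleanup()
	data, _ := io.ReadAll(body)
	fmt.Println(sum, string(data)) // 5d41402abc4b2a76b9719d911017c592 hello
}
```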
@@ -537,6 +537,8 @@ OR
"result": "<Raw command line output>"
}

```

**Authentication is required for this call.**

### core/gc: Runs a garbage collection. {#core-gc}

@@ -1212,7 +1214,7 @@ This allows you to remove a plugin using it's name

This takes parameters

- name: name of the plugin in the format <author>/<plugin_name>
- name: name of the plugin in the format `author`/`plugin_name`

Eg

@@ -1226,7 +1228,7 @@ This allows you to remove a plugin using it's name

This takes the following parameters

- name: name of the plugin in the format <author>/<plugin_name>
- name: name of the plugin in the format `author`/`plugin_name`

Eg
@@ -18,6 +18,7 @@ The S3 backend can be used with a number of different providers:
|
||||
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
|
||||
{{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
|
||||
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
|
||||
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
|
||||
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" end="true" >}}
|
||||
{{< /provider_list >}}
|
||||
|
||||
@@ -138,7 +139,7 @@ Choose a number from below, or type in your own value
/ Asia Pacific (Mumbai)
13 | Needs location constraint ap-south-1.
\ "ap-south-1"
/ Asia Patific (Hong Kong) Region
/ Asia Pacific (Hong Kong) Region
14 | Needs location constraint ap-east-1.
\ "ap-east-1"
/ South America (Sao Paulo) Region
@@ -488,6 +489,8 @@ Choose your S3 provider.
- StackPath Object Storage
- "Wasabi"
- Wasabi Object Storage
- "TencentCOS"
- Tencent Cloud Object Storage (COS)
- "Other"
- Any other S3 compatible provider

@@ -1122,7 +1125,7 @@ The storage class to use when storing new objects in S3.

### Advanced Options

Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)).

#### --s3-bucket-acl

@@ -2212,6 +2215,138 @@ d) Delete this remote
y/e/d> y
```

### Tencent COS {#tencent-cos}

[Tencent Cloud Object Storage (COS)](https://intl.cloud.tencent.com/product/cos) is a distributed storage service offered by Tencent Cloud for unstructured data. It is secure, stable, massive, convenient, low-delay and low-cost.

To configure access to Tencent COS, follow the steps below:

1. Run `rclone config` and select `n` for a new remote.

```
rclone config
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
```

2. Give the name of the configuration. For example, name it 'cos'.

```
name> cos
```

3. Select `s3` storage.

```
Choose a number from below, or type in your own value
1 / 1Fichier
\ "fichier"
2 / Alias for an existing remote
\ "alias"
3 / Amazon Drive
\ "amazon cloud drive"
4 / Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)
\ "s3"
[snip]
Storage> s3
```

4. Select `TencentCOS` provider.
```
Choose a number from below, or type in your own value
1 / Amazon Web Services (AWS) S3
\ "AWS"
[snip]
11 / Tencent Cloud Object Storage (COS)
\ "TencentCOS"
[snip]
provider> TencentCOS
```

5. Enter your Tencent Cloud SecretId and SecretKey.

```
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
Only applies if access_key_id and secret_access_key is blank.
Enter a boolean value (true or false). Press Enter for the default ("false").
Choose a number from below, or type in your own value
1 / Enter AWS credentials in the next step
\ "false"
2 / Get AWS credentials from the environment (env vars or IAM)
\ "true"
env_auth> 1
AWS Access Key ID.
Leave blank for anonymous access or runtime credentials.
Enter a string value. Press Enter for the default ("").
access_key_id> AKIDxxxxxxxxxx
AWS Secret Access Key (password)
Leave blank for anonymous access or runtime credentials.
Enter a string value. Press Enter for the default ("").
secret_access_key> xxxxxxxxxxx
```

6. Select the endpoint for Tencent COS. These are the standard endpoints for the different regions.

```
1 / Beijing Region.
\ "cos.ap-beijing.myqcloud.com"
2 / Nanjing Region.
\ "cos.ap-nanjing.myqcloud.com"
3 / Shanghai Region.
\ "cos.ap-shanghai.myqcloud.com"
4 / Guangzhou Region.
\ "cos.ap-guangzhou.myqcloud.com"
[snip]
endpoint> 4
```

7. Choose the ACL and storage class.

```
Note that this ACL is applied when server side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
1 / Owner gets Full_CONTROL. No one else has access rights (default).
\ "default"
[snip]
acl> 1
The storage class to use when storing new objects in Tencent COS.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
1 / Default
\ ""
[snip]
storage_class> 1
Edit advanced config? (y/n)
y) Yes
n) No (default)
y/n> n
Remote config
--------------------
[cos]
type = s3
provider = TencentCOS
env_auth = false
access_key_id = xxx
secret_access_key = xxx
endpoint = cos.ap-guangzhou.myqcloud.com
acl = default
--------------------
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
Current remotes:

Name                 Type
====                 ====
cos                  s3
```

### Netease NOS ###

For Netease NOS configure as per the configurator `rclone config`
@@ -102,7 +102,7 @@ excess files in the directory.
The SFTP remote supports three authentication methods:

* Password
* Key file
* Key file, including certificate signed keys
* ssh-agent

Key files should be PEM-encoded private key files. For instance `/home/$USER/.ssh/id_rsa`.
@@ -128,6 +128,17 @@ Using an ssh-agent is the only way to load encrypted OpenSSH keys at the moment.
If you set the `--sftp-ask-password` option, rclone will prompt for a
password when needed and no password has been configured.

If you have a certificate then you can provide the path to the public key that contains the certificate. For example:

```
[remote]
type = sftp
host = example.com
user = sftpuser
key_file = ~/id_rsa
pubkey_file = ~/id_rsa-cert.pub
```

### ssh-agent on macOS ###

Note that there seem to be various problems with using an ssh-agent on
@@ -247,6 +258,18 @@ when the ssh-agent contains many keys.
- Type: bool
- Default: false

#### --sftp-pubkey-file

Path to public key file, set if you want to use certificate based authentication.

Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.

- Config: pubkey_file
- Env Var: RCLONE_SFTP_PUBKEY_FILE
- Type: string
- Default: ""
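
As with other backend options, the value can also be supplied via the environment, for example (a sketch, with a hypothetical remote named `remote:`):

```
RCLONE_SFTP_PUBKEY_FILE=~/id_rsa-cert.pub rclone lsd remote:
```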

#### --sftp-use-insecure-cipher

Enable the use of insecure ciphers and key exchange methods.

@@ -1 +1 @@
v1.53.0
v1.54.0
@@ -26,6 +26,10 @@ var ErrorMaxTransferLimitReached = errors.New("Max transfer limit reached as set
// transfer limit is reached.
var ErrorMaxTransferLimitReachedFatal = fserrors.FatalError(ErrorMaxTransferLimitReached)

// ErrorMaxTransferLimitReachedGraceful is returned from operations.Copy when the max
// transfer limit is reached and a graceful stop is required.
var ErrorMaxTransferLimitReachedGraceful = fserrors.NoRetryError(ErrorMaxTransferLimitReached)

// Account limits and accounts for one transfer
type Account struct {
stats *StatsInfo
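
The two wrappers above classify the same underlying error differently, which is what the tests later in this diff rely on. A minimal, hypothetical sketch of how calling code can tell them apart (assuming the rclone module at this revision is importable; this is not part of the change itself):

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/fserrors"
)

func main() {
	// Hard cutoff: marked fatal, so the whole run should stop and not be retried.
	fmt.Println(fserrors.IsFatalError(accounting.ErrorMaxTransferLimitReachedFatal)) // true

	// Graceful cutoff: marked "no retry", letting in-flight transfers finish first.
	fmt.Println(fserrors.IsNoRetryError(accounting.ErrorMaxTransferLimitReachedGraceful)) // true
}
```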

@@ -272,7 +272,7 @@ func (s *StatsInfo) String() string {
}
}

_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s\n",
_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s",
dateString,
fs.SizeSuffix(s.bytes),
fs.SizeSuffix(totalSize).Unit("Bytes"),
@@ -283,6 +283,7 @@ func (s *StatsInfo) String() string {
)

if !fs.Config.StatsOneLine {
_, _ = buf.WriteRune('\n')
errorDetails := ""
switch {
case s.fatalError:
@@ -291,6 +292,7 @@ func (s *StatsInfo) String() string {
errorDetails = " (retrying may help)"
case s.errors != 0:
errorDetails = " (no need to retry)"

}

// Add only non zero stats

@@ -366,6 +366,8 @@ func (sg *statsGroups) sum() *StatsInfo {
sum.lastError = stats.lastError
}
sum.startedTransfers = append(sum.startedTransfers, stats.startedTransfers...)
sum.oldDuration += stats.oldDuration
sum.oldTimeRanges = append(sum.oldTimeRanges, stats.oldTimeRanges...)
}
stats.mu.RUnlock()
}

@@ -4,8 +4,10 @@ import (
"fmt"
"runtime"
"testing"
"time"

"github.com/rclone/rclone/fstest/testy"
"github.com/stretchr/testify/assert"
)

func TestStatsGroupOperations(t *testing.T) {
@@ -43,17 +45,26 @@ func TestStatsGroupOperations(t *testing.T) {
t.Parallel()
stats1 := NewStats()
stats1.bytes = 5
stats1.errors = 5
stats1.errors = 6
stats1.oldDuration = time.Second
stats1.oldTimeRanges = []timeRange{{time.Now(), time.Now().Add(time.Second)}}
stats2 := NewStats()
stats2.bytes = 10
stats2.errors = 12
stats2.oldDuration = 2 * time.Second
stats2.oldTimeRanges = []timeRange{{time.Now(), time.Now().Add(2 * time.Second)}}
sg := newStatsGroups()
sg.set("test1", stats1)
sg.set("test2", stats2)
sum := sg.sum()
if sum.bytes != stats1.bytes+stats2.bytes {
t.Fatalf("sum() => bytes %d, expected %d", sum.bytes, stats1.bytes+stats2.bytes)
}
if sum.errors != stats1.errors+stats2.errors {
t.Fatalf("sum() => errors %d, expected %d", sum.errors, stats1.errors+stats2.errors)
assert.Equal(t, stats1.bytes+stats2.bytes, sum.bytes)
assert.Equal(t, stats1.errors+stats2.errors, sum.errors)
assert.Equal(t, stats1.oldDuration+stats2.oldDuration, sum.oldDuration)
// dict can iterate in either order
a := timeRanges{stats1.oldTimeRanges[0], stats2.oldTimeRanges[0]}
b := timeRanges{stats2.oldTimeRanges[0], stats1.oldTimeRanges[0]}
if !assert.ObjectsAreEqual(a, sum.oldTimeRanges) {
assert.Equal(t, b, sum.oldTimeRanges)
}
})
@@ -72,8 +72,16 @@ func (tm *transferMap) _sortedSlice() []*Transfer {
for _, tr := range tm.items {
s = append(s, tr)
}
// sort by time first and if equal by name. Note that the relatively
// low time resolution on Windows can cause equal times.
sort.Slice(s, func(i, j int) bool {
return s[i].startedAt.Before(s[j].startedAt)
a, b := s[i], s[j]
if a.startedAt.Before(b.startedAt) {
return true
} else if !a.startedAt.Equal(b.startedAt) {
return false
}
return a.remote < b.remote
})
return s
}

@@ -363,7 +363,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
actionTaken = "Copied (server side copy)"
if fs.Config.MaxTransfer >= 0 && (accounting.Stats(ctx).GetBytes() >= int64(fs.Config.MaxTransfer) ||
(fs.Config.CutoffMode == fs.CutoffModeCautious && accounting.Stats(ctx).GetBytesWithPending()+src.Size() >= int64(fs.Config.MaxTransfer))) {
return nil, accounting.ErrorMaxTransferLimitReachedFatal
return nil, accounting.ErrorMaxTransferLimitReachedGraceful
}
if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) {
in := tr.Account(ctx, nil) // account the transfer

@@ -1440,7 +1440,7 @@ func TestCopyFileMaxTransfer(t *testing.T) {
err = operations.CopyFile(ctx, r.Fremote, r.Flocal, file3.Path, file3.Path)
require.NotNil(t, err)
assert.Contains(t, err.Error(), "Max transfer limit reached")
assert.True(t, fserrors.IsFatalError(err))
assert.True(t, fserrors.IsNoRetryError(err))
fstest.CheckItems(t, r.Flocal, file1, file2, file3, file4)
fstest.CheckItems(t, r.Fremote, file1)
@@ -379,7 +379,7 @@ OR
"result": "<Raw command line output>"
}

` + "```" + `
`,
})
}

@@ -45,7 +45,7 @@ func init() {

This takes the following parameters

- name: name of the plugin in the format <author>/<plugin_name>
- name: name of the plugin in the format ` + "`author`/`plugin_name`" + `

Eg

@@ -212,7 +212,7 @@ func init() {

This takes parameters

- name: name of the plugin in the format <author>/<plugin_name>
- name: name of the plugin in the format ` + "`author`/`plugin_name`" + `

Eg
@@ -59,17 +59,30 @@ func CheckAndDownloadWebGUIRelease(checkUpdate bool, forceUpdate bool, fetchURL
return errors.New("Web GUI path exists, but is a file instead of folder. Please check the path " + extractPath)
}

// Get the latest release details
WebUIURL, tag, size, err := GetLatestReleaseURL(fetchURL)
if err != nil {
return errors.Wrap(err, "Error checking for web gui release update, skipping update")
}
dat, err := ioutil.ReadFile(tagPath)
tagsMatch := false
if err != nil {
fs.Errorf(nil, "Error reading tag file at %s ", tagPath)
checkUpdate = true
} else if string(dat) == tag {
tagsMatch = true
}
fs.Debugf(nil, "Current tag: %s, Release tag: %s", string(dat), tag)

if !tagsMatch {
fs.Infof(nil, "A release (%s) for gui is present at %s. Use --rc-web-gui-update to update. Your current version is (%s)", tag, WebUIURL, string(dat))
}

// if the old file does not exist or a forced update is enforced.
// TODO: Add hashing to check integrity of the previous update.
if !extractPathExist || checkUpdate || forceUpdate {
// Get the latest release details
WebUIURL, tag, size, err := GetLatestReleaseURL(fetchURL)
if err != nil {
return err
}

dat, err := ioutil.ReadFile(tagPath)
if err == nil && string(dat) == tag {
if tagsMatch {
fs.Logf(nil, "No update to Web GUI available.")
if !forceUpdate {
return nil
@@ -91,7 +104,7 @@ func CheckAndDownloadWebGUIRelease(checkUpdate bool, forceUpdate bool, fetchURL
return errors.New("Web GUI path is a file instead of folder. Please check it " + extractPath)
}

fs.Logf(nil, "A new release for gui is present at "+WebUIURL)
fs.Logf(nil, "A new release for gui (%s) is present at %s", tag, WebUIURL)
fs.Logf(nil, "Downloading webgui binary. Please wait. [Size: %s, Path :  %s]\n", strconv.Itoa(size), zipPath)

// download the zip from latest url
@@ -32,6 +32,8 @@ type syncCopyMove struct {
// internal state
ctx            context.Context        // internal context for controlling go-routines
cancel         func()                 // cancel the context
inCtx          context.Context        // internal context for controlling march
inCancel       func()                 // cancel the march context
noTraverse     bool                   // if set don't traverse the dst
noCheckDest    bool                   // if set transfer all objects regardless without checking dst
noUnicodeNormalization bool           // don't normalize unicode characters in filenames
@@ -144,6 +146,8 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
} else {
s.ctx, s.cancel = context.WithCancel(ctx)
}
// Input context - cancel this for graceful stop
s.inCtx, s.inCancel = context.WithCancel(s.ctx)
if s.noTraverse && s.deleteMode != fs.DeleteModeOff {
fs.Errorf(nil, "Ignoring --no-traverse with sync")
s.noTraverse = false
@@ -248,6 +252,12 @@ func (s *syncCopyMove) processError(err error) {
}
if err == context.DeadlineExceeded {
err = fserrors.NoRetryError(err)
} else if err == accounting.ErrorMaxTransferLimitReachedGraceful {
if s.inCtx.Err() == nil {
fs.Logf(nil, "%v - stopping transfers", err)
// Cancel the march and stop the pipes
s.inCancel()
}
}
s.errorMu.Lock()
defer s.errorMu.Unlock()
@@ -287,7 +297,7 @@ func (s *syncCopyMove) currentError() error {
func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.WaitGroup) {
defer wg.Done()
for {
pair, ok := in.GetMax(s.ctx, fraction)
pair, ok := in.GetMax(s.inCtx, fraction)
if !ok {
return
}
@@ -343,7 +353,7 @@ func (s *syncCopyMove) pairRenamer(in *pipe, out *pipe, fraction int, wg *sync.W
func (s *syncCopyMove) pairRenamer(in *pipe, out *pipe, fraction int, wg *sync.WaitGroup) {
defer wg.Done()
for {
pair, ok := in.GetMax(s.ctx, fraction)
pair, ok := in.GetMax(s.inCtx, fraction)
if !ok {
return
}
@@ -363,7 +373,7 @@ func (s *syncCopyMove) pairCopyOrMove(ctx context.Context, in *pipe, fdst fs.Fs,
defer wg.Done()
var err error
for {
pair, ok := in.GetMax(s.ctx, fraction)
pair, ok := in.GetMax(s.inCtx, fraction)
if !ok {
return
}
@@ -809,7 +819,7 @@ func (s *syncCopyMove) run() error {

// set up a march over fdst and fsrc
m := &march.March{
Ctx:                    s.ctx,
Ctx:                    s.inCtx,
Fdst:                   s.fdst,
Fsrc:                   s.fsrc,
Dir:                    s.dir,
@@ -1851,38 +1851,51 @@ func TestSyncIgnoreCase(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file2)
}

// Test that aborting on max upload works
func TestAbort(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

if r.Fremote.Name() != "local" {
t.Skip("This test only runs on local")
}

// Test that aborting on --max-transfer works
func TestMaxTransfer(t *testing.T) {
oldMaxTransfer := fs.Config.MaxTransfer
oldTransfers := fs.Config.Transfers
oldCheckers := fs.Config.Checkers
oldCutoff := fs.Config.CutoffMode
fs.Config.MaxTransfer = 3 * 1024
fs.Config.Transfers = 1
fs.Config.Checkers = 1
fs.Config.CutoffMode = fs.CutoffModeHard
defer func() {
fs.Config.MaxTransfer = oldMaxTransfer
fs.Config.Transfers = oldTransfers
fs.Config.Checkers = oldCheckers
fs.Config.CutoffMode = oldCutoff
}()

// Create file on source
file1 := r.WriteFile("file1", string(make([]byte, 5*1024)), t1)
file2 := r.WriteFile("file2", string(make([]byte, 2*1024)), t1)
file3 := r.WriteFile("file3", string(make([]byte, 3*1024)), t1)
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
fstest.CheckItems(t, r.Fremote)
test := func(t *testing.T, cutoff fs.CutoffMode) {
r := fstest.NewRun(t)
defer r.Finalise()
fs.Config.CutoffMode = cutoff

accounting.GlobalStats().ResetCounters()
if r.Fremote.Name() != "local" {
t.Skip("This test only runs on local")
}

err := Sync(context.Background(), r.Fremote, r.Flocal, false)
expectedErr := fserrors.FsError(accounting.ErrorMaxTransferLimitReachedFatal)
fserrors.Count(expectedErr)
assert.Equal(t, expectedErr, err)
// Create file on source
file1 := r.WriteFile("file1", string(make([]byte, 5*1024)), t1)
file2 := r.WriteFile("file2", string(make([]byte, 2*1024)), t1)
file3 := r.WriteFile("file3", string(make([]byte, 3*1024)), t1)
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
fstest.CheckItems(t, r.Fremote)

accounting.GlobalStats().ResetCounters()

err := Sync(context.Background(), r.Fremote, r.Flocal, false)
expectedErr := fserrors.FsError(accounting.ErrorMaxTransferLimitReachedFatal)
if cutoff != fs.CutoffModeHard {
expectedErr = accounting.ErrorMaxTransferLimitReachedGraceful
}
fserrors.Count(expectedErr)
assert.Equal(t, expectedErr, err)
}

t.Run("Hard", func(t *testing.T) { test(t, fs.CutoffModeHard) })
t.Run("Soft", func(t *testing.T) { test(t, fs.CutoffModeSoft) })
t.Run("Cautious", func(t *testing.T) { test(t, fs.CutoffModeCautious) })
}
@@ -1,4 +1,4 @@
package fs

// Version of rclone
var Version = "v1.53.0-DEV"
var Version = "v1.54.0-DEV"

@@ -41,15 +41,13 @@ backends:
remote: "TestChunkerChunk3bNometaLocal:"
fastlist: true
maxfile: 6k
# Disable chunker with mailru tests until Mailru is fixed - see
# https://github.com/rclone/rclone/issues/4376
# - backend: "chunker"
#   remote: "TestChunkerMailru:"
#   fastlist: true
# - backend: "chunker"
#   remote: "TestChunkerChunk50bMailru:"
#   fastlist: true
#   maxfile: 10k
- backend: "chunker"
remote: "TestChunkerMailru:"
fastlist: true
- backend: "chunker"
remote: "TestChunkerChunk50bMailru:"
fastlist: true
maxfile: 10k
- backend: "chunker"
remote: "TestChunkerChunk50bYandex:"
fastlist: true
@@ -73,6 +71,10 @@ backends:
remote: "TestChunkerChunk50bSHA1HashS3:"
fastlist: true
maxfile: 1k
- backend: "chunker"
remote: "TestChunkerOverCrypt:"
fastlist: true
maxfile: 6k
- backend: "chunker"
remote: "TestChunkerChunk50bMD5QuickS3:"
fastlist: true
@@ -166,6 +166,11 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.

**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.
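
For example, on a system where the default cache location sits on exFAT, the cache can be redirected to a sparse-capable filesystem with `--cache-dir` (the paths here are illustrative):

```
rclone mount remote: /mnt/remote --vfs-cache-mode full --cache-dir /srv/rclone-cache
```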

### VFS Performance

These flags may be used to enable/disable features of the VFS for

@@ -239,8 +239,23 @@ func (item *Item) _truncate(size int64) (err error) {
// Use open handle if available
fd := item.fd
if fd == nil {
// If the metadata says we have some blocks cached then the
// file should exist, so open without O_CREATE
oFlags := os.O_WRONLY
if item.info.Rs.Size() == 0 {
oFlags |= os.O_CREATE
}
osPath := item.c.toOSPath(item.name) // No locking in Cache
fd, err = file.OpenFile(osPath, os.O_CREATE|os.O_WRONLY, 0600)
fd, err = file.OpenFile(osPath, oFlags, 0600)
if err != nil && os.IsNotExist(err) {
// If the metadata has info but the file doesn't
// exist then it has been externally removed
fs.Errorf(item.name, "vfs cache: detected external removal of cache file")
item.info.Rs = nil      // show we have no blocks cached
item.info.Dirty = false // file can't be dirty if it doesn't exist
item._removeMeta("cache file externally deleted")
fd, err = file.OpenFile(osPath, os.O_CREATE|os.O_WRONLY, 0600)
}
if err != nil {
return errors.Wrap(err, "vfs cache: truncate: failed to open cache file")
}
@@ -249,7 +264,7 @@ func (item *Item) _truncate(size int64) (err error) {

err = file.SetSparse(fd)
if err != nil {
fs.Debugf(item.name, "vfs cache: truncate: failed to set as a sparse file: %v", err)
fs.Errorf(item.name, "vfs cache: truncate: failed to set as a sparse file: %v", err)
}
}

@@ -446,7 +461,7 @@ func (item *Item) _createFile(osPath string) (err error) {
}
err = file.SetSparse(fd)
if err != nil {
fs.Debugf(item.name, "vfs cache: failed to set as a sparse file: %v", err)
fs.Errorf(item.name, "vfs cache: failed to set as a sparse file: %v", err)
}
item.fd = fd

@@ -1158,7 +1173,7 @@ func (item *Item) ReadAt(b []byte, off int64) (n int, err error) {
item.preAccess()
n, err = item.readAt(b, off)
item.postAccess()
if err == nil {
if err == nil || err == io.EOF {
break
}
fs.Errorf(item.name, "vfs cache: failed to _ensure cache %v", err)