Mirror of https://github.com/rclone/rclone.git, synced 2026-01-23 04:43:21 +00:00

Compare commits: 124 commits
v1.50-fixe ... fix-mega-d
Commits in this compare (SHA1):

aefe18fc41
ae340cf7d9
11f501bd44
a4bc4daf30
51dca8c8d4
6b3021209a
f263828edc
b7019a91c2
27c3481ea4
706da80d88
b6e86b2c7f
4453fa4ba6
540fd3f173
1af4bb0c84
15d19131bd
9d993e584b
21b17b14a9
1b89b38a4c
7242c7ce95
ad2bb86d8c
eb10ac346f
7e6fac8b1e
2e0774f3cf
b9fb313f71
0e64df4b4c
69ac04fec9
8a2d1dbe24
584e705c0c
32a3ba9e3f
db1c7f9ca8
207474abab
f754d897e5
4daecd3158
59c75ba442
0ecb8bc2f9
1ab4985046
6e683b4359
241921c786
a186284b23
41ba1bba2b
50bb9b7bdd
4537d9b5cf
684dbe0e9d
572c1079a5
cb97239a60
e48145f959
2150cf7362
707e51eac7
0d10640aaa
f4746f5064
c05bb63f96
e2773b3b4e
d3b0bed091
33c80bbb96
705e4694ed
4fbc90d115
ed39adc65b
162fdfe455
8f33c932f2
4195bd7880
d72f3e31c0
11f44cff50
c3751e9a50
420ae905b5
a7d65bd519
1db31d7149
4641bd5116
7e602dbf39
e14d968f8d
e0eeeaafcd
d46f8d0ae5
1e6278556c
303f4ee152
2fe8285f89
f5443ac939
7cf056b2c2
75a6c49f87
19229b1215
b5bb4c2a21
479c803fd9
3dcf1e61cf
3da1cbfc81
0c9a8cf776
f3871377c3
cc9a7dc073
b61dd809ee
f158a398f3
acefa5c40d
2784c3234b
c21a4fee58
358f5a8084
9115752679
51efb349ac
e0d9314059
21c6babdbb
5beeac7959
be5392f448
c00dcb7e67
6150ae89d6
1e423d21e1
53d55ae760
5928704e1b
5ddfa9f7f6
9b5308144f
4b20afa94a
049ff1f269
3f7af64316
0eaf5475ef
7bf056316f
520ddbcceb
1ce1ea34aa
e6378daadf
7ff95c6250
6d58d9a86f
e0356f5aae
191cfb79d1
e81eca4055
ee3215ac76
199ac61bde
a40cc1167d
c57ea8d867
1868c77e16
378a3f4133
daff5a824e
.github/workflows/build.yml (16 changes, vendored)

@@ -102,9 +102,10 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@master
+        uses: actions/checkout@v1
         with:
-          path: ./src/github.com/${{ github.repository }}
+          # Checkout into a fixed path to avoid import path problems on go < 1.11
+          path: ./src/github.com/rclone/rclone

       - name: Install Go
         uses: actions/setup-go@v1
@@ -201,7 +202,8 @@ jobs:
         env:
           RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
           # working-directory: '$(modulePath)'
-        if: matrix.deploy && github.head_ref == ''
+        # Deploy binaries if enabled in config && not a PR && not a fork
+        if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

   xgo:
     timeout-minutes: 60
@@ -211,9 +213,10 @@ jobs:
     steps:

       - name: Checkout
-        uses: actions/checkout@master
+        uses: actions/checkout@v1
         with:
-          path: ./src/github.com/${{ github.repository }}
+          # Checkout into a fixed path to avoid import path problems on go < 1.11
+          path: ./src/github.com/rclone/rclone

       - name: Set environment variables
         shell: bash
@@ -247,4 +250,5 @@ jobs:
           make circleci_upload
         env:
           RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
-        if: github.head_ref == ''
+        # Upload artifacts if not a PR && not a fork
+        if: github.head_ref == '' && github.repository == 'rclone/rclone'
CONTRIBUTING.md

@@ -82,13 +82,9 @@ You patch will get reviewed and you might get asked to fix some stuff.
 If so, then make the changes in the same branch, squash the commits,
 rebase it to master then push it to GitHub with `--force`.

-## Enabling CI for your fork ##
+## CI for your fork ##

-The CI config files for rclone have taken care of forks of the project, so you can enable CI for your fork repo easily.
-
-rclone currently uses [Travis CI](https://travis-ci.org/), [AppVeyor](https://ci.appveyor.com/), and
-[Circle CI](https://circleci.com/) to build the project. To enable them for your fork, simply go into their
-websites, find your fork of rclone, and enable building there.
+rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.

 ## Testing ##

Makefile (3 changes)

@@ -46,7 +46,8 @@ endif
 rclone:
 	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
 	mkdir -p `go env GOPATH`/bin/
-	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/
+	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
+	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`

 test_all:
 	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
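The two-step install (copy to a `.new` file, then `mv` it over the old binary) means the installed binary is swapped by a single rename within one filesystem rather than being truncated and rewritten in place while it may be running. A minimal Go sketch of the same replace-by-rename idiom (file names here are illustrative, not part of rclone):

```go
package main

import (
	"log"
	"os"
)

func main() {
	target := "rclone-installed" // stands in for `go env GOPATH`/bin/rclone

	// Write the new binary next to the target first...
	if err := os.WriteFile(target+".new", []byte("new build"), 0o755); err != nil {
		log.Fatal(err)
	}
	// ...then swap it in with a single atomic rename, mirroring `mv`.
	if err := os.Rename(target+".new", target); err != nil {
		log.Fatal(err)
	}
	log.Printf("replaced %s atomically", target)
}
```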
README.md

@@ -34,6 +34,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
   * Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
   * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
   * FTP [:page_facing_up:](https://rclone.org/ftp/)
+  * GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
   * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
   * Google Drive [:page_facing_up:](https://rclone.org/drive/)
   * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
@@ -89,7 +89,7 @@ Now
   * make TAG=${NEW_TAG} upload_github
   * NB this overwrites the current beta so we need to do this
   * git co master
-  * make LAST_TAG=${NEW_TAG} startdev
+  * make VERSION=${NEW_TAG} startdev
   * # cherry pick the changes to the changelog and VERSION
   * git checkout ${BASE_TAG}-fixes VERSION docs/content/changelog.md
   * git commit --amend
backend/cache/cache_mount_other_test.go (new file, 20 lines, vendored)

@@ -0,0 +1,20 @@
+// +build !linux !go1.11
+// +build !darwin !go1.11
+// +build !freebsd !go1.11
+// +build !windows
+
+package cache_test
+
+import (
+	"testing"
+
+	"github.com/rclone/rclone/fs"
+)
+
+func (r *run) mountFs(t *testing.T, f fs.Fs) {
+	panic("mountFs not defined for this platform")
+}
+
+func (r *run) unmountFs(t *testing.T, f fs.Fs) {
+	panic("unmountFs not defined for this platform")
+}
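For reference, old-style `+build` constraints OR the space-separated terms on a single line and AND the separate lines together, so this fallback file builds exactly where the real mount tests (linux/darwin/freebsd with go1.11+) do not, and never on Windows. A small sketch of that boolean logic (the `otherPlatform` helper is hypothetical, written only to illustrate the rules, and is not rclone code):

```go
package main

import "fmt"

// otherPlatform evaluates the constraint from cache_mount_other_test.go:
// each "+build" line ORs its space-separated terms, and lines AND together.
func otherPlatform(goos string, atLeastGo111 bool) bool {
	line1 := goos != "linux" || !atLeastGo111   // +build !linux !go1.11
	line2 := goos != "darwin" || !atLeastGo111  // +build !darwin !go1.11
	line3 := goos != "freebsd" || !atLeastGo111 // +build !freebsd !go1.11
	line4 := goos != "windows"                  // +build !windows
	return line1 && line2 && line3 && line4
}

func main() {
	fmt.Println(otherPlatform("linux", true))   // false: the real mount tests build here
	fmt.Println(otherPlatform("plan9", false))  // true: the panicking fallbacks apply
	fmt.Println(otherPlatform("windows", true)) // false: excluded entirely
}
```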
backend/cache/cache_mount_unix_test.go (2 changes, vendored)

@@ -1,4 +1,4 @@
-// +build !plan9,!windows
+// +build linux,go1.11 darwin,go1.11 freebsd,go1.11

 package cache_test

backend/cache/storage_persistent.go (2 changes, vendored)

@@ -16,7 +16,7 @@ import (
 	"sync"
 	"time"

-	bolt "github.com/coreos/bbolt"
+	bolt "github.com/etcd-io/bbolt"
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/walk"
backend/chunker/chunker.go

@@ -12,11 +12,13 @@ import (
 	gohash "hash"
 	"io"
 	"io/ioutil"
+	"math/rand"
 	"path"
 	"regexp"
 	"sort"
 	"strconv"
 	"strings"
+	"sync"
 	"time"

 	"github.com/pkg/errors"
@@ -34,46 +36,57 @@ import (
 // and optional metadata object. If it's present,
 // meta object is named after the original file.
 //
 // The only supported metadata format is simplejson atm.
 // It supports only per-file meta objects that are rudimentary,
 // used mostly for consistency checks (lazily for performance reasons).
 // Other formats can be developed that use an external meta store
 // free of these limitations, but this needs some support from
 // rclone core (eg. metadata store interfaces).
 //
 // The following types of chunks are supported:
 // data and control, active and temporary.
 // Chunk type is identified by matching chunk file name
 // based on the chunk name format configured by user.
 //
-// Both data and control chunks can be either temporary or
-// active (non-temporary).
+// Both data and control chunks can be either temporary (aka hidden)
+// or active (non-temporary aka normal aka permanent).
 // An operation creates temporary chunks while it runs.
-// By completion it removes temporary and leaves active
-// (aka normal aka permanent) chunks.
+// By completion it removes temporary and leaves active chunks.
 //
-// Temporary (aka hidden) chunks have a special hardcoded suffix
-// in addition to the configured name pattern. The suffix comes last
-// to prevent name collisions with non-temporary chunks.
-// Temporary suffix includes so called transaction number usually
-// abbreviated as `xactNo` below, a generic non-negative integer
+// Temporary chunks have a special hardcoded suffix in addition
+// to the configured name pattern.
+// Temporary suffix includes so called transaction identifier
+// (abbreviated as `xactID` below), a generic non-negative base-36 "number"
 // used by parallel operations to share a composite object.
+// Chunker also accepts the longer decimal temporary suffix (obsolete),
+// which is transparently converted to the new format. In its maximum
+// length of 13 decimals it makes a 9-digit base-36 number.
 //
 // Chunker can tell data chunks from control chunks by the characters
 // located in the "hash placeholder" position of configured format.
 // Data chunks have decimal digits there.
-// Control chunks have a short lowercase literal prepended by underscore
-// in that position.
+// Control chunks have in that position a short lowercase alphanumeric
+// string (starting with a letter) prepended by underscore.
 //
 // Metadata format v1 does not define any control chunk types,
 // they are currently ignored aka reserved.
 // In future they can be used to implement resumable uploads etc.
 //
 const (
-	ctrlTypeRegStr  = `[a-z]{3,9}`
-	tempChunkFormat = `%s..tmp_%010d`
-	tempChunkRegStr = `\.\.tmp_([0-9]{10,19})`
+	ctrlTypeRegStr   = `[a-z][a-z0-9]{2,6}`
+	tempSuffixFormat = `_%04s`
+	tempSuffixRegStr = `_([0-9a-z]{4,9})`
+	tempSuffixRegOld = `\.\.tmp_([0-9]{10,13})`
 )

 var (
-	ctrlTypeRegexp = regexp.MustCompile(`^` + ctrlTypeRegStr + `$`)
+	// regular expressions to validate control type and temporary suffix
+	ctrlTypeRegexp   = regexp.MustCompile(`^` + ctrlTypeRegStr + `$`)
+	tempSuffixRegexp = regexp.MustCompile(`^` + tempSuffixRegStr + `$`)
 )

 // Normally metadata is a small piece of JSON (about 100-300 bytes).
-// The size of valid metadata size must never exceed this limit.
+// The size of valid metadata must never exceed this limit.
 // Current maximum provides a reasonable room for future extensions.
 //
 // Please refrain from increasing it, this can cause old rclone versions
@@ -101,6 +114,9 @@ const revealHidden = false
 // Prevent memory overflow due to specially crafted chunk name
 const maxSafeChunkNumber = 10000000

+// Number of attempts to find unique transaction identifier
+const maxTransactionProbes = 100
+
 // standard chunker errors
 var (
 	ErrChunkOverflow = errors.New("chunk number overflow")
@@ -113,13 +129,6 @@ const (
 	delFailed = 2 // move, then delete and try again if failed
 )

-// Note: metadata logic is tightly coupled with chunker code in many
-// places, eg. in checks whether a file should have meta object or is
-// eligible for chunking.
-// If more metadata formats (or versions of a format) are added in future,
-// it may be advisable to factor it into a "metadata strategy" interface
-// similar to chunkingReader or linearReader below.
-
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -261,7 +270,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 	// detects a composite file because it finds the first chunk!
 	// (yet can't satisfy fstest.CheckListing, will ignore)
 	if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
-		firstChunkPath := f.makeChunkName(remotePath, 0, "", -1)
+		firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
 		_, testErr := baseInfo.NewFs(baseName, firstChunkPath, baseConfig)
 		if testErr == fs.ErrorIsFile {
 			err = testErr
@@ -310,12 +319,16 @@ type Fs struct {
 	dataNameFmt string         // name format of data chunks
 	ctrlNameFmt string         // name format of control chunks
 	nameRegexp  *regexp.Regexp // regular expression to match chunk names
+	xactIDRand  *rand.Rand     // generator of random transaction identifiers
+	xactIDMutex sync.Mutex     // mutex for the source of randomness
 	opt         Options        // copy of Options
 	features    *fs.Features   // optional features
 	dirSort     bool           // reserved for future, ignored
 }

-// configure must be called only from NewFs or by unit tests
+// configure sets up chunker for given name format, meta format and hash type.
+// It also seeds the source of random transaction identifiers.
+// configure must be called only from NewFs or by unit tests.
 func (f *Fs) configure(nameFormat, metaFormat, hashType string) error {
 	if err := f.setChunkNameFormat(nameFormat); err != nil {
 		return errors.Wrapf(err, "invalid name format '%s'", nameFormat)
@@ -326,6 +339,10 @@ func (f *Fs) configure(nameFormat, metaFormat, hashType string) error {
 	if err := f.setHashType(hashType); err != nil {
 		return err
 	}
+
+	randomSeed := time.Now().UnixNano()
+	f.xactIDRand = rand.New(rand.NewSource(randomSeed))
+
 	return nil
 }
@@ -414,13 +431,13 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
 	}
 	reDataOrCtrl := fmt.Sprintf("(?:(%s)|_(%s))", reDigits, ctrlTypeRegStr)

-	// this must be non-greedy or else it can eat up temporary suffix
+	// this must be non-greedy or else it could eat up temporary suffix
 	const mainNameRegStr = "(.+?)"

 	strRegex := regexp.QuoteMeta(pattern)
 	strRegex = reHashes.ReplaceAllLiteralString(strRegex, reDataOrCtrl)
 	strRegex = strings.Replace(strRegex, "\\*", mainNameRegStr, -1)
-	strRegex = fmt.Sprintf("^%s(?:%s)?$", strRegex, tempChunkRegStr)
+	strRegex = fmt.Sprintf("^%s(?:%s|%s)?$", strRegex, tempSuffixRegStr, tempSuffixRegOld)
 	f.nameRegexp = regexp.MustCompile(strRegex)

 	// craft printf formats for active data/control chunks
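To see what the assembled matcher accepts after this change, here is a standalone sketch exercising the chunk-name regexp for the default `*.rclone_chunk.###` pattern; the regexp literal is copied from the test expectations further down, while the sample file names are made up for illustration:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Matcher produced by setChunkNameFormat for `*.rclone_chunk.###`:
	// group 1 = main file name, 2 = chunk number, 3 = control type,
	// 4 = new-style temporary suffix, 5 = old-style decimal suffix.
	re := regexp.MustCompile(`^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)

	names := []string{
		"video.avi.rclone_chunk.001",                 // active data chunk
		"video.avi.rclone_chunk.002_0f5x",            // temporary data chunk (new suffix)
		"video.avi.rclone_chunk.002..tmp_0000000047", // temporary data chunk (old suffix)
		"video.avi.rclone_chunk._info",               // active control chunk
		"video.avi",                                  // not a chunk: no submatch
	}
	for _, name := range names {
		fmt.Printf("%-45s %q\n", name, re.FindStringSubmatch(name))
	}
}
```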
@@ -435,34 +452,36 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
 	return nil
 }

-// makeChunkName produces chunk name (or path) for given file.
+// makeChunkName produces chunk name (or path) for a given file.
 //
-// mainPath can be name, relative or absolute path of main file.
+// filePath can be name, relative or absolute path of main file.
 //
 // chunkNo must be a zero based index of data chunk.
 // Negative chunkNo eg. -1 indicates a control chunk.
 // ctrlType is type of control chunk (must be valid).
 // ctrlType must be "" for data chunks.
 //
-// xactNo is a transaction number.
-// Negative xactNo eg. -1 indicates an active chunk,
-// otherwise produce temporary chunk name.
+// xactID is a transaction identifier. Empty xactID denotes active chunk,
+// otherwise temporary chunk name is produced.
 //
-func (f *Fs) makeChunkName(mainPath string, chunkNo int, ctrlType string, xactNo int64) string {
-	dir, mainName := path.Split(mainPath)
-	var name string
+func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string {
+	dir, parentName := path.Split(filePath)
+	var name, tempSuffix string
 	switch {
 	case chunkNo >= 0 && ctrlType == "":
-		name = fmt.Sprintf(f.dataNameFmt, mainName, chunkNo+f.opt.StartFrom)
+		name = fmt.Sprintf(f.dataNameFmt, parentName, chunkNo+f.opt.StartFrom)
 	case chunkNo < 0 && ctrlTypeRegexp.MatchString(ctrlType):
-		name = fmt.Sprintf(f.ctrlNameFmt, mainName, ctrlType)
+		name = fmt.Sprintf(f.ctrlNameFmt, parentName, ctrlType)
 	default:
 		panic("makeChunkName: invalid argument") // must not produce something we can't consume
 	}
-	if xactNo >= 0 {
-		name = fmt.Sprintf(tempChunkFormat, name, xactNo)
+	if xactID != "" {
+		tempSuffix = fmt.Sprintf(tempSuffixFormat, xactID)
+		if !tempSuffixRegexp.MatchString(tempSuffix) {
+			panic("makeChunkName: invalid argument")
+		}
 	}
-	return dir + name
+	return dir + name + tempSuffix
 }
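A worked example of the name composition for the `*.chunk.###` format used by the tests below (`dataNameFmt` of `%s.chunk.%03d` and `StartFrom` of 2); note that Go's `fmt` zero-pads strings under the `0` flag, which is exactly what `tempSuffixFormat` (`_%04s`) relies on:

```go
package main

import "fmt"

func main() {
	const dataNameFmt = "%s.chunk.%03d" // from pattern `*.chunk.###`
	const tempSuffixFormat = "_%04s"
	const startFrom = 2

	// Active data chunk: chunkNo 1 becomes file number 3.
	name := fmt.Sprintf(dataNameFmt, "fish", 1+startFrom)
	fmt.Println(name) // fish.chunk.003

	// Temporary data chunk: chunkNo 9, xactID "4321".
	name = fmt.Sprintf(dataNameFmt, "fish", 9+startFrom)
	fmt.Println(name + fmt.Sprintf(tempSuffixFormat, "4321")) // fish.chunk.011_4321

	// A short xactID "4" is zero-padded to at least 4 characters.
	fmt.Println(name + fmt.Sprintf(tempSuffixFormat, "4")) // fish.chunk.011_0004
}
```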
@@ -470,20 +489,21 @@ func (f *Fs) makeChunkName(mainPath string, chunkNo int, ctrlType string, xactNo int64) string {
 // parseChunkName checks whether given file path belongs to
 //
 // filePath can be name, relative or absolute path of a file.
 //
-// Returned mainPath is a non-empty string if valid chunk name
-// is detected or "" if it's not a chunk.
+// Returned parentPath is path of the composite file owning the chunk.
+// It's a non-empty string if valid chunk name is detected
+// or "" if it's not a chunk.
 // Other returned values depend on detected chunk type:
 // data or control, active or temporary:
 //
 // data chunk - the returned chunkNo is non-negative and ctrlType is ""
-// control chunk - the chunkNo is -1 and ctrlType is non-empty string
-// active chunk - the returned xactNo is -1
-// temporary chunk - the xactNo is non-negative integer
-func (f *Fs) parseChunkName(filePath string) (mainPath string, chunkNo int, ctrlType string, xactNo int64) {
+// control chunk - the chunkNo is -1 and ctrlType is a non-empty string
+// active chunk - the returned xactID is ""
+// temporary chunk - the xactID is a non-empty string
+func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ctrlType, xactID string) {
 	dir, name := path.Split(filePath)
 	match := f.nameRegexp.FindStringSubmatch(name)
 	if match == nil || match[1] == "" {
-		return "", -1, "", -1
+		return "", -1, "", ""
 	}
 	var err error

@@ -494,19 +514,26 @@ func (f *Fs) parseChunkName(filePath string) (mainPath string, chunkNo int, ctrlType string, xactNo int64) {
 		}
 		if chunkNo -= f.opt.StartFrom; chunkNo < 0 {
 			fs.Infof(f, "invalid data chunk number in file %q", name)
-			return "", -1, "", -1
+			return "", -1, "", ""
 		}
 	}

-	xactNo = -1
 	if match[4] != "" {
-		if xactNo, err = strconv.ParseInt(match[4], 10, 64); err != nil || xactNo < 0 {
-			fs.Infof(f, "invalid transaction number in file %q", name)
-			return "", -1, "", -1
-		}
+		xactID = match[4]
 	}
+	if match[5] != "" {
+		// old-style temporary suffix
+		number, err := strconv.ParseInt(match[5], 10, 64)
+		if err != nil || number < 0 {
+			fs.Infof(f, "invalid old-style transaction number in file %q", name)
+			return "", -1, "", ""
+		}
+		// convert old-style transaction number to base-36 transaction ID
+		xactID = fmt.Sprintf(tempSuffixFormat, strconv.FormatInt(number, 36))
+		xactID = xactID[1:] // strip leading underscore
+	}

-	mainPath = dir + match[1]
+	parentPath = dir + match[1]
 	ctrlType = match[3]
 	return
 }
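A worked example of the old-suffix conversion above, matching the expectations in the tests below: decimal 47 is `1b` in base 36, and zero-padding through `_%04s` yields `001b`:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	const tempSuffixFormat = "_%04s"

	// `fish.chunk.004..tmp_0000000047` carries old-style number 47.
	xactID := fmt.Sprintf(tempSuffixFormat, strconv.FormatInt(47, 36))
	xactID = xactID[1:] // strip leading underscore
	fmt.Println(xactID) // 001b

	// A maximum-length 13-digit decimal suffix becomes a 9-digit base-36 ID.
	fmt.Println(strconv.FormatInt(9994567890123, 36)) // 3jjfvo3wr
}
```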
@@ -514,17 +541,74 @@ func (f *Fs) parseChunkName(filePath string) (mainPath string, chunkNo int, ctrlType string, xactNo int64) {
 // forbidChunk prints error message or raises error if file is chunk.
 // First argument sets log prefix, use `false` to suppress message.
 func (f *Fs) forbidChunk(o interface{}, filePath string) error {
-	if mainPath, _, _, _ := f.parseChunkName(filePath); mainPath != "" {
+	if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
 		if f.opt.FailHard {
-			return fmt.Errorf("chunk overlap with %q", mainPath)
+			return fmt.Errorf("chunk overlap with %q", parentPath)
 		}
 		if boolVal, isBool := o.(bool); !isBool || boolVal {
-			fs.Errorf(o, "chunk overlap with %q", mainPath)
+			fs.Errorf(o, "chunk overlap with %q", parentPath)
 		}
 	}
 	return nil
 }

+// newXactID produces a sufficiently random transaction identifier.
+//
+// The temporary suffix mask allows identifiers consisting of 4-9
+// base-36 digits (ie. digits 0-9 or lowercase letters a-z).
+// The identifiers must be unique between transactions running on
+// the single file in parallel.
+//
+// Currently the function produces 6-character identifiers.
+// Together with underscore this makes a 7-character temporary suffix.
+//
+// The first 4 characters isolate groups of transactions by time intervals.
+// The maximum length of interval is base-36 "zzzz" ie. 1,679,615 seconds.
+// The function rather takes a maximum prime closest to this number
+// (see https://primes.utm.edu) as the interval length to better safeguard
+// against repeating pseudo-random sequences in cases when rclone is
+// invoked from a periodic scheduler like unix cron.
+// Thus, the interval is slightly more than 19 days 10 hours 33 minutes.
+//
+// The remaining 2 base-36 digits (in the range from 0 to 1295 inclusive)
+// are taken from the local random source.
+// This provides about 0.1% collision probability for two parallel
+// operations started at the same second and working on the same file.
+//
+// Non-empty filePath argument enables probing for existing temporary chunk
+// to further eliminate collisions.
+func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err error) {
+	const closestPrimeZzzzSeconds = 1679609
+	const maxTwoBase36Digits = 1295
+
+	unixSec := time.Now().Unix()
+	if unixSec < 0 {
+		unixSec = -unixSec // unlikely but the number must be positive
+	}
+	circleSec := unixSec % closestPrimeZzzzSeconds
+	first4chars := strconv.FormatInt(circleSec, 36)
+
+	for tries := 0; tries < maxTransactionProbes; tries++ {
+		f.xactIDMutex.Lock()
+		randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
+		f.xactIDMutex.Unlock()
+
+		last2chars := strconv.FormatInt(randomness, 36)
+		xactID = fmt.Sprintf("%04s%02s", first4chars, last2chars)
+
+		if filePath == "" {
+			return
+		}
+		probeChunk := f.makeChunkName(filePath, 0, "", xactID)
+		_, probeErr := f.base.NewObject(ctx, probeChunk)
+		if probeErr != nil {
+			return
+		}
+	}
+
+	return "", fmt.Errorf("can't setup transaction for %s", filePath)
+}
+
 // List the objects and directories in dir into entries.
 // The entries can be returned in any order but should be
 // for a complete directory.
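A standalone sketch of the identifier construction used by `newXactID` above (the probing loop and the shared `*rand.Rand` behind a mutex are omitted; the local `rng` stands in for `f.xactIDRand`):

```go
package main

import (
	"fmt"
	"math/rand"
	"strconv"
	"time"
)

func main() {
	// 4 base-36 chars from the time within a prime-length (~19.4 day) cycle,
	// plus 2 base-36 chars of randomness (0..1295), zero-padded via %04s/%02s.
	const closestPrimeZzzzSeconds = 1679609
	const maxTwoBase36Digits = 1295

	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	unixSec := time.Now().Unix()
	first4chars := strconv.FormatInt(unixSec%closestPrimeZzzzSeconds, 36)
	last2chars := strconv.FormatInt(rng.Int63n(maxTwoBase36Digits+1), 36)

	xactID := fmt.Sprintf("%04s%02s", first4chars, last2chars)
	fmt.Println(xactID) // eg. "0f5x2q" — 6 chars, so the full suffix "_0f5x2q" is 7
}
```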
@@ -602,8 +686,8 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
 		switch entry := dirOrObject.(type) {
 		case fs.Object:
 			remote := entry.Remote()
-			if mainRemote, chunkNo, ctrlType, xactNo := f.parseChunkName(remote); mainRemote != "" {
-				if xactNo != -1 {
+			if mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote); mainRemote != "" {
+				if xactID != "" {
 					if revealHidden {
 						fs.Infof(f, "ignore temporary chunk %q", remote)
 					}
@@ -686,7 +770,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
 //
 // Please note that every NewObject invocation will scan the whole directory.
 // Using here something like fs.DirCache might improve performance
-// (but will make logic more complex, though).
+// (yet making the logic more complex).
 //
 // Note that chunker prefers analyzing file names rather than reading
 // the content of meta object assuming that directory scans are fast
@@ -752,8 +836,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 		if !strings.Contains(entryRemote, remote) {
 			continue // bypass regexp to save cpu
 		}
-		mainRemote, chunkNo, ctrlType, xactNo := f.parseChunkName(entryRemote)
-		if mainRemote == "" || mainRemote != remote || ctrlType != "" || xactNo != -1 {
+		mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(entryRemote)
+		if mainRemote == "" || mainRemote != remote || ctrlType != "" || xactID != "" {
 			continue // skip non-conforming, temporary and control chunks
 		}
 		//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
@@ -786,7 +870,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	// This is either a composite object with metadata or a non-chunked
 	// file without metadata. Validate it and update the total data size.
 	// As an optimization, skip metadata reading here - we will call
-	// readMetadata lazily when needed.
+	// readMetadata lazily when needed (reading can be expensive).
 	if err := o.validate(); err != nil {
 		return nil, err
 	}
@@ -843,14 +927,11 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
 		}
 	}()

-	// Use system timer as a trivial source of transaction numbers,
-	// don't try hard to safeguard against chunk collisions between
-	// parallel transactions.
-	xactNo := time.Now().Unix()
-	if xactNo < 0 {
-		xactNo = -xactNo // unlikely but transaction number must be positive
-	}
 	baseRemote := remote
+	xactID, errXact := f.newXactID(ctx, baseRemote)
+	if errXact != nil {
+		return nil, errXact
+	}

 	// Transfer chunks data
 	for c.chunkNo = 0; !c.done; c.chunkNo++ {
@@ -858,7 +939,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
 			return nil, ErrChunkOverflow
 		}

-		tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactNo)
+		tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
 		size := c.sizeLeft
 		if size > c.chunkSize {
 			size = c.chunkSize
@@ -962,7 +1043,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st

 	// Rename data chunks from temporary to final names
 	for chunkNo, chunk := range c.chunks {
-		chunkRemote := f.makeChunkName(baseRemote, chunkNo, "", -1)
+		chunkRemote := f.makeChunkName(baseRemote, chunkNo, "", "")
 		chunkMoved, errMove := f.baseMove(ctx, chunk, chunkRemote, delFailed)
 		if errMove != nil {
 			return nil, errMove
@@ -1221,11 +1302,6 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 	return f.newObject("", o, nil), nil
 }

-// Precision returns the precision of this Fs
-func (f *Fs) Precision() time.Duration {
-	return f.base.Precision()
-}
-
 // Hashes returns the supported hash sets.
 // Chunker advertises a hash type if and only if it can be calculated
 // for files of any size, non-chunked or composite.
@@ -1613,8 +1689,8 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 	wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
 		//fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
 		if entryType == fs.EntryObject {
-			mainPath, _, _, xactNo := f.parseChunkName(path)
-			if mainPath != "" && xactNo == -1 {
+			mainPath, _, _, xactID := f.parseChunkName(path)
+			if mainPath != "" && xactID == "" {
 				path = mainPath
 			}
 		}
@@ -2063,7 +2139,7 @@ type metaSimpleJSON struct {
 // Current implementation creates metadata in three cases:
 // - for files larger than chunk size
 // - if file contents can be mistaken as meta object
-// - if consistent hashing is on but wrapped remote can't provide given hash
+// - if consistent hashing is On but wrapped remote can't provide given hash
 //
 func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1 string) ([]byte, error) {
 	version := metadataVersion
@@ -2177,6 +2253,11 @@ func (f *Fs) String() string {
 	return fmt.Sprintf("Chunked '%s:%s'", f.name, f.root)
 }

+// Precision returns the precision of this Fs
+func (f *Fs) Precision() time.Duration {
+	return f.base.Precision()
+}
+
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs = (*Fs)(nil)
backend/chunker/chunker_internal_test.go

@@ -64,35 +64,40 @@ func testChunkNameFormat(t *testing.T, f *Fs) {
 		assert.Error(t, err)
 	}

-	assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType string, xactNo int64) {
-		gotChunkName := f.makeChunkName(mainName, chunkNo, ctrlType, xactNo)
-		assert.Equal(t, wantChunkName, gotChunkName)
+	assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType, xactID string) {
+		gotChunkName := ""
+		assert.NotPanics(t, func() {
+			gotChunkName = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
+		}, "makeChunkName(%q,%d,%q,%q) must not panic", mainName, chunkNo, ctrlType, xactID)
+		if gotChunkName != "" {
+			assert.Equal(t, wantChunkName, gotChunkName)
+		}
 	}

-	assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType string, xactNo int64) {
+	assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType, xactID string) {
 		assert.Panics(t, func() {
-			_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactNo)
-		}, "makeChunkName(%q,%d,%q,%d) should panic", mainName, chunkNo, ctrlType, xactNo)
+			_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
+		}, "makeChunkName(%q,%d,%q,%q) should panic", mainName, chunkNo, ctrlType, xactID)
 	}

-	assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType string, wantXactNo int64) {
-		gotMainName, gotChunkNo, gotCtrlType, gotXactNo := f.parseChunkName(fileName)
+	assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType, wantXactID string) {
+		gotMainName, gotChunkNo, gotCtrlType, gotXactID := f.parseChunkName(fileName)
 		assert.Equal(t, wantMainName, gotMainName)
 		assert.Equal(t, wantChunkNo, gotChunkNo)
 		assert.Equal(t, wantCtrlType, gotCtrlType)
-		assert.Equal(t, wantXactNo, gotXactNo)
+		assert.Equal(t, wantXactID, gotXactID)
 	}

 	const newFormatSupported = false // support for patterns not starting with base name (*)

 	// valid formats
-	assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
-	assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
-	assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
-	assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
-	assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:\.\.tmp_([0-9]{10,19}))?$`)
+	assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
+	assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
+	assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
+	assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
+	assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
 	if newFormatSupported {
-		assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z]{3,9})),(?:\.\.tmp_([0-9]{10,19}))?$`)
+		assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z][a-z0-9]{2,6})),(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
 	}

 	// invalid formats
@@ -111,142 +116,223 @@ func testChunkNameFormat(t *testing.T, f *Fs) {
|
||||
|
||||
// quick tests
|
||||
if newFormatSupported {
|
||||
assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
|
||||
assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9][0-9a-z]{3,8})\.\.tmp_([0-9]{10,13}))?$`)
|
||||
f.opt.StartFrom = 1
|
||||
|
||||
assertMakeName(`part_fish_1`, "fish", 0, "", -1)
|
||||
assertParseName(`part_fish_43`, "fish", 42, "", -1)
|
||||
assertMakeName(`part_fish_3..tmp_0000000004`, "fish", 2, "", 4)
|
||||
assertParseName(`part_fish_4..tmp_0000000005`, "fish", 3, "", 5)
|
||||
assertMakeName(`part_fish__locks`, "fish", -2, "locks", -3)
|
||||
assertParseName(`part_fish__locks`, "fish", -1, "locks", -1)
|
||||
assertMakeName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -3, "blockinfo", 1234567890123456789)
|
||||
assertParseName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789)
|
||||
assertMakeName(`part_fish_1`, "fish", 0, "", "")
|
||||
assertParseName(`part_fish_43`, "fish", 42, "", "")
|
||||
assertMakeName(`part_fish__locks`, "fish", -2, "locks", "")
|
||||
assertParseName(`part_fish__locks`, "fish", -1, "locks", "")
|
||||
assertMakeName(`part_fish__x2y`, "fish", -2, "x2y", "")
|
||||
assertParseName(`part_fish__x2y`, "fish", -1, "x2y", "")
|
||||
assertMakeName(`part_fish_3_0004`, "fish", 2, "", "4")
|
||||
assertParseName(`part_fish_4_0005`, "fish", 3, "", "0005")
|
||||
assertMakeName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -3, "blkinfo", "jj5fvo3wr")
|
||||
assertParseName(`part_fish__blkinfo_zz9fvo3wr`, "fish", -1, "blkinfo", "zz9fvo3wr")
|
||||
|
||||
// old-style temporary suffix (parse only)
|
||||
assertParseName(`part_fish_4..tmp_0000000011`, "fish", 3, "", "000b")
|
||||
assertParseName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -1, "blkinfo", "jj5fvo3wr")
|
||||
}
|
||||
|
||||
// prepare format for long tests
|
||||
assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
|
||||
assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
|
||||
f.opt.StartFrom = 2
|
||||
|
||||
// valid data chunks
|
||||
assertMakeName(`fish.chunk.003`, "fish", 1, "", -1)
|
||||
assertMakeName(`fish.chunk.011..tmp_0000054321`, "fish", 9, "", 54321)
|
||||
assertMakeName(`fish.chunk.011..tmp_1234567890`, "fish", 9, "", 1234567890)
|
||||
assertMakeName(`fish.chunk.1916..tmp_123456789012345`, "fish", 1914, "", 123456789012345)
|
||||
assertMakeName(`fish.chunk.003`, "fish", 1, "", "")
|
||||
assertParseName(`fish.chunk.003`, "fish", 1, "", "")
|
||||
assertMakeName(`fish.chunk.021`, "fish", 19, "", "")
|
||||
assertParseName(`fish.chunk.021`, "fish", 19, "", "")
|
||||
|
||||
assertParseName(`fish.chunk.003`, "fish", 1, "", -1)
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021`, "fish", 2, "", 21)
|
||||
assertParseName(`fish.chunk.021`, "fish", 19, "", -1)
|
||||
assertParseName(`fish.chunk.323..tmp_1234567890123456789`, "fish", 321, "", 1234567890123456789)
|
||||
// valid temporary data chunks
|
||||
assertMakeName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
|
||||
assertParseName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
|
||||
assertMakeName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
|
||||
assertParseName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
|
||||
assertMakeName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
|
||||
assertParseName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
|
||||
assertMakeName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
|
||||
assertParseName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
|
||||
|
||||
// valid temporary data chunks (old temporary suffix, only parse)
|
||||
assertParseName(`fish.chunk.004..tmp_0000000047`, "fish", 2, "", "001b")
|
||||
assertParseName(`fish.chunk.323..tmp_9994567890123`, "fish", 321, "", "3jjfvo3wr")
|
||||
|
||||
// parsing invalid data chunk names
|
||||
assertParseName(`fish.chunk.3`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk.001`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk.21`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk.-21`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk.3`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.001`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.21`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.-21`, "", -1, "", "")
|
||||
|
||||
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk.004abcd`, "", -1, "", "") // missing underscore delimiter
|
||||
assertParseName(`fish.chunk.004__1234`, "", -1, "", "") // extra underscore delimiter
|
||||
assertParseName(`fish.chunk.004_123`, "", -1, "", "") // too short temporary suffix
|
||||
assertParseName(`fish.chunk.004_1234567890`, "", -1, "", "") // too long temporary suffix
|
||||
assertParseName(`fish.chunk.004_-1234`, "", -1, "", "") // temporary suffix must be positive
|
||||
assertParseName(`fish.chunk.004_123E`, "", -1, "", "") // uppercase not allowed
|
||||
assertParseName(`fish.chunk.004_12.3`, "", -1, "", "") // punctuation not allowed
|
||||
|
||||
// parsing invalid data chunk names (old temporary suffix)
|
||||
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.323..tmp_12345678901234`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", "")
|
||||
|
||||
// valid control chunks
|
||||
assertMakeName(`fish.chunk._info`, "fish", -1, "info", -1)
|
||||
assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", -1)
|
||||
assertMakeName(`fish.chunk._blockinfo`, "fish", -3, "blockinfo", -1)
|
||||
assertMakeName(`fish.chunk._info`, "fish", -1, "info", "")
|
||||
assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", "")
|
||||
assertMakeName(`fish.chunk._blkinfo`, "fish", -3, "blkinfo", "")
|
||||
assertMakeName(`fish.chunk._x2y`, "fish", -4, "x2y", "")
|
||||
|
||||
assertParseName(`fish.chunk._info`, "fish", -1, "info", -1)
|
||||
assertParseName(`fish.chunk._locks`, "fish", -1, "locks", -1)
|
||||
assertParseName(`fish.chunk._blockinfo`, "fish", -1, "blockinfo", -1)
|
||||
assertParseName(`fish.chunk._info`, "fish", -1, "info", "")
|
||||
assertParseName(`fish.chunk._locks`, "fish", -1, "locks", "")
|
||||
assertParseName(`fish.chunk._blkinfo`, "fish", -1, "blkinfo", "")
|
||||
assertParseName(`fish.chunk._x2y`, "fish", -1, "x2y", "")
|
||||
|
||||
// valid temporary control chunks
|
||||
assertMakeName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21)
|
||||
assertMakeName(`fish.chunk._locks..tmp_0000054321`, "fish", -2, "locks", 54321)
|
||||
assertMakeName(`fish.chunk._uploads..tmp_0000000000`, "fish", -3, "uploads", 0)
|
||||
assertMakeName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -4, "blockinfo", 1234567890123456789)
|
||||
assertMakeName(`fish.chunk._info_0001`, "fish", -1, "info", "1")
|
||||
assertMakeName(`fish.chunk._locks_4321`, "fish", -2, "locks", "4321")
|
||||
assertMakeName(`fish.chunk._uploads_abcd`, "fish", -3, "uploads", "abcd")
|
||||
assertMakeName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -4, "blkinfo", "xyzabcdef")
|
||||
assertMakeName(`fish.chunk._x2y_1aaa`, "fish", -5, "x2y", "1aaa")
|
||||
|
||||
assertParseName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21)
|
||||
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", 54321)
|
||||
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", 0)
|
||||
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789)
|
||||
assertParseName(`fish.chunk._info_0001`, "fish", -1, "info", "0001")
|
||||
assertParseName(`fish.chunk._locks_4321`, "fish", -1, "locks", "4321")
|
||||
assertParseName(`fish.chunk._uploads_9abc`, "fish", -1, "uploads", "9abc")
|
||||
assertParseName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -1, "blkinfo", "xyzabcdef")
|
||||
assertParseName(`fish.chunk._x2y_1aaa`, "fish", -1, "x2y", "1aaa")
|
||||
|
||||
// valid temporary control chunks (old temporary suffix, parse only)
|
||||
assertParseName(`fish.chunk._info..tmp_0000000047`, "fish", -1, "info", "001b")
|
||||
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", "15wx")
|
||||
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", "0000")
|
||||
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123`, "fish", -1, "blkinfo", "3jjfvo3wr")
|
||||
assertParseName(`fish.chunk._x2y..tmp_0000000000`, "fish", -1, "x2y", "0000")
|
||||
|
||||
// parsing invalid control chunk names
|
||||
assertParseName(`fish.chunk.info`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk.locks`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk.uploads`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk.blockinfo`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk.metadata`, "", -1, "", "") // must be prepended by underscore
|
||||
assertParseName(`fish.chunk.info`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.locks`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk.uploads`, "", -1, "", "")
|
||||
|
||||
assertParseName(`fish.chunk._os`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk._futuredata`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk._me_ta`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk._in-fo`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk._.bin`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk._os`, "", -1, "", "") // too short
|
||||
assertParseName(`fish.chunk._metadata`, "", -1, "", "") // too long
|
||||
assertParseName(`fish.chunk._blockinfo`, "", -1, "", "") // way too long
|
||||
assertParseName(`fish.chunk._4me`, "", -1, "", "") // cannot start with digit
|
||||
assertParseName(`fish.chunk._567`, "", -1, "", "") // cannot be all digits
|
||||
assertParseName(`fish.chunk._me_ta`, "", -1, "", "") // punctuation not allowed
|
||||
assertParseName(`fish.chunk._in-fo`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk._.bin`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk._.2xy`, "", -1, "", "")
|
||||
|
||||
assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", -1)
|
||||
// parsing invalid temporary control chunks
|
||||
assertParseName(`fish.chunk._blkinfo1234`, "", -1, "", "") // missing underscore delimiter
|
||||
assertParseName(`fish.chunk._info__1234`, "", -1, "", "") // extra underscore delimiter
|
||||
assertParseName(`fish.chunk._info_123`, "", -1, "", "") // too short temporary suffix
|
||||
assertParseName(`fish.chunk._info_1234567890`, "", -1, "", "") // too long temporary suffix
|
||||
assertParseName(`fish.chunk._info_-1234`, "", -1, "", "") // temporary suffix must be positive
|
||||
assertParseName(`fish.chunk._info_123E`, "", -1, "", "") // uppercase not allowed
|
||||
assertParseName(`fish.chunk._info_12.3`, "", -1, "", "") // punctuation not allowed
|
||||
|
||||
assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", "")
|
||||
|
||||
// short control chunk names: 3 letters ok, 1-2 letters not allowed
|
||||
assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", -1)
|
||||
assertMakeName(`fish.chunk._ext..tmp_0000000021`, "fish", -1, "ext", 21)
|
||||
assertParseName(`fish.chunk._int`, "fish", -1, "int", -1)
|
||||
assertParseName(`fish.chunk._int..tmp_0000000021`, "fish", -1, "int", 21)
|
||||
assertMakeNamePanics("fish", -1, "in", -1)
|
||||
assertMakeNamePanics("fish", -1, "up", 4)
|
||||
assertMakeNamePanics("fish", -1, "x", -1)
|
||||
assertMakeNamePanics("fish", -1, "c", 4)
|
||||
assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", "")
|
||||
assertParseName(`fish.chunk._int`, "fish", -1, "int", "")
|
||||
|
||||
assertMakeNamePanics("fish", -1, "in", "")
|
||||
assertMakeNamePanics("fish", -1, "up", "4")
|
||||
assertMakeNamePanics("fish", -1, "x", "")
|
||||
assertMakeNamePanics("fish", -1, "c", "1z")
|
||||
|
||||
assertMakeName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0")
|
||||
assertMakeName(`fish.chunk._ext_0026`, "fish", -1, "ext", "26")
|
||||
assertMakeName(`fish.chunk._int_0abc`, "fish", -1, "int", "abc")
|
||||
assertMakeName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
|
||||
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
|
||||
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
|
||||
|
||||
assertParseName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0000")
|
||||
assertParseName(`fish.chunk._ext_0026`, "fish", -1, "ext", "0026")
|
||||
assertParseName(`fish.chunk._int_0abc`, "fish", -1, "int", "0abc")
|
||||
assertParseName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
|
||||
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
|
||||
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
|
||||
|
||||
// base file name can sometimes look like a valid chunk name
|
||||
assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", -1)
|
||||
assertParseName(`fish.chunk.003.chunk.005..tmp_0000000021`, "fish.chunk.003", 3, "", 21)
|
||||
assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", -1)
|
||||
assertParseName(`fish.chunk.003.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.003", -1, "blockinfo", 1234567890123456789)
|
||||
assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", "")
|
||||
assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", "")
|
||||
assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", "")
|
||||
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", -1)
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000021`, "fish.chunk.004..tmp_0000000021", 3, "", 21)
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", -1)
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.004..tmp_0000000021", -1, "blockinfo", 1234567890123456789)
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", "")
|
||||
assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", "")
|
||||
assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", "")
|
||||
|
||||
assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", -1)
|
||||
assertParseName(`fish.chunk._info.chunk.005..tmp_0000000021`, "fish.chunk._info", 3, "", 21)
|
||||
assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", -1)
|
||||
assertParseName(`fish.chunk._info.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._info", -1, "blockinfo", 1234567890123456789)
|
||||
assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1)
|
||||
// base file name looking like a valid chunk name (old temporary suffix)
|
||||
assertParseName(`fish.chunk.003.chunk.005..tmp_0000000022`, "fish.chunk.003", 3, "", "000m")
|
||||
assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", "")
|
||||
assertParseName(`fish.chunk._info.chunk.005..tmp_0000000023`, "fish.chunk._info", 3, "", "000n")
|
||||
assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
|
||||
|
||||
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blockinfo..tmp_1234567890123456789", 2, "", -1)
|
||||
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.005..tmp_0000000021`, "fish.chunk._blockinfo..tmp_1234567890123456789", 3, "", 21)
|
||||
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "info", -1)
|
||||
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "blockinfo", 1234567890123456789)
|
||||
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1)
|
||||
assertParseName(`fish.chunk.003.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.003", -1, "blkinfo", "3jjfvo3wr")
|
||||
assertParseName(`fish.chunk._info.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._info", -1, "blkinfo", "3jjfvo3wr")
|
||||
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", "")
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000025`, "fish.chunk.004..tmp_0000000021", 3, "", "000p")
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", "")
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.004..tmp_0000000021", -1, "blkinfo", "3jjfvo3wr")
|
||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", "")

assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.004`, "fish.chunk._blkinfo..tmp_9994567890123", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.005..tmp_0000000026`, "fish.chunk._blkinfo..tmp_9994567890123", 3, "", "000q")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")

assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blkinfo..tmp_1234567890123456789", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.005..tmp_0000000022`, "fish.chunk._blkinfo..tmp_1234567890123456789", 3, "", "000m")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")

// attempts to make invalid chunk names
assertMakeNamePanics("fish", -1, "", -1) // neither data nor control
assertMakeNamePanics("fish", 0, "info", -1) // both data and control
assertMakeNamePanics("fish", -1, "futuredata", -1) // control type too long
assertMakeNamePanics("fish", -1, "123", -1) // digits not allowed
assertMakeNamePanics("fish", -1, "Meta", -1) // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", -1) // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", -1)
assertMakeNamePanics("fish", -1, "info_", -1)
assertMakeNamePanics("fish", -2, ".bind", -3)
assertMakeNamePanics("fish", -2, "bind.", -3)
assertMakeNamePanics("fish", -1, "", "") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "")
assertMakeNamePanics("fish", -1, "info_", "")
assertMakeNamePanics("fish", -2, ".bind", "")
assertMakeNamePanics("fish", -2, "bind.", "")

assertMakeNamePanics("fish", -1, "", 1) // neither data nor control
assertMakeNamePanics("fish", 0, "info", 12) // both data and control
assertMakeNamePanics("fish", -1, "futuredata", 45) // control type too long
assertMakeNamePanics("fish", -1, "123", 123) // digits not allowed
assertMakeNamePanics("fish", -1, "Meta", 456) // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", 321) // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", 15678)
assertMakeNamePanics("fish", -1, "info_", 999)
assertMakeNamePanics("fish", -2, ".bind", 0)
assertMakeNamePanics("fish", -2, "bind.", 0)
assertMakeNamePanics("fish", -1, "", "1") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "23") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "45") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "7") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "abc") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "def") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "mnk") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "xyz") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "5678")
assertMakeNamePanics("fish", -1, "info_", "999")
assertMakeNamePanics("fish", -2, ".bind", "0")
assertMakeNamePanics("fish", -2, "bind.", "0")

assertMakeNamePanics("fish", 0, "", "1234567890") // temporary suffix too long
assertMakeNamePanics("fish", 0, "", "123F4") // uppercase not allowed
assertMakeNamePanics("fish", 0, "", "123.") // punctuation not allowed
assertMakeNamePanics("fish", 0, "", "_123")
}
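The cases above pin down the chunk naming grammar: data chunks look like `<main>.chunk.NNN` and control chunks like `<main>.chunk._<type>`, each optionally carrying a `..tmp_<suffix>` marker. A minimal stand-alone sketch of that grammar - not the chunker's real parser, and with the 3-7 letter bound on control types inferred from the panic cases above - could look like:

package main

import (
	"fmt"
	"regexp"
)

// reChunkName captures: main name, data chunk number, control type, temp suffix.
var reChunkName = regexp.MustCompile(
	`^(.+)\.chunk\.(?:([0-9]{3,})|_([a-z]{3,7}))(?:\.\.tmp_([0-9a-z]+))?$`)

func main() {
	for _, name := range []string{
		"fish.chunk.004",
		"fish.chunk.005..tmp_0000000026",
		"fish.chunk._info",
		"fish.chunk._blkinfo..tmp_9994567890123.chunk._info",
	} {
		if m := reChunkName.FindStringSubmatch(name); m != nil {
			fmt.Printf("main=%q data=%q ctrl=%q tmp=%q\n", m[1], m[2], m[3], m[4])
		} else {
			fmt.Printf("%q is not a valid chunk name\n", name)
		}
	}
}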
func testSmallFileInternals(t *testing.T, f *Fs) {
@@ -383,7 +469,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
billyObj := newFile("billy")

billyChunkName := func(chunkNo int) string {
return f.makeChunkName(billyObj.Remote(), chunkNo, "", -1)
return f.makeChunkName(billyObj.Remote(), chunkNo, "", "")
}

err := f.Mkdir(ctx, billyChunkName(1))
@@ -433,7 +519,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {

// recreate billy in case it was somehow corrupted
willyObj := newFile("willy")
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", -1)
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", "")
f.opt.FailHard = false
willyChunk, err := f.NewObject(ctx, willyChunkName)
f.opt.FailHard = true
@@ -484,7 +570,7 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {

f.opt.FailHard = false
file, fileName := newFile(f, "wreaker")
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", -1))
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", ""))

f.opt.FailHard = false
fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
@@ -532,7 +618,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
filename := path.Join(dir, name)
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")

part := putFile(f.base, f.makeChunkName(filename, 0, "", -1), "oops", "", true)
part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
_ = putFile(f, filename, contents, "upload "+description, false)

obj, err := f.NewObject(ctx, filename)
@@ -63,6 +63,7 @@ func init() {
Name: "password",
Help: "Password or pass phrase for encryption.",
IsPassword: true,
Required: true,
}, {
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
@@ -326,6 +326,17 @@ Photos folder" option in your google drive settings. You can then copy
or move the photos locally and use the date the image was taken
(created) set as the modification date.`,
Advanced: true,
}, {
Name: "use_shared_date",
Default: false,
Help: `Use date file was shared instead of modified date.

Note that, as with "--drive-use-created-date", this flag may have
unexpected consequences when uploading/downloading files.

If both this flag and "--drive-use-created-date" are set, the created
date is used.`,
Advanced: true,
}, {
Name: "list_chunk",
Default: 1000,
@@ -463,6 +474,7 @@ type Options struct {
ImportExtensions string `config:"import_formats"`
AllowImportNameChange bool `config:"allow_import_name_change"`
UseCreatedDate bool `config:"use_created_date"`
UseSharedDate bool `config:"use_shared_date"`
ListChunk int64 `config:"list_chunk"`
Impersonate string `config:"impersonate"`
AlternateExport bool `config:"alternate_export"`
@@ -694,6 +706,9 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
if f.opt.AuthOwnerOnly {
fields += ",owners"
}
if f.opt.UseSharedDate {
fields += ",sharedWithMeTime"
}
if f.opt.SkipChecksumGphotos {
fields += ",spaces"
}
@@ -830,7 +845,7 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
} else {
fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
}
if !config.Confirm() {
if !config.Confirm(false) {
return nil
}
client, err := createOAuthClient(opt, name, m)
@@ -1021,16 +1036,22 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
}

// set root folder for a team drive or query the user root folder
if f.isTeamDrive {
f.rootFolderID = f.opt.TeamDriveID
} else if opt.RootFolderID != "" {
if opt.RootFolderID != "" {
// override root folder if set or cached in the config
f.rootFolderID = opt.RootFolderID
} else if f.isTeamDrive {
f.rootFolderID = f.opt.TeamDriveID
} else {
// Look up the root ID and cache it in the config
rootID, err := f.getRootID()
if err != nil {
return nil, err
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
// 404 means that this scope does not have permission to get the
// root so just use "root"
rootID = "root"
} else {
return nil, err
}
}
f.rootFolderID = rootID
m.Set("root_folder_id", rootID)
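The 404 fallback above, restated as a hypothetical stand-alone helper (not part of the drive backend) to make the control flow explicit: a 404 from the Drive API is interpreted as "this scope may not read the root metadata", so the well-known "root" alias is used instead of failing NewFs.

package sketch

import (
	"github.com/pkg/errors"
	"google.golang.org/api/googleapi"
)

// rootIDOrAlias looks up the root ID but degrades to the "root" alias
// when the scope is not allowed to read the root metadata (HTTP 404).
func rootIDOrAlias(getRootID func() (string, error)) (string, error) {
	rootID, err := getRootID()
	if err == nil {
		return rootID, nil
	}
	if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
		return "root", nil
	}
	return "", err
}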
@@ -1089,6 +1110,8 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
modifiedDate := info.ModifiedTime
if f.opt.UseCreatedDate {
modifiedDate = info.CreatedTime
} else if f.opt.UseSharedDate && info.SharedWithMeTime != "" {
modifiedDate = info.SharedWithMeTime
}
size := info.Size
if f.opt.SizeAsQuota {
@@ -1457,6 +1480,14 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if iErr != nil {
return nil, iErr
}
// If listing the root of a teamdrive and got no entries,
// double check we have access
if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
err = f.teamDriveOK(ctx)
if err != nil {
return nil, err
}
}
return entries, nil
}

@@ -1515,15 +1546,23 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
listRSlices{dirs, paths}.Sort()
var iErr error
_, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
// shared with me items have no parents when at the root
if f.opt.SharedWithMe && len(item.Parents) == 0 && len(paths) == 1 && paths[0] == "" {
item.Parents = dirs
}
for _, parent := range item.Parents {
// only handle parents that are in the requested dirs list
i := sort.SearchStrings(dirs, parent)
if i == len(dirs) || dirs[i] != parent {
continue
var i int
// If only one item in paths then no need to search for the ID
// assuming google drive is doing its job properly.
//
// Note that we are at the root when len(paths) == 1 && paths[0] == ""
if len(paths) == 1 {
// don't check parents at root because
// - shared with me items have no parents at the root
// - if using a root alias, eg "root" or "appDataFolder" the ID won't match
i = 0
} else {
// only handle parents that are in the requested dirs list if not at root
i = sort.SearchStrings(dirs, parent)
if i == len(dirs) || dirs[i] != parent {
continue
}
}
remote := path.Join(paths[i], item.Name)
entry, err := f.itemToDirEntry(remote, item)
@@ -1594,6 +1633,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
out := make(chan error, fs.Config.Checkers)
list := walk.NewListRHelper(callback)
overflow := []listREntry{}
listed := 0

cb := func(entry fs.DirEntry) error {
mu.Lock()
@@ -1606,6 +1646,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
overflow = append(overflow, listREntry{d.ID(), d.Remote()})
}
}
listed++
return list.Add(entry)
}

@@ -1662,7 +1703,21 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return err
}

return list.Flush()
err = list.Flush()
if err != nil {
return err
}

// If listing the root of a teamdrive and got no entries,
// double check we have access
if f.isTeamDrive && listed == 0 && f.root == "" && dir == "" {
err = f.teamDriveOK(ctx)
if err != nil {
return err
}
}

return nil
}

// itemToDirEntry converts a drive.File to a fs.DirEntry.
@@ -2035,9 +2090,30 @@ func (f *Fs) CleanUp(ctx context.Context) error {
return nil
}

// teamDriveOK checks to see if we can access the team drive
func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
if !f.isTeamDrive {
return nil
}
var td *drive.Drive
err = f.pacer.Call(func() (bool, error) {
td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "failed to get Team/Shared Drive info")
}
fs.Debugf(f, "read info from team drive %q", td.Name)
return err
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if f.isTeamDrive {
err := f.teamDriveOK(ctx)
if err != nil {
return nil, err
}
// Teamdrives don't appear to have a usage API so just return empty
return &fs.Usage{}, nil
}
@@ -113,7 +113,7 @@ var (

// Register with Fs
func init() {
DbHashType = hash.RegisterHash("Dropbox", 64, dbhash.New)
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
fs.Register(&fs.RegInfo{
Name: "dropbox",
Description: "Dropbox",
@@ -46,13 +46,57 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }

// LoginToken is a struct representing the login token generated in the WebUI
type LoginToken struct {
Username string `json:"username"`
Realm string `json:"realm"`
WellKnownLink string `json:"well_known_link"`
AuthToken string `json:"auth_token"`
}

// WellKnown contains some configuration parameters for setting up endpoints
type WellKnown struct {
Issuer string `json:"issuer"`
AuthorizationEndpoint string `json:"authorization_endpoint"`
TokenEndpoint string `json:"token_endpoint"`
TokenIntrospectionEndpoint string `json:"token_introspection_endpoint"`
UserinfoEndpoint string `json:"userinfo_endpoint"`
EndSessionEndpoint string `json:"end_session_endpoint"`
JwksURI string `json:"jwks_uri"`
CheckSessionIframe string `json:"check_session_iframe"`
GrantTypesSupported []string `json:"grant_types_supported"`
ResponseTypesSupported []string `json:"response_types_supported"`
SubjectTypesSupported []string `json:"subject_types_supported"`
IDTokenSigningAlgValuesSupported []string `json:"id_token_signing_alg_values_supported"`
UserinfoSigningAlgValuesSupported []string `json:"userinfo_signing_alg_values_supported"`
RequestObjectSigningAlgValuesSupported []string `json:"request_object_signing_alg_values_supported"`
ResponseNodesSupported []string `json:"response_modes_supported"`
RegistrationEndpoint string `json:"registration_endpoint"`
TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported"`
TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported"`
ClaimsSupported []string `json:"claims_supported"`
ClaimTypesSupported []string `json:"claim_types_supported"`
ClaimsParameterSupported bool `json:"claims_parameter_supported"`
ScopesSupported []string `json:"scopes_supported"`
RequestParameterSupported bool `json:"request_parameter_supported"`
RequestURIParameterSupported bool `json:"request_uri_parameter_supported"`
CodeChallengeMethodsSupported []string `json:"code_challenge_methods_supported"`
TLSClientCertificateBoundAccessTokens bool `json:"tls_client_certificate_bound_access_tokens"`
IntrospectionEndpoint string `json:"introspection_endpoint"`
}

// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
AccessToken string `json:"access_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
RefreshExpiresIn int32 `json:"refresh_expires_in"`
RefreshToken string `json:"refresh_token"`
TokenType string `json:"token_type"`
IDToken string `json:"id_token"`
NotBeforePolicy int32 `json:"not-before-policy"`
SessionState string `json:"session_state"`
Scope string `json:"scope"`
}

// JSON structures returned by new API
@@ -4,12 +4,13 @@ import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"os"
@@ -25,7 +26,6 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
@@ -41,32 +41,29 @@ const enc = encodings.JottaCloud

// Globals
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
tokenURL = "https://api.jottacloud.com/auth/v1/token"
registerURL = "https://api.jottacloud.com/auth/v1/register"
cachePrefix = "rclone-jcmd5-"
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
configClientID = "client_id"
configClientSecret = "client_secret"
configDevice = "device"
configMountpoint = "mountpoint"
charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
cachePrefix = "rclone-jcmd5-"
configDevice = "device"
configMountpoint = "mountpoint"
configTokenURL = "tokenURL"
configVersion = 1
)

var (
// Description of how to auth for this app for a personal account
oauthConfig = &oauth2.Config{
ClientID: "jottacli",
Endpoint: oauth2.Endpoint{
AuthURL: tokenURL,
TokenURL: tokenURL,
AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
},
RedirectURL: oauthutil.RedirectLocalhostURL,
}
@@ -81,43 +78,37 @@ func init() {
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm() {
return
}
}

srv := rest.NewClient(fshttp.NewClient(fs.Config))
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has its own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm() {
deviceRegistration, err := registerDevice(ctx, srv)
refresh := false
if version, ok := m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
if err != nil {
log.Fatalf("Failed to register device: %v", err)
log.Fatalf("Failed to parse config version - corrupted config")
}

m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
refresh = ver != configVersion
}

clientID, ok := m.Get(configClientID)
if !ok {
clientID = rcloneClientID
if refresh {
fmt.Printf("Config outdated - refreshing\n")
} else {
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm(false) {
return
}
}
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)

fmt.Printf("Username> ")
username := config.ReadLine()
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
clientConfig := *fs.Config
clientConfig.UserAgent = "JottaCli 0.6.18626 windows-amd64"
srv := rest.NewClient(fshttp.NewClient(&clientConfig))

token, err := doAuth(ctx, srv, username, password)
fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()

token, err := doAuth(ctx, srv, loginToken, m)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
@@ -127,7 +118,7 @@ func init() {
}

fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm() {
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
@@ -143,6 +134,8 @@ func init() {
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}

m.Set("configVersion", strconv.Itoa(configVersion))
},
Options: []fs.Option{{
Name: "md5_memory_limit",
@@ -249,67 +242,57 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// registerDevice registers a new device for use with the jottacloud API
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
// random generator to generate random device names
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
randonDeviceNamePartLength := 21
randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
for i := range randomDeviceNamePart {
randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
}
randomDeviceName := "rclone-" + string(randomDeviceNamePart)
fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)

values := url.Values{}
values.Set("device_id", randomDeviceName)

opts := rest.Opts{
Method: "POST",
RootURL: registerURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
}

var deviceRegistration *api.DeviceRegistrationResponse
_, err = srv.CallJSON(ctx, &opts, nil, &deviceRegistration)
return deviceRegistration, err
}

// doAuth runs the actual token request
func doAuth(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
func doAuth(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
loginTokenBytes, err := base64.StdEncoding.DecodeString(loginTokenBase64)
if err != nil {
return token, err
}

// decode login token
var loginToken api.LoginToken
decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
err = decoder.Decode(&loginToken)
if err != nil {
return token, err
}

// retrieve endpoint urls
opts := rest.Opts{
Method: "GET",
RootURL: loginToken.WellKnownLink,
}
var wellKnown api.WellKnown
_, err = srv.CallJSON(ctx, &opts, nil, &wellKnown)
if err != nil {
return token, err
}

// save the tokenurl
oauthConfig.Endpoint.AuthURL = wellKnown.TokenEndpoint
oauthConfig.Endpoint.TokenURL = wellKnown.TokenEndpoint
m.Set(configTokenURL, wellKnown.TokenEndpoint)

// prepare our token request with username and password
values := url.Values{}
values.Set("grant_type", "PASSWORD")
values.Set("password", password)
values.Set("username", username)
values.Set("client_id", oauthConfig.ClientID)
values.Set("client_secret", oauthConfig.ClientSecret)
opts := rest.Opts{
values.Set("client_id", "jottacli")
values.Set("grant_type", "password")
values.Set("password", loginToken.AuthToken)
values.Set("scope", "offline_access+openid")
values.Set("username", loginToken.Username)
values.Encode()
opts = rest.Opts{
Method: "POST",
RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded",
Parameters: values,
Body: strings.NewReader(values.Encode()),
}

// do the first request
var jsonToken api.TokenJSON
resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil {
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
if resp != nil {
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
fmt.Printf("This account uses 2 factor authentication; you will receive a verification code via SMS.\n")
fmt.Printf("Enter verification code> ")
authCode := config.ReadLine()

authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
}
}
return token, err
}

token.AccessToken = jsonToken.AccessToken
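For illustration, the login token pasted at the `Login Token>` prompt is base64-encoded JSON matching api.LoginToken above. A self-contained sketch with invented placeholder values (the field names are real, the URL and secrets are not):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

type loginToken struct {
	Username      string `json:"username"`
	Realm         string `json:"realm"`
	WellKnownLink string `json:"well_known_link"`
	AuthToken     string `json:"auth_token"`
}

func main() {
	// What the WebUI hands the user, before base64 encoding (placeholders).
	raw := `{"username":"jotta-user","realm":"jottacloud",` +
		`"well_known_link":"https://id.example/.well-known/openid-configuration",` +
		`"auth_token":"opaque-secret"}`
	pasted := base64.StdEncoding.EncodeToString([]byte(raw))

	// What doAuth does first: decode and unmarshal.
	decoded, err := base64.StdEncoding.DecodeString(pasted)
	if err != nil {
		panic(err)
	}
	var lt loginToken
	if err := json.Unmarshal(decoded, &lt); err != nil {
		panic(err)
	}
	fmt.Println(lt.Username, lt.WellKnownLink)
}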
@@ -471,29 +454,6 @@ func (f *Fs) filePath(file string) string {
return urlPathEscape(f.filePathRaw(file))
}

// Jottacloud requires the grant_type 'refresh_token' string
// to be uppercase and throws a 400 Bad Request if we use the
// lower case used by the oauth2 module
//
// This filter catches all refresh requests, reads the body,
// changes the case and then sends it on
func grantTypeFilter(req *http.Request) {
if tokenURL == req.URL.String() {
// read the entire body
refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil {
return
}
_ = req.Body.Close()

// make the refresh token upper case
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))

// set the new ReadCloser (with a dummy Close())
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
}
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
@@ -504,35 +464,37 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}

rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)

clientID, ok := m.Get(configClientID)
if !ok {
clientID = rcloneClientID
// Check config version
var ok bool
var version string
if version, ok = m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
if err != nil {
return nil, errors.New("Failed to parse config version")
}
ok = ver == configVersion
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
return nil, errors.New("Outdated config - please reconfigure this backend")
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)

// the oauth client for the api servers needs
// a filter to fix the grant_type issues (see above)
// if custom endpoints are set use them else stick with defaults
if tokenURL, ok := m.Get(configTokenURL); ok {
oauthConfig.Endpoint.TokenURL = tokenURL
// jottacloud is weird. we need to use the tokenURL as authURL
oauthConfig.Endpoint.AuthURL = tokenURL
}

// Create OAuth Client
baseClient := fshttp.NewClient(fs.Config)
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
do.SetRequestFilter(grantTypeFilter)
} else {
fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
}

rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)

f := &Fs{
name: name,
root: root,
@@ -16,6 +16,7 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"

httpclient "github.com/koofr/go-httpclient"
@@ -259,7 +260,9 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
if err != nil {
return nil, err
}
client := koofrclient.NewKoofrClient(opt.Endpoint, false)
httpClient := httpclient.New()
httpClient.Client = fshttp.NewClient(fs.Config)
client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
client.HTTPClient.Headers.Set("Authorization", basicAuth)
@@ -350,7 +350,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
err = errors.Wrapf(err, "failed to open directory %q", dir)
fs.Errorf(dir, "%v", err)
if isPerm {
accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
err = nil // ignore error but fail sync
}
return nil, err
@@ -386,7 +386,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if fierr != nil {
err = errors.Wrapf(err, "failed to read directory %q", namepath)
fs.Errorf(dir, "%v", fierr)
accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
}
fis = append(fis, fi)
@@ -409,7 +409,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Skip bad symlinks
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
fs.Errorf(newRemote, "Listing error: %v", err)
accounting.Stats(ctx).Error(err)
err = accounting.Stats(ctx).Error(err)
continue
}
if err != nil {
@@ -820,10 +820,10 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
return 0, errors.Wrap(err, "can't read status of source file while transferring")
}
if file.o.size != fi.Size() {
return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size()))
}
if !file.o.modTime.Equal(fi.ModTime()) {
return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime()))
}
}

@@ -956,7 +956,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if !o.translatedLink {
f, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
if runtime.GOOS == "windows" && os.IsPermission(err) {
// If permission denied on Windows we might be trying to update a
// hidden file, in which case try opening without CREATE
// See: https://stackoverflow.com/questions/13215716/ioerror-errno-13-permission-denied-when-trying-to-open-hidden-file-in-w-mod
f, err = file.OpenFile(o.path, os.O_WRONLY|os.O_TRUNC, 0666)
if err != nil {
return err
}
} else {
return err
}
}
// Pre-allocate the file for performance reasons
err = preAllocate(src.Size(), f)
@@ -1084,17 +1094,17 @@ func (o *Object) Remove(ctx context.Context) error {

func cleanRootPath(s string, noUNC bool) string {
if runtime.GOOS == "windows" {
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)

if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)

if !noUNC {
// Convert to UNC
s = uncPath(s)

@@ -54,7 +54,7 @@ var testsWindows = [][2]string{
{`\\?\UNC\theserver\dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
{`//?/UNC/theserver/dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
{`c:/temp`, `c:\temp`},
{`/temp/file.txt`, `\temp\file.txt`},
{`C:/temp/file.txt`, `C:\temp\file.txt`},
{`c:\!\"#¤%&/()=;:*^?+-`, `c:\!\"#¤%&\()=;:*^?+-`},
{`c:\<>"|?*:&\<>"|?*:&\<>"|?*:&`, `c:\<>"|?*:&\<>"|?*:&\<>"|?*:&`},
}
@@ -351,8 +351,13 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// instead of simply using `drives/driveID/root:/itemPath` because it works for
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
//
// If `relPath` == '', do not append the slash (See #3664)
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(enc.FromStandardPath(relPath))))
if relPath != "" {
relPath = "/" + withTrailingColon(rest.URLPathEscape(enc.FromStandardPath(relPath)))
}
opts := newOptsCall(normalizedID, "GET", ":"+relPath)
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(resp, err)
@@ -269,7 +269,7 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
cf.Protocol = protocol
cf.Host = host
cf.Port = port
cf.ConnectionRetries = opt.ConnectionRetries
// unsupported in v3.1: cf.ConnectionRetries = opt.ConnectionRetries
cf.Connection = fshttp.NewClient(fs.Config)

return qs.Init(cf)
backend/s3/s3.go
@@ -14,7 +14,9 @@ What happens if you CTRL-C a multipart upload
*/

import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/xml"
@@ -24,8 +26,10 @@ import (
"net/url"
"path"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"

"github.com/aws/aws-sdk-go/aws"
@@ -33,12 +37,12 @@ import (
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/ncw/swift"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
@@ -51,7 +55,9 @@ import (
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup"
)

const enc = encodings.S3
@@ -159,6 +165,9 @@ func init() {
}, {
Value: "ap-south-1",
Help: "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
}, {
Value: "ap-east-1",
Help: "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
@@ -427,6 +436,9 @@ func init() {
}, {
Value: "ap-south-1",
Help: "Asia Pacific (Mumbai)",
}, {
Value: "ap-east-1",
Help: "Asia Pacific (Hong Kong)",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region.",
@@ -693,16 +705,37 @@ The minimum is 0 and the maximum is 5GB.`,
Name: "chunk_size",
Help: `Chunk size to use for uploading.

When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.
When uploading files larger than upload_cutoff or files with unknown
size (eg from "rclone rcat" or uploaded with "rclone mount" or google
photos or google docs) they will be uploaded as multipart uploads
using this chunk size.

Note that "--s3-upload-concurrency" chunks of this size are buffered
in memory per transfer.

If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
enough memory, then increasing this will speed up the transfers.

Rclone will automatically increase the chunk size when uploading a
large file of known size to stay below the 10,000 chunks limit.

Files of unknown size are uploaded with the configured
chunk_size. Since the default chunk size is 5MB and there can be at
most 10,000 chunks, this means that by default the maximum size of
file you can stream upload is 48GB. If you wish to stream upload
larger files then you will need to increase chunk_size.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy

Any files larger than this that need to be server side copied will be
copied in chunks of this size.

The minimum is 0 and the maximum is 5GB.`,
Default: fs.SizeSuffix(maxSizeForCopy),
Advanced: true,
}, {
Name: "disable_checksum",
Help: "Don't store MD5 checksum with object metadata",
@@ -733,7 +766,9 @@ if false then rclone will use virtual path style. See [the AWS S3
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
for more info.

Some providers (eg Aliyun OSS or Netease COS) require this set to false.`,
Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
false - rclone will do this automatically based on the provider
setting.`,
Default: true,
Advanced: true,
}, {
@@ -765,19 +800,29 @@ WARNING: Storing parts of an incomplete multipart upload counts towards space us
`,
Default: false,
Advanced: true,
}, {
Name: "list_chunk",
Help: `Size of listing chunk (response list for each ListObject S3 request).

This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification.
Most services truncate the response list to 1000 objects even if requested more than that.
In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html).
In Ceph, this can be increased with the "rgw list buckets max chunk" option.
`,
Default: 1000,
Advanced: true,
}},
})
}
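To make the "48GB" figure in the chunk_size help text concrete (assuming the 5 MiB default part size and the 10,000 part limit):

// maxStreamSize = maxUploadParts * minChunkSize
//               = 10000 * 5 MiB = 50000 MiB ≈ 48.8 GiB, rounded to "48GB".
// Raising chunk_size lifts this ceiling linearly, at the cost of
// upload_concurrency buffers of chunk_size each held in memory.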
// Constants
const (
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
listChunkSize = 1000 // number of items to read at once
maxRetries = 10 // number of retries to make of operations
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
maxRetries = 10 // number of retries to make of operations
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
maxUploadParts = 10000 // maximum allowed number of parts in a multi-part upload
minChunkSize = fs.SizeSuffix(1024 * 1024 * 5)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
@@ -798,6 +843,7 @@ type Options struct {
SSEKMSKeyID string `config:"sse_kms_key_id"`
StorageClass string `config:"storage_class"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableChecksum bool `config:"disable_checksum"`
SessionToken string `config:"session_token"`
@@ -806,6 +852,7 @@ type Options struct {
V2Auth bool `config:"v2_auth"`
UseAccelerateEndpoint bool `config:"use_accelerate_endpoint"`
LeavePartsOnError bool `config:"leave_parts_on_error"`
ListChunk int64 `config:"list_chunk"`
}

// Fs represents a remote s3 server
@@ -961,7 +1008,12 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
Client: ec2metadata.New(session.New(), &aws.Config{
HTTPClient: lowTimeoutClient,
}),
ExpiryWindow: 3,
ExpiryWindow: 3 * time.Minute,
},

// Pick up IAM role if we are in EKS
&stscreds.WebIdentityRoleProvider{
ExpiryWindow: 3 * time.Minute,
},
}
cred := credentials.NewChainCredentials(providers)
@@ -984,7 +1036,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
if opt.Region == "" {
opt.Region = "us-east-1"
}
if opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.UseAccelerateEndpoint {
if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.UseAccelerateEndpoint {
opt.ForcePathStyle = false
}
awsConfig := aws.NewConfig().
@@ -1232,7 +1284,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
if directory != "" {
directory += "/"
}
maxKeys := int64(listChunkSize)
delimiter := ""
if !recurse {
delimiter = "/"
@@ -1260,7 +1311,7 @@
Bucket: &bucket,
Delimiter: &delimiter,
Prefix: &directory,
MaxKeys: &maxKeys,
MaxKeys: &f.opt.ListChunk,
Marker: marker,
}
if urlEncodeListings {
@@ -1376,6 +1427,12 @@
} else {
marker = resp.NextMarker
}
if urlEncodeListings {
*marker, err = url.QueryUnescape(*marker)
if err != nil {
return errors.Wrapf(err, "failed to URL decode NextMarker %q", *marker)
}
}
}
return nil
}
@@ -1642,7 +1699,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
req.StorageClass = &f.opt.StorageClass
}

if srcSize >= int64(f.opt.UploadCutoff) {
if srcSize >= int64(f.opt.CopyCutoff) {
return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, srcSize)
}
return f.pacer.Call(func() (bool, error) {
@@ -1655,8 +1712,8 @@ func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
start := partIndex * partSize
var ends string
if partIndex == numParts-1 {
if totalSize >= 0 {
ends = strconv.FormatInt(totalSize, 10)
if totalSize >= 1 {
ends = strconv.FormatInt(totalSize-1, 10)
}
} else {
ends = strconv.FormatInt(start+partSize-1, 10)
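The calculateRange fix above in isolation: HTTP Range headers are inclusive, so the last byte of the final part is totalSize-1, not totalSize. A hypothetical helper (not the rclone function itself) showing the corrected arithmetic:

package sketch

// lastByteOfPart returns the inclusive end offset of a part.
// For a 10-byte object split into 4-byte parts, part 2 of 3 ends at
// byte 9 ("bytes=8-9"); the old code produced 10 ("bytes=8-10").
func lastByteOfPart(partSize, partIndex, numParts, totalSize int64) int64 {
	if partIndex == numParts-1 && totalSize >= 1 {
		return totalSize - 1
	}
	return partIndex*partSize + partSize - 1
}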
@@ -1693,7 +1750,7 @@ func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBuck
}
}()

partSize := int64(f.opt.ChunkSize)
partSize := int64(f.opt.CopyCutoff)
numParts := (srcSize-1)/partSize + 1

var parts []*s3.CompletedPart
@@ -1921,11 +1978,6 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
}
o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))

if o.bytes >= maxSizeForCopy {
fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
return nil
}

// Can't update metadata here, so return this error to force a recopy
if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" {
return fs.ErrorCantSetModTime
@@ -1982,6 +2034,195 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return resp.Body, nil
}

var warnStreamUpload sync.Once

func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (err error) {
f := o.fs

// make concurrency machinery
concurrency := f.opt.UploadConcurrency
if concurrency < 1 {
concurrency = 1
}
bufs := make(chan []byte, concurrency)
defer func() {
// empty the channel on exit
close(bufs)
for range bufs {
}
}()
for i := 0; i < concurrency; i++ {
bufs <- nil
}

// calculate size of parts
partSize := int(f.opt.ChunkSize)

// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 5MB). With a maximum number of parts (10,000) this will be a file of
// 48GB which seems like a not too unreasonable limit.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
f.opt.ChunkSize, fs.SizeSuffix(partSize*maxUploadParts))
})
} else {
// Adjust partSize until the number of parts is small enough.
if size/int64(partSize) >= maxUploadParts {
// Calculate partition size rounded up to the nearest MB
partSize = int((((size / maxUploadParts) >> 20) + 1) << 20)
}
}

var cout *s3.CreateMultipartUploadOutput
err = f.pacer.Call(func() (bool, error) {
var err error
cout, err = f.c.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
Bucket: req.Bucket,
ACL: req.ACL,
Key: req.Key,
ContentType: req.ContentType,
Metadata: req.Metadata,
ServerSideEncryption: req.ServerSideEncryption,
SSEKMSKeyId: req.SSEKMSKeyId,
StorageClass: req.StorageClass,
})
return f.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "multipart upload failed to initialise")
}
uid := cout.UploadId

defer func() {
if o.fs.opt.LeavePartsOnError {
return
}
if err != nil {
// We can try to abort the upload, but ignore the error.
fs.Debugf(o, "Cancelling multipart upload")
errCancel := f.pacer.Call(func() (bool, error) {
_, err := f.c.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
Bucket: req.Bucket,
Key: req.Key,
UploadId: uid,
RequestPayer: req.RequestPayer,
})
return f.shouldRetry(err)
})
if errCancel != nil {
fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
}
}
}()

var (
g, gCtx = errgroup.WithContext(ctx)
finished = false
partsMu sync.Mutex // to protect parts
parts []*s3.CompletedPart
off int64
)

for partNum := int64(1); !finished; partNum++ {
// Get a block of memory from the channel (which limits concurrency)
buf := <-bufs
if buf == nil {
buf = make([]byte, partSize)
}

// Read the chunk
var n int
n, err = readers.ReadFill(in, buf) // this can never return 0, nil
if err == io.EOF {
if n == 0 && partNum != 1 { // end if no data and if not first chunk
break
}
finished = true
} else if err != nil {
return errors.Wrap(err, "multipart upload failed to read source")
}
buf = buf[:n]

partNum := partNum
fs.Debugf(o, "multipart upload starting chunk %d size %v offset %v/%v", partNum, fs.SizeSuffix(n), fs.SizeSuffix(off), fs.SizeSuffix(size))
off += int64(n)
g.Go(func() (err error) {
partLength := int64(len(buf))

// create checksum of buffer for integrity checking
md5sumBinary := md5.Sum(buf)
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])

err = f.pacer.Call(func() (bool, error) {
uploadPartReq := &s3.UploadPartInput{
Body: bytes.NewReader(buf),
Bucket: req.Bucket,
Key: req.Key,
PartNumber: &partNum,
UploadId: uid,
ContentMD5: &md5sum,
ContentLength: &partLength,
RequestPayer: req.RequestPayer,
SSECustomerAlgorithm: req.SSECustomerAlgorithm,
SSECustomerKey: req.SSECustomerKey,
SSECustomerKeyMD5: req.SSECustomerKeyMD5,
}
uout, err := f.c.UploadPartWithContext(gCtx, uploadPartReq)
if err != nil {
if partNum <= int64(concurrency) {
return f.shouldRetry(err)
}
// retry all chunks once we have done the first batch
return true, err
}
partsMu.Lock()
parts = append(parts, &s3.CompletedPart{
PartNumber: &partNum,
ETag: uout.ETag,
})
partsMu.Unlock()

return false, nil
})

// return the memory
bufs <- buf[:partSize]

if err != nil {
return errors.Wrap(err, "multipart upload failed to upload part")
}
return nil
})
}
err = g.Wait()
if err != nil {
return err
}

// sort the completed parts by part number
sort.Slice(parts, func(i, j int) bool {
return *parts[i].PartNumber < *parts[j].PartNumber
})

err = f.pacer.Call(func() (bool, error) {
_, err := f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
Bucket: req.Bucket,
Key: req.Key,
MultipartUpload: &s3.CompletedMultipartUpload{
Parts: parts,
},
RequestPayer: req.RequestPayer,
UploadId: uid,
})
return f.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "multipart upload failed to finalise")
}
return nil
}
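The buffer handling in uploadMultipart above is a channel-as-pool idiom: a buffered channel of byte slices simultaneously caps the number of in-flight parts and recycles their memory. A stand-alone sketch of just that idiom (simplified, not the backend code):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const concurrency = 4
	const partSize = 8

	bufs := make(chan []byte, concurrency)
	for i := 0; i < concurrency; i++ {
		bufs <- nil // allocated lazily on first use
	}

	var wg sync.WaitGroup
	for part := 1; part <= 10; part++ {
		buf := <-bufs // blocks once `concurrency` parts are in flight
		if buf == nil {
			buf = make([]byte, partSize)
		}
		wg.Add(1)
		go func(part int, buf []byte) {
			defer wg.Done()
			fmt.Printf("uploading part %d with a %d-byte buffer\n", part, len(buf))
			bufs <- buf // return the buffer, freeing a slot
		}(part, buf)
	}
	wg.Wait()
}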
// Update the Object from in with modTime and size
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	bucket, bucketPath := o.split()
@@ -1993,35 +2234,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	size := src.Size()

	multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
	var uploader *s3manager.Uploader
	if multipart {
		uploader = s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
			u.Concurrency = o.fs.opt.UploadConcurrency
			u.LeavePartsOnError = o.fs.opt.LeavePartsOnError
			u.S3 = o.fs.c
			u.PartSize = int64(o.fs.opt.ChunkSize)

			if size == -1 {
				// Make parts as small as possible while still being able to upload to the
				// S3 file size limit. Rounded up to nearest MB.
				u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
				return
			}
			// Adjust PartSize until the number of parts is small enough.
			if size/u.PartSize >= s3manager.MaxUploadParts {
				// Calculate partition size rounded up to the nearest MB
				u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
			}
		})
	}

	// Set the mtime in the meta data
	metadata := map[string]*string{
		metaMtime: aws.String(swift.TimeToFloatString(modTime)),
	}

	// read the md5sum if available for non multipart and if
	// disable checksum isn't present.
	// read the md5sum if available
	// - for non multipart
	//   - so we can add a ContentMD5
	// - for multipart provided checksums aren't disabled
	//   - so we can add the md5sum in the metadata as metaMD5Hash
	var md5sum string
	if !multipart || !o.fs.opt.DisableChecksum {
		hash, err := src.Hash(ctx, hash.MD5)
@@ -2038,52 +2261,32 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

	// Guess the content type
	mimeType := fs.MimeType(ctx, src)
	req := s3.PutObjectInput{
		Bucket:      &bucket,
		ACL:         &o.fs.opt.ACL,
		Key:         &bucketPath,
		ContentType: &mimeType,
		Metadata:    metadata,
	}
	if md5sum != "" {
		req.ContentMD5 = &md5sum
	}
	if o.fs.opt.ServerSideEncryption != "" {
		req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
	}
	if o.fs.opt.SSEKMSKeyID != "" {
		req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
	}
	if o.fs.opt.StorageClass != "" {
		req.StorageClass = &o.fs.opt.StorageClass
	}

	if multipart {
		req := s3manager.UploadInput{
			Bucket:      &bucket,
			ACL:         &o.fs.opt.ACL,
			Key:         &bucketPath,
			Body:        in,
			ContentType: &mimeType,
			Metadata:    metadata,
			//ContentLength: &size,
		}
		if o.fs.opt.ServerSideEncryption != "" {
			req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
		}
		if o.fs.opt.SSEKMSKeyID != "" {
			req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
		}
		if o.fs.opt.StorageClass != "" {
			req.StorageClass = &o.fs.opt.StorageClass
		}
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			_, err = uploader.UploadWithContext(ctx, &req)
			return o.fs.shouldRetry(err)
		})
		err = o.uploadMultipart(ctx, &req, size, in)
		if err != nil {
			return err
		}
	} else {
		req := s3.PutObjectInput{
			Bucket:      &bucket,
			ACL:         &o.fs.opt.ACL,
			Key:         &bucketPath,
			ContentType: &mimeType,
			Metadata:    metadata,
		}
		if md5sum != "" {
			req.ContentMD5 = &md5sum
		}
		if o.fs.opt.ServerSideEncryption != "" {
			req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
		}
		if o.fs.opt.SSEKMSKeyID != "" {
			req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
		}
		if o.fs.opt.StorageClass != "" {
			req.StorageClass = &o.fs.opt.StorageClass
		}

		// Create the request
		putObj, _ := o.fs.c.PutObjectRequest(&req)
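The part-size arithmetic above rounds the quotient up to the next whole MiB: divide by the maximum part count, shift right 20 bits (integer-divide by 1 MiB), add one, and shift back. A minimal standalone sketch of that calculation (the constants are assumptions based on the AWS SDK's `s3manager.MaxUploadParts` of 10,000 and the 5 TiB S3 object size limit; the function name is mine, not rclone's):

```
package main

import "fmt"

const (
	maxUploadParts = 10000   // s3manager.MaxUploadParts in the AWS SDK
	maxFileSize    = 5 << 40 // 5 TiB, the S3 object size limit
)

// partSizeFor divides size by the maximum number of parts, then rounds
// the result up to the next whole MiB so the upload never exceeds the
// part count limit.
func partSizeFor(size int64) int64 {
	return (((size / maxUploadParts) >> 20) + 1) << 20
}

func main() {
	fmt.Println(partSizeFor(maxFileSize)) // 550502400, i.e. 525 MiB
	fmt.Println(partSizeFor(100 << 30))   // 11534336, i.e. 11 MiB for a 100 GiB file
}
```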
@@ -29,15 +29,17 @@ import (
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/env"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/readers"
	sshagent "github.com/xanzy/ssh-agent"
	"golang.org/x/crypto/ssh"
	"golang.org/x/time/rate"
)

const (
	connectionsPerSecond = 10 // don't make more than this many ssh connections/s
	hashCommandNotSupported = "none"
	minSleep = 100 * time.Millisecond
	maxSleep = 2 * time.Second
	decayConstant = 2 // bigger for slower decay, exponential
)

var (
@@ -154,6 +156,11 @@ Home directory can be found in a shared folder called "home"
	Default: "",
	Help: "The command used to read sha1 hashes. Leave blank for autodetect.",
	Advanced: true,
}, {
	Name: "skip_links",
	Default: false,
	Help: "Set to skip any symlinks and any other non regular files.",
	Advanced: true,
}},
}
fs.Register(fsi)
@@ -175,6 +182,7 @@ type Options struct {
	SetModTime bool `config:"set_modtime"`
	Md5sumCommand string `config:"md5sum_command"`
	Sha1sumCommand string `config:"sha1sum_command"`
	SkipLinks bool `config:"skip_links"`
}

// Fs stores the interface to the remote SFTP files
@@ -190,7 +198,7 @@ type Fs struct {
	cachedHashes *hash.Set
	poolMu sync.Mutex
	pool []*conn
	connLimit *rate.Limiter // for limiting number of connections per second
	pacer *fs.Pacer // pacer for operations
}

// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -270,10 +278,6 @@ func (c *conn) closed() error {
// Open a new connection to the SFTP server.
func (f *Fs) sftpConnection() (c *conn, err error) {
	// Rate limit rate of new connections
	err = f.connLimit.Wait(context.Background())
	if err != nil {
		return nil, errors.Wrap(err, "limiter failed in connect")
	}
	c = &conn{
		err: make(chan error, 1),
	}
@@ -307,7 +311,14 @@ func (f *Fs) getSftpConnection() (c *conn, err error) {
	if c != nil {
		return c, nil
	}
	return f.sftpConnection()
	err = f.pacer.Call(func() (bool, error) {
		c, err = f.sftpConnection()
		if err != nil {
			return true, err
		}
		return false, nil
	})
	return c, err
}

// Return an SFTP connection to the pool
@@ -465,7 +476,7 @@ func NewFsWithConnection(ctx context.Context, name string, root string, m config
	config: sshConfig,
	url: "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
	mkdirLock: newStringLock(),
	connLimit: rate.NewLimiter(rate.Limit(connectionsPerSecond), 1),
	pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
	CanHaveEmptyDirectories: true,
@@ -595,12 +606,16 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	remote := path.Join(dir, info.Name())
	// If file is a symlink (not a regular file is the best cross platform test we can do), do a stat to
	// pick up the size and type of the destination, instead of the size and type of the symlink.
	if !info.Mode().IsRegular() {
	if !info.Mode().IsRegular() && !info.IsDir() {
		if f.opt.SkipLinks {
			// skip non regular file if SkipLinks is set
			continue
		}
		oldInfo := info
		info, err = f.stat(remote)
		if err != nil {
			if !os.IsNotExist(err) {
				fs.Errorf(remote, "stat of non-regular file/dir failed: %v", err)
				fs.Errorf(remote, "stat of non-regular file failed: %v", err)
			}
			info = oldInfo
		}
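This hunk layers two throttles: a `rate.Limiter` caps how fast new SSH connections are opened, and the new pacer retries a failed connection with backoff instead of failing outright. A minimal sketch of the two working together, with a plain retry loop standing in for rclone's pacer (`dialSFTP` is a hypothetical placeholder, not rclone's function):

```
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// dialSFTP is a hypothetical stand-in for the real connection logic.
func dialSFTP() error { return errors.New("transient network error") }

func main() {
	// At most 10 new connections per second, as connectionsPerSecond above.
	limiter := rate.NewLimiter(rate.Limit(10), 1)

	// A crude stand-in for fs.Pacer: retry with exponential backoff.
	sleep := 100 * time.Millisecond // minSleep
	for try := 1; try <= 3; try++ {
		if err := limiter.Wait(context.Background()); err != nil {
			fmt.Println("limiter failed:", err)
			return
		}
		if err := dialSFTP(); err != nil {
			fmt.Printf("try %d failed: %v; sleeping %v\n", try, err, sleep)
			time.Sleep(sleep)
			sleep *= 2 // decay constant of 2, as in the diff
			continue
		}
		fmt.Println("connected")
		return
	}
}
```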
@@ -7,6 +7,7 @@ import (
	"context"
	"fmt"
	"io"
	"net/url"
	"path"
	"strconv"
	"strings"
@@ -530,10 +531,10 @@ type listFn func(remote string, object *swift.Object, isDirectory bool) error
//
// Set recurse to read sub directories
func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, fn listFn) error {
	if prefix != "" {
	if prefix != "" && !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}
	if directory != "" {
	if directory != "" && !strings.HasSuffix(directory, "/") {
		directory += "/"
	}
	// Options for ObjectsWalk
@@ -952,6 +953,18 @@ func (o *Object) isStaticLargeObject() (bool, error) {
	return o.hasHeader("X-Static-Large-Object")
}

func (o *Object) isInContainerVersioning(container string) (bool, error) {
	_, headers, err := o.fs.c.Container(container)
	if err != nil {
		return false, err
	}
	xHistoryLocation := headers["X-History-Location"]
	if len(xHistoryLocation) > 0 {
		return true, nil
	}
	return false, nil
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
@@ -1083,9 +1096,8 @@ func min(x, y int64) int64 {
//
// if except is passed in then segments with that prefix won't be deleted
func (o *Object) removeSegments(except string) error {
	container, containerPath := o.split()
	segmentsContainer := container + "_segments"
	err := o.fs.listContainerRoot(segmentsContainer, containerPath, "", false, true, func(remote string, object *swift.Object, isDirectory bool) error {
	segmentsContainer, prefix, err := o.getSegmentsDlo()
	err = o.fs.listContainerRoot(segmentsContainer, prefix, "", false, true, func(remote string, object *swift.Object, isDirectory bool) error {
		if isDirectory {
			return nil
		}
@@ -1114,6 +1126,23 @@ func (o *Object) removeSegments(except string) error {
	return nil
}

func (o *Object) getSegmentsDlo() (segmentsContainer string, prefix string, err error) {
	if err = o.readMetaData(); err != nil {
		return
	}
	dirManifest := o.headers["X-Object-Manifest"]
	dirManifest, err = url.PathUnescape(dirManifest)
	if err != nil {
		return
	}
	delimiter := strings.Index(dirManifest, "/")
	if len(dirManifest) == 0 || delimiter < 0 {
		err = errors.New("Missing or wrong structure of manifest of Dynamic large object")
		return
	}
	return dirManifest[:delimiter], dirManifest[delimiter+1:], nil
}
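`getSegmentsDlo` above splits the `X-Object-Manifest` header, which holds a URL-escaped `container/prefix` value, at the first `/`. A small illustration of that split in isolation (the manifest value is invented for the example):

```
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// Invented example; real manifests point at the DLO's segment objects.
	manifest := "mycontainer_segments/path/to/big.dat/1573204316"

	unescaped, err := url.PathUnescape(manifest)
	if err != nil {
		panic(err)
	}
	i := strings.Index(unescaped, "/")
	if len(unescaped) == 0 || i < 0 {
		panic("missing or malformed dynamic large object manifest")
	}
	fmt.Println("container:", unescaped[:i])  // mycontainer_segments
	fmt.Println("prefix:", unescaped[i+1:])   // path/to/big.dat/1573204316
}
```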
// urlEncode encodes a string so that it is a valid URL
//
// We don't use any of Go's standard methods as we need `/` not
@@ -1300,12 +1329,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
func (o *Object) Remove(ctx context.Context) (err error) {
	container, containerPath := o.split()
	isDynamicLargeObject, err := o.isDynamicLargeObject()
	if err != nil {
		return err
	}

	// Remove file/manifest first
	err = o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.c.ObjectDelete(container, containerPath)
@@ -1314,12 +1340,22 @@ func (o *Object) Remove(ctx context.Context) error {
	if err != nil {
		return err
	}
	isDynamicLargeObject, err := o.isDynamicLargeObject()
	if err != nil {
		return err
	}
	// ...then segments if required
	if isDynamicLargeObject {
		err = o.removeSegments("")
		isInContainerVersioning, err := o.isInContainerVersioning(container)
		if err != nil {
			return err
		}
		if !isInContainerVersioning {
			err = o.removeSegments("")
			if err != nil {
				return err
			}
		}
	}
	return nil
}
@@ -113,7 +113,8 @@ type Fs struct {
	canStream bool // set if can stream
	useOCMtime bool // set if can use X-OC-Mtime
	retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
	hasChecksums bool // set if can use owncloud style checksums
	hasMD5 bool // set if can use owncloud style checksums for MD5
	hasSHA1 bool // set if can use owncloud style checksums for SHA1
}

// Object describes a webdav object
@@ -215,7 +216,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
	},
	NoRedirect: true,
}
if f.hasChecksums {
if f.hasMD5 || f.hasSHA1 {
	opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
@@ -383,7 +384,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// sets the BearerToken up
func (f *Fs) setBearerToken(token string) {
	f.opt.BearerToken = token
	f.srv.SetHeader("Authorization", "BEARER "+token)
	f.srv.SetHeader("Authorization", "Bearer "+token)
}

// fetch the bearer token using the command
@@ -430,11 +431,12 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
	f.canStream = true
	f.precision = time.Second
	f.useOCMtime = true
	f.hasChecksums = true
	f.hasMD5 = true
	f.hasSHA1 = true
case "nextcloud":
	f.precision = time.Second
	f.useOCMtime = true
	f.hasChecksums = true
	f.hasSHA1 = true
case "sharepoint":
	// To mount sharepoint, two Cookies are required
	// They have to be set instead of BasicAuth
@@ -536,7 +538,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
	"Depth": depth,
	},
}
if f.hasChecksums {
if f.hasMD5 || f.hasSHA1 {
	opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
@@ -945,10 +947,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	if f.hasChecksums {
		return hash.NewHashSet(hash.MD5, hash.SHA1)
	hashes := hash.Set(hash.None)
	if f.hasMD5 {
		hashes.Add(hash.MD5)
	}
	return hash.Set(hash.None)
	if f.hasSHA1 {
		hashes.Add(hash.SHA1)
	}
	return hashes
}

// About gets quota information
@@ -1015,13 +1021,11 @@ func (o *Object) Remote() string {

// Hash returns the SHA1 or MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if o.fs.hasChecksums {
		switch t {
		case hash.SHA1:
			return o.sha1, nil
		case hash.MD5:
			return o.md5, nil
		}
	if t == hash.MD5 && o.fs.hasMD5 {
		return o.md5, nil
	}
	if t == hash.SHA1 && o.fs.hasSHA1 {
		return o.sha1, nil
	}
	return "", hash.ErrUnsupported
}
@@ -1042,10 +1046,14 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
	o.hasMetaData = true
	o.size = info.Size
	o.modTime = time.Time(info.Modified)
	if o.fs.hasChecksums {
	if o.fs.hasMD5 || o.fs.hasSHA1 {
		hashes := info.Hashes()
		o.sha1 = hashes[hash.SHA1]
		o.md5 = hashes[hash.MD5]
		if o.fs.hasSHA1 {
			o.sha1 = hashes[hash.SHA1]
		}
		if o.fs.hasMD5 {
			o.md5 = hashes[hash.MD5]
		}
	}
	return nil
}
@@ -1126,19 +1134,21 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
	ContentType: fs.MimeType(ctx, src),
}
if o.fs.useOCMtime || o.fs.hasChecksums {
if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
	opts.ExtraHeaders = map[string]string{}
	if o.fs.useOCMtime {
		opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1e9)
	}
	if o.fs.hasChecksums {
	// Set an upload checksum - prefer SHA1
	//
	// This is used as an upload integrity test. If we set
	// only SHA1 here, owncloud will calculate the MD5 too.
	// Set one upload checksum
	// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
	// Nextcloud stores the checksum you supply (SHA1 or MD5) but only stores one
	if o.fs.hasSHA1 {
		if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" {
			opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
		} else if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
		}
	}
	if o.fs.hasMD5 && opts.ExtraHeaders["OC-Checksum"] == "" {
		if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
			opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
		}
	}
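The Update hunk above prefers a SHA1 `OC-Checksum` and falls back to MD5 only when no SHA1 is available. A minimal sketch of just that selection logic (the helper function is mine, not rclone's):

```
// ocChecksum picks the value for the OC-Checksum upload header:
// SHA1 when the backend supports it and the source can provide one,
// otherwise MD5, otherwise the empty string (header omitted).
func ocChecksum(hasSHA1, hasMD5 bool, sha1sum, md5sum string) string {
	if hasSHA1 && sha1sum != "" {
		return "SHA1:" + sha1sum
	}
	if hasMD5 && md5sum != "" {
		return "MD5:" + md5sum
	}
	return ""
}
```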
@@ -1,5 +1,6 @@
@echo off
echo Setting environment variables for mingw+WinFsp compile
set GOPATH=X:\go
set PATH=C:\Program Files\mingw-w64\i686-7.1.0-win32-dwarf-rt_v5-rev0\mingw32\bin;%PATH%
set GOPATH=Z:\go
rem set PATH=C:\Program Files\mingw-w64\i686-7.1.0-win32-dwarf-rt_v5-rev0\mingw32\bin;%PATH%
set PATH=C:\Program Files\mingw-w64\x86_64-8.1.0-win32-seh-rt_v6-rev0\mingw64\bin;%PATH%
set CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse
@@ -3,11 +3,18 @@ package authorize
import (
	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/spf13/cobra"
)

var (
	noAutoBrowser bool
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser")
}

var commandDefinition = &cobra.Command{
@@ -16,9 +23,12 @@ var commandDefinition = &cobra.Command{
	Long: `
Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.`,
rclone config.

Use the --auth-no-open-browser flag to prevent rclone from
automatically opening the auth link in your default browser.`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 3, command, args)
		config.Authorize(args)
		config.Authorize(args, noAutoBrowser)
	},
}
cmd/cmd.go
@@ -82,7 +82,7 @@ func ShowVersion() {
func NewFsFile(remote string) (fs.Fs, string) {
	_, _, fsPath, err := fs.ParseRemote(remote)
	if err != nil {
		fs.CountError(err)
		err = fs.CountError(err)
		log.Fatalf("Failed to create file system for %q: %v", remote, err)
	}
	f, err := cache.Get(remote)
@@ -92,7 +92,7 @@ func NewFsFile(remote string) (fs.Fs, string) {
	case nil:
		return f, ""
	default:
		fs.CountError(err)
		err = fs.CountError(err)
		log.Fatalf("Failed to create file system for %q: %v", remote, err)
	}
	return nil, ""
@@ -107,13 +107,13 @@ func newFsFileAddFilter(remote string) (fs.Fs, string) {
	if fileName != "" {
		if !filter.Active.InActive() {
			err := errors.Errorf("Can't limit to single files when using filters: %v", remote)
			fs.CountError(err)
			err = fs.CountError(err)
			log.Fatalf(err.Error())
		}
		// Limit transfers to this file
		err := filter.Active.AddFile(fileName)
		if err != nil {
			fs.CountError(err)
			err = fs.CountError(err)
			log.Fatalf("Failed to limit to single file %q: %v", remote, err)
		}
	}
@@ -135,7 +135,7 @@ func NewFsSrc(args []string) fs.Fs {
func newFsDir(remote string) fs.Fs {
	f, err := cache.Get(remote)
	if err != nil {
		fs.CountError(err)
		err = fs.CountError(err)
		log.Fatalf("Failed to create file system for %q: %v", remote, err)
	}
	return f
@@ -189,11 +189,11 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
	fdst, err := cache.Get(dstRemote)
	switch err {
	case fs.ErrorIsFile:
		fs.CountError(err)
		_ = fs.CountError(err)
		log.Fatalf("Source doesn't exist or is a directory and destination is a file")
	case nil:
	default:
		fs.CountError(err)
		_ = fs.CountError(err)
		log.Fatalf("Failed to create file system for destination %q: %v", dstRemote, err)
	}
	return
@@ -239,7 +239,7 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
	SigInfoHandler()
	for try := 1; try <= *retries; try++ {
		err = f()
		fs.CountError(err)
		err = fs.CountError(err)
		lastErr := accounting.GlobalStats().GetLastError()
		if err == nil {
			err = lastErr
@@ -386,12 +386,12 @@ func initConfig() {
	fs.Infof(nil, "Creating CPU profile %q\n", *cpuProfile)
	f, err := os.Create(*cpuProfile)
	if err != nil {
		fs.CountError(err)
		err = fs.CountError(err)
		log.Fatal(err)
	}
	err = pprof.StartCPUProfile(f)
	if err != nil {
		fs.CountError(err)
		err = fs.CountError(err)
		log.Fatal(err)
	}
	atexit.Register(func() {
@@ -405,17 +405,17 @@ func initConfig() {
	fs.Infof(nil, "Saving Memory profile %q\n", *memProfile)
	f, err := os.Create(*memProfile)
	if err != nil {
		fs.CountError(err)
		err = fs.CountError(err)
		log.Fatal(err)
	}
	err = pprof.WriteHeapProfile(f)
	if err != nil {
		fs.CountError(err)
		err = fs.CountError(err)
		log.Fatal(err)
	}
	err = f.Close()
	if err != nil {
		fs.CountError(err)
		err = fs.CountError(err)
		log.Fatal(err)
	}
	})
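All of these hunks make the same mechanical change: `fs.CountError` now returns the error it was given, so call sites either re-chain it (`err = fs.CountError(err)`) or explicitly discard the result (`_ = fs.CountError(err)`) to satisfy error-check linters. A minimal sketch of such a count-and-return wrapper (simplified; rclone's real version also ties into the stats accounting):

```
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var errorCount int64

// countError bumps a counter for non-nil errors and hands the error
// back so it can be used inline: err = countError(err).
func countError(err error) error {
	if err != nil {
		atomic.AddInt64(&errorCount, 1)
	}
	return err
}

func main() {
	err := countError(errors.New("boom"))
	fmt.Println(err, atomic.LoadInt64(&errorCount)) // boom 1
}
```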
@@ -371,7 +371,12 @@ func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
	if errc != 0 {
		return errc
	}
	n, err := handle.WriteAt(buff, ofst)
	var err error
	if fsys.VFS.Opt.CacheMode < vfs.CacheModeWrites || handle.Node().Mode()&os.ModeAppend == 0 {
		n, err = handle.WriteAt(buff, ofst)
	} else {
		n, err = handle.Write(buff)
	}
	if err != nil {
		return translateError(err)
	}

@@ -21,6 +21,7 @@ import (
	"github.com/pkg/errors"
	"github.com/rclone/rclone/cmd/mountlib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/atexit"
	"github.com/rclone/rclone/vfs"
	"github.com/rclone/rclone/vfs/vfsflags"
)
@@ -207,7 +208,7 @@ func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, er
// If noModTime is set then it
func Mount(f fs.Fs, mountpoint string) error {
	// Mount it
	FS, errChan, _, err := mount(f, mountpoint)
	FS, errChan, unmount, err := mount(f, mountpoint)
	if err != nil {
		return errors.Wrap(err, "failed to mount FUSE fs")
	}
@@ -217,6 +218,10 @@ func Mount(f fs.Fs, mountpoint string) error {
	sigHup := make(chan os.Signal, 1)
	signal.Notify(sigHup, syscall.SIGHUP)

	atexit.Register(func() {
		_ = unmount()
	})

	if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
		return errors.Wrap(err, "failed to notify systemd")
	}
@@ -88,7 +88,7 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
	underlyingDst := cryptDst.UnWrap()
	underlyingHash, err := underlyingDst.Hash(ctx, hashType)
	if err != nil {
		fs.CountError(err)
		err = fs.CountError(err)
		fs.Errorf(dst, "Error reading hash from underlying %v: %v", underlyingDst, err)
		return true, false
	}
@@ -97,7 +97,7 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
	}
	cryptHash, err := fcrypt.ComputeHash(ctx, cryptDst, src, hashType)
	if err != nil {
		fs.CountError(err)
		err = fs.CountError(err)
		fs.Errorf(dst, "Error computing hash: %v", err)
		return true, false
	}
@@ -106,7 +106,7 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
	}
	if cryptHash != underlyingHash {
		err = errors.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
		fs.CountError(err)
		err = fs.CountError(err)
		fs.Errorf(src, err.Error())
		return true, false
	}
@@ -10,6 +10,7 @@ import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configflags"
	"github.com/rclone/rclone/fs/filter/filterflags"
	"github.com/rclone/rclone/fs/log/logflags"
	"github.com/rclone/rclone/fs/rc/rcflags"
	"github.com/rclone/rclone/lib/atexit"
	"github.com/spf13/cobra"
@@ -46,10 +47,11 @@ __rclone_custom_func() {
	else
		__rclone_init_completion -n : || return
	fi
	local rclone=(command rclone --ask-password=false)
	if [[ $cur != *:* ]]; then
		local ifs=$IFS
		IFS=$'\n'
		local remotes=($(command rclone listremotes))
		local remotes=($("${rclone[@]}" listremotes 2> /dev/null))
		IFS=$ifs
		local remote
		for remote in "${remotes[@]}"; do
@@ -68,7 +70,7 @@ __rclone_custom_func() {
	fi
	local ifs=$IFS
	IFS=$'\n'
	local lines=($(rclone lsf "${cur%%:*}:$prefix" 2>/dev/null))
	local lines=($("${rclone[@]}" lsf "${cur%%:*}:$prefix" 2> /dev/null))
	IFS=$ifs
	local line
	for line in "${lines[@]}"; do
@@ -168,6 +170,7 @@ func setupRootCommand(rootCmd *cobra.Command) {
	configflags.AddFlags(pflag.CommandLine)
	filterflags.AddFlags(pflag.CommandLine)
	rcflags.AddFlags(pflag.CommandLine)
	logflags.AddFlags(pflag.CommandLine)

	Root.Run = runRoot
	Root.Flags().BoolVarP(&version, "version", "V", false, "Print the version number")
@@ -1,4 +1,4 @@
// +build linux darwin freebsd
// +build linux,go1.11 darwin,go1.11 freebsd,go1.11

package mount

@@ -1,4 +1,4 @@
// +build linux darwin freebsd
// +build linux,go1.11 darwin,go1.11 freebsd,go1.11

package mount

@@ -1,6 +1,6 @@
// FUSE main Fs

// +build linux darwin freebsd
// +build linux,go1.11 darwin,go1.11 freebsd,go1.11

package mount

@@ -1,10 +1,11 @@
// +build linux darwin freebsd
// +build linux,go1.11 darwin,go1.11 freebsd,go1.11

package mount

import (
	"context"
	"io"
	"os"

	"bazil.org/fuse"
	fusefs "bazil.org/fuse/fs"
@@ -41,7 +42,12 @@ var _ fusefs.HandleWriter = (*FileHandle)(nil)
// Write data to the file handle
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
	defer log.Trace(fh, "len=%d, offset=%d", len(req.Data), req.Offset)("written=%d, err=%v", &resp.Size, &err)
	n, err := fh.Handle.WriteAt(req.Data, req.Offset)
	var n int
	if fh.Handle.Node().VFS().Opt.CacheMode < vfs.CacheModeWrites || fh.Handle.Node().Mode()&os.ModeAppend == 0 {
		n, err = fh.Handle.WriteAt(req.Data, req.Offset)
	} else {
		n, err = fh.Handle.Write(req.Data)
	}
	if err != nil {
		return translateError(err)
	}

@@ -1,6 +1,6 @@
// Package mount implements a FUSE mounting system for rclone remotes.

// +build linux darwin freebsd
// +build linux,go1.11 darwin,go1.11 freebsd,go1.11

package mount

@@ -32,12 +32,10 @@ func mountOptions(device string) (options []fuse.MountOption) {
	fuse.Subtype("rclone"),
	fuse.FSName(device),
	fuse.VolumeName(mountlib.VolumeName),
	fuse.AsyncRead(),

	// Options from benchmarking in the fuse module
	//fuse.MaxReadahead(64 * 1024 * 1024),
	//fuse.AsyncRead(), - FIXME this causes
	// ReadFileHandle.Read error: read /home/files/ISOs/xubuntu-15.10-desktop-amd64.iso: bad file descriptor
	// which is probably related to errors people are having
	//fuse.WritebackCache(),
}
if mountlib.NoAppleDouble {
@@ -139,6 +137,9 @@ func Mount(f fs.Fs, mountpoint string) error {
	sigHup := make(chan os.Signal, 1)
	signal.Notify(sigHup, syscall.SIGHUP)
	atexit.IgnoreSignals()
	atexit.Register(func() {
		_ = unmount()
	})

	if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
		return errors.Wrap(err, "failed to notify systemd")

@@ -1,4 +1,4 @@
// +build linux darwin freebsd
// +build linux,go1.11 darwin,go1.11 freebsd,go1.11

package mount

@@ -1,6 +1,14 @@
// Build for mount for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build !linux,!darwin,!freebsd
// Invert the build constraint: linux,go1.11 darwin,go1.11 freebsd,go1.11
//
// !((linux&&go1.11) || (darwin&&go1.11) || (freebsd&&go1.11))
// == !(linux&&go1.11) && !(darwin&&go1.11) && !(freebsd&&go1.11)
// == (!linux || !go1.11) && (!darwin || !go1.11) && (!freebsd || !go1.11)

// +build !linux !go1.11
// +build !darwin !go1.11
// +build !freebsd !go1.11

package mount
@@ -50,6 +50,8 @@ func TestRenameOpenHandle(t *testing.T) {
	err = file.Close()
	require.NoError(t, err)

	run.waitForWriters()

	// verify file was renamed properly
	run.checkDir(t, "renamebla 9")

@@ -34,6 +34,11 @@ func osCreate(name string) (*os.File, error) {
	return os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
}

// os.Create with append
func osAppend(name string) (*os.File, error) {
	return os.OpenFile(name, os.O_WRONLY|os.O_APPEND, 0666)
}

// TestFileModTimeWithOpenWriters tests mod time on open files
func TestFileModTimeWithOpenWriters(t *testing.T) {
	run.skipIfNoFUSE(t)

@@ -6,6 +6,7 @@ import (
	"context"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
@@ -22,6 +23,7 @@ import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/lib/file"
	"github.com/rclone/rclone/vfs"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
@@ -78,6 +80,7 @@ func RunTests(t *testing.T, fn MountFn) {
	t.Run("TestWriteFileDoubleClose", TestWriteFileDoubleClose)
	t.Run("TestWriteFileFsync", TestWriteFileFsync)
	t.Run("TestWriteFileDup", TestWriteFileDup)
	t.Run("TestWriteFileAppend", TestWriteFileAppend)
	})
	log.Printf("Finished test run with cache mode %v (ok=%v)", cacheMode, ok)
	if !ok {
@@ -344,9 +347,36 @@ func (r *Run) waitForWriters() {
	run.vfs.WaitForWriters(10 * time.Second)
}

// writeFile writes data to a file named by filename.
// If the file does not exist, writeFile creates it with permissions perm;
// otherwise writeFile truncates it before writing.
// If there is an error writing then writeFile
// deletes any existing file and tries again.
func writeFile(filename string, data []byte, perm os.FileMode) error {
	f, err := file.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	if err != nil {
		err = os.Remove(filename)
		if err != nil {
			return err
		}
		f, err = file.OpenFile(filename, os.O_WRONLY|os.O_CREATE, perm)
		if err != nil {
			return err
		}
	}
	n, err := f.Write(data)
	if err == nil && n < len(data) {
		err = io.ErrShortWrite
	}
	if err1 := f.Close(); err == nil {
		err = err1
	}
	return err
}

func (r *Run) createFile(t *testing.T, filepath string, contents string) {
	filepath = r.path(filepath)
	err := ioutil.WriteFile(filepath, []byte(contents), 0600)
	err := writeFile(filepath, []byte(contents), 0600)
	require.NoError(t, err)
	r.waitForWriters()
}
@@ -2,6 +2,7 @@ package mounttest

import (
	"os"
	"runtime"
	"testing"

	"github.com/stretchr/testify/assert"
@@ -130,3 +131,48 @@ func TestWriteFileDup(t *testing.T) {
	run.waitForWriters()
	run.rm(t, "to be synced")
}

// TestWriteFileAppend tests that O_APPEND works on cache backends >= writes
func TestWriteFileAppend(t *testing.T) {
	run.skipIfNoFUSE(t)

	if run.vfs.Opt.CacheMode < vfs.CacheModeWrites {
		t.Skip("not supported on vfs-cache-mode < writes")
		return
	}

	// TODO: Windows needs the v1.5 release of WinFsp to handle O_APPEND properly.
	// Until it gets released, skip this test on Windows.
	if runtime.GOOS == "windows" {
		t.Skip("currently unsupported on Windows")
	}

	filepath := run.path("to be synced")
	fh, err := osCreate(filepath)
	require.NoError(t, err)

	testData := []byte("0123456789")
	appendData := []byte("10")

	_, err = fh.Write(testData)
	require.NoError(t, err)

	err = fh.Close()
	require.NoError(t, err)

	fh, err = osAppend(filepath)
	require.NoError(t, err)

	_, err = fh.Write(appendData)
	require.NoError(t, err)

	err = fh.Close()
	require.NoError(t, err)

	info, err := os.Stat(filepath)
	require.NoError(t, err)
	require.EqualValues(t, len(testData)+len(appendData), info.Size())

	run.waitForWriters()
	run.rm(t, "to be synced")
}
@@ -214,7 +214,7 @@ func withHeader(name string, value string, next http.Handler) http.Handler {

// serveError returns an http.StatusInternalServerError and logs the error
func serveError(what interface{}, w http.ResponseWriter, text string, err error) {
	fs.CountError(err)
	err = fs.CountError(err)
	fs.Errorf(what, "%s: %v", text, err)
	http.Error(w, text+".", http.StatusInternalServerError)
}

@@ -15,7 +15,6 @@ import (
	"strconv"
	"sync"

	ftp "github.com/goftp/server"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/cmd/serve/proxy"
@@ -29,6 +28,7 @@ import (
	"github.com/rclone/rclone/vfs/vfsflags"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
	ftp "goftp.io/server"
)

// Options contains options for the http Server
@@ -155,7 +155,7 @@ func newServer(f fs.Fs, opt *Options) (*server, error) {
	PassivePorts: opt.PassivePorts,
	Auth: s, // implemented by CheckPasswd method
	Logger: &Logger{},
	//TODO implement a maximum of https://godoc.org/github.com/goftp/server#ServerOpts
	//TODO implement a maximum of https://godoc.org/goftp.io/server#ServerOpts
}
s.srv = ftp.NewServer(ftpopt)
return s, nil
@@ -210,8 +210,8 @@ func (l *Logger) PrintResponse(sessionID string, code int, message string) {
// CheckPassword is called with the connection.
func findID(callerName []byte) (string, error) {
	// Dump the stack in this format
	// github.com/rclone/rclone/vendor/github.com/goftp/server.(*Conn).Serve(0xc0000b2680)
	// /home/ncw/go/src/github.com/rclone/rclone/vendor/github.com/goftp/server/conn.go:116 +0x11d
	// github.com/rclone/rclone/vendor/goftp.io/server.(*Conn).Serve(0xc0000b2680)
	// /home/ncw/go/src/github.com/rclone/rclone/vendor/goftp.io/server/conn.go:116 +0x11d
	buf := make([]byte, 4096)
	n := runtime.Stack(buf, false)
	buf = buf[:n]

@@ -11,7 +11,6 @@ import (
	"fmt"
	"testing"

	ftp "github.com/goftp/server"
	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/cmd/serve/servetest"
	"github.com/rclone/rclone/fs"
@@ -19,6 +18,7 @@ import (
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	ftp "goftp.io/server"
)

const (

@@ -68,7 +68,7 @@ func (d *Directory) AddEntry(remote string, isDir bool) {

// Error logs the error and if a ResponseWriter is given it writes a http.StatusInternalServerError
func Error(what interface{}, w http.ResponseWriter, text string, err error) {
	fs.CountError(err)
	err = fs.CountError(err)
	fs.Errorf(what, "%s: %v", text, err)
	if w != nil {
		http.Error(w, text+".", http.StatusInternalServerError)

@@ -208,7 +208,10 @@ func (p *Proxy) call(user, pass string, passwordBytes []byte) (value interface{}
	if err != nil {
		return nil, false, err
	}
	pwHash, err := bcrypt.GenerateFromPassword(passwordBytes, bcrypt.DefaultCost)
	// The bcrypt cost is a compromise between security and speed. The password is looked up on every
	// transaction for WebDAV so we store it lightly hashed. An attacker would find it easier to go after
	// the unencrypted password in memory most likely.
	pwHash, err := bcrypt.GenerateFromPassword(passwordBytes, bcrypt.MinCost)
	if err != nil {
		return nil, false, err
	}
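The hunk above trades `bcrypt.DefaultCost` for `bcrypt.MinCost` because the proxy re-hashes and checks the password on every transaction. A small sketch of the same trade-off using golang.org/x/crypto/bcrypt (a minimal illustration, not rclone's code):

```
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	pw := []byte("s3cret")

	// MinCost (4) is much cheaper than DefaultCost (10); each extra cost
	// point roughly doubles hashing time, which matters when every
	// request re-hashes the password.
	hash, err := bcrypt.GenerateFromPassword(pw, bcrypt.MinCost)
	if err != nil {
		panic(err)
	}

	// Verification works the same whatever cost the hash was created with.
	fmt.Println(bcrypt.CompareHashAndPassword(hash, pw) == nil) // true
}
```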
@@ -271,7 +271,7 @@ func (s *server) postObject(w http.ResponseWriter, r *http.Request, remote strin

	_, err := operations.RcatSize(r.Context(), s.f, remote, r.Body, r.ContentLength, time.Now())
	if err != nil {
		accounting.Stats(r.Context()).Error(err)
		err = accounting.Stats(r.Context()).Error(err)
		fs.Errorf(remote, "Post request rcat error: %v", err)
		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
@@ -192,7 +192,7 @@ Contributors
  * Sheldon Rupp <me@shel.io>
  * albertony <12441419+albertony@users.noreply.github.com>
  * cron410 <cron410@gmail.com>
  * Anagh Kumar Baranwal <anaghk.dos@gmail.com>
  * Anagh Kumar Baranwal <anaghk.dos@gmail.com> <6824881+darthShadow@users.noreply.github.com>
  * Felix Brucker <felix@felixbrucker.com>
  * Santiago Rodríguez <scollazo@users.noreply.github.com>
  * Craig Miskell <craig.miskell@fluxfederation.com>
@@ -263,7 +263,7 @@ Contributors
  * garry415 <garry.415@gmail.com>
  * forgems <forgems@gmail.com>
  * Florian Apolloner <florian@apolloner.eu>
  * Aleksandar Jankovic <office@ajankovic.com>
  * Aleksandar Janković <office@ajankovic.com> <ajankovic@users.noreply.github.com>
  * Maran <maran@protonmail.com>
  * nguyenhuuluan434 <nguyenhuuluan434@gmail.com>
  * Laura Hausmann <zotan@zotan.pw> <laura@hausmann.dev>
@@ -306,3 +306,18 @@ Contributors
  * Carlos Ferreyra <crypticmind@gmail.com>
  * Saksham Khanna <sakshamkhanna@outlook.com>
  * dausruddin <5763466+dausruddin@users.noreply.github.com>
  * zero-24 <zero-24@users.noreply.github.com>
  * Xiaoxing Ye <ye@xiaoxing.us>
  * Barry Muldrey <barry@muldrey.net>
  * Sebastian Brandt <sebastian.brandt@friday.de>
  * Marco Molteni <marco.molteni@mailbox.org>
  * Ankur Gupta <ankur0493@gmail.com>
  * Maciej Zimnoch <maciej@scylladb.com>
  * anuar45 <serdaliyev.anuar@gmail.com>
  * Fernando <ferferga@users.noreply.github.com>
  * David Cole <david.cole@sohonet.com>
  * Wei He <git@weispot.com>
  * Outvi V <19144373+outloudvi@users.noreply.github.com>
  * Thomas Kriechbaumer <thomas@kriechbaumer.name>
  * Tennix <tennix@users.noreply.github.com>
  * Ole Schütt <ole@schuett.name>
@@ -1,11 +1,30 @@
---
title: "Documentation"
description: "Rclone Changelog"
date: "2019-10-26"
date: "2019-11-19"
---

# Changelog

## v1.50.2 - 2019-11-19

* Bug Fixes
    * accounting: Fix memory leak on retries operations (Nick Craig-Wood)
* Drive
    * Fix listing of the root directory with drive.files scope (Nick Craig-Wood)
    * Fix --drive-root-folder-id with team/shared drives (Nick Craig-Wood)

## v1.50.1 - 2019-11-02

* Bug Fixes
    * hash: Fix accidentally changed hash names for `DropboxHash` and `CRC-32` (Nick Craig-Wood)
    * fshttp: Fix error reporting on tpslimit token bucket errors (Nick Craig-Wood)
    * fshttp: Don't print token bucket errors on context cancelled (Nick Craig-Wood)
* Local
    * Fix listings of . on Windows (Nick Craig-Wood)
* Onedrive
    * Fix DirMove/Move after Onedrive change (Xiaoxing Ye)

## v1.50.0 - 2019-10-26

* New backends
@@ -130,10 +130,10 @@ error message in such cases.

#### Chunk names

The default chunk name format is `*.rclone-chunk.###`, hence by default
chunk names are `BIG_FILE_NAME.rclone-chunk.001`,
`BIG_FILE_NAME.rclone-chunk.002` etc. You can configure a different name
format using the `--chunker-name-format` option. The format uses asterisk
The default chunk name format is `*.rclone_chunk.###`, hence by default
chunk names are `BIG_FILE_NAME.rclone_chunk.001`,
`BIG_FILE_NAME.rclone_chunk.002` etc. You can configure another name format
using the `name_format` configuration file option. The format uses asterisk
`*` as a placeholder for the base file name and one or more consecutive
hash characters `#` as a placeholder for sequential chunk number.
There must be one and only one asterisk. The number of consecutive hash
@@ -211,6 +211,9 @@ file hashing, configure chunker with `md5all` or `sha1all`. These two modes
guarantee given hash for all files. If wrapped remote doesn't support it,
chunker will then add metadata to all files, even small. However, this can
double the amount of small files in storage and incur additional service charges.
You can even use chunker to force md5/sha1 support in any other remote
at the expense of sidecar meta objects by setting eg. `chunk_type=sha1all`
to force hashsums and `chunk_size=1P` to effectively disable chunking.

Normally, when a file is copied to chunker controlled remote, chunker
will ask the file source for compatible file hash and revert to on-the-fly
@@ -274,6 +277,14 @@ Chunker requires wrapped remote to support server side `move` (or `copy` +
This is because it internally renames temporary chunk files to their final
names when an operation completes successfully.

Chunker encodes chunk number in file name, so with default `name_format`
setting it adds 17 characters. Also chunker adds 7 characters of temporary
suffix during operations. Many file systems limit base file name without path
by 255 characters. Using rclone's crypt remote as a base file system limits
file name by 143 characters. Thus, maximum name length is 231 for most files
and 119 for chunker-over-crypt. A user in need can change the name format to
eg. `*.rcc##` and save 10 characters (provided at most 99 chunks per file).
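For reference, the arithmetic behind those limits (a worked check added here, not text from the original docs):

```
255 - 17 (chunk suffix) - 7 (temporary suffix) = 231   (most base remotes)
143 - 17 (chunk suffix) - 7 (temporary suffix) = 119   (chunker over crypt)
```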
Note that a move implemented using the copy-and-delete method may incur
double charging with some cloud storage providers.
@@ -22,7 +22,8 @@ rclone authorize [flags]

### Options

```
  -h, --help   help for authorize
      --auth-no-open-browser   Do not automatically open auth link in default browser
  -h, --help                   help for authorize
```

See the [global flags page](/flags/) for global options not listed here.
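For example, an invocation on the machine with the browser might look like this (an illustrative command; the backend name is a placeholder):

```
rclone authorize "dropbox" --auth-no-open-browser
```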
@@ -65,6 +65,28 @@ infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Architecture)
which creates drives accessible for everyone on the system or
alternatively using [the nssm service manager](https://nssm.cc/usage).

#### Mount as a network drive

By default, rclone will mount the remote as a normal drive. However, you can also mount it as a **Network Drive**
(or **Network Share**, as mentioned in some places).

Unlike other systems, Windows provides a different filesystem type for network drives.
Windows and other programs treat the network drives and fixed/removable drives differently:
in network drives, many I/O operations are optimized, as the high latency and low reliability
(compared to a normal drive) of a network are expected.

Although many people prefer network shares to be mounted as normal system drives, this might cause
some issues, such as programs not working as expected or freezes and errors while operating with the
mounted remote in Windows Explorer. If you experience any of those, consider mounting rclone remotes as network shares,
as Windows expects normal drives to be fast and reliable, while cloud storage is far from that.
See also the [Limitations](#limitations) section below for more info.

Add `--fuse-flag --VolumePrefix=\server\share` to your `mount` command, **replacing `share` with any other
name of your choice if you are mounting more than one remote**. Otherwise, the mountpoints will conflict and
your mounted filesystems will overlap.
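For example, mounting a remote to drive letter X: as a network share might look like this (an illustrative command; the remote name and path are placeholders):

```
rclone mount remote:path/to/files X: --fuse-flag --VolumePrefix=\server\share
```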
[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)

### Limitations

Without the use of "--vfs-cache-mode" this can only write files
@@ -37,7 +37,7 @@ See the following for detailed instructions for
* [Google Photos](/googlephotos/)
* [HTTP](/http/)
* [Hubic](/hubic/)
* [Jottacloud](/jottacloud/)
* [Jottacloud / GetSky.no](/jottacloud/)
* [Koofr](/koofr/)
* [Mail.ru Cloud](/mailru/)
* [Mega](/mega/)
@@ -770,6 +770,23 @@ in effect (the defaults):
- 500MB..750MB files will be downloaded with 3 streams
- 750MB+ files will be downloaded with 4 streams

### --no-check-dest ###

The `--no-check-dest` flag can be used with `move` or `copy` and it causes
rclone not to check the destination at all when copying files.

This means that:

- the destination is not listed minimising the API calls
- files are always transferred
- this can cause duplicates on remotes which allow it (eg Google Drive)
- `--retries 1` is recommended otherwise you'll transfer everything again on a retry

This flag is useful to minimise the transactions if you know that none
of the files are on the destination.

This is a specialized flag which should be ignored by most users!
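An illustrative invocation (not from the original docs):

```
rclone copy --no-check-dest --retries 1 /path/to/src remote:dst
```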
### --no-gzip-encoding ###

Don't set `Accept-Encoding: gzip`. This means that rclone won't ask
@@ -191,3 +191,22 @@ If you have more than 10,000 files in a directory then `rclone purge
dropbox:dir` will return the error `Failed to purge: There are too
many files involved in this operation`. As a work-around do an
`rclone delete dropbox:dir` followed by an `rclone rmdir dropbox:dir`.

### Get your own Dropbox App ID ###

When you use rclone with Dropbox in its default configuration you are using rclone's App ID. This is shared between all the rclone users.

Here is how to create your own Dropbox App ID for rclone:

1. Log into the [Dropbox App console](https://www.dropbox.com/developers/apps/create) with your Dropbox Account (it need not
be the same account as the Dropbox you want to access)

2. Choose an API => Usually this should be `Dropbox API`

3. Choose the type of access you want to use => Full Dropbox or App Folder

4. Name your App

5. Click the button `Create App`

6. Find the `App key` and `App secret`. Use these values in rclone config to add a new remote or edit an existing remote.
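With those values, a remote using your own App ID might look something like this in the config file (illustrative values; `client_id`/`client_secret` are assumed to be the dropbox backend's option names, and the token comes from the usual OAuth flow):

```
[dropbox]
type = dropbox
client_id = your_app_key
client_secret = your_app_secret
token = {"access_token":"XXXX","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}
```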
@@ -99,7 +99,7 @@ Or instead of htpassword if you just want a single user and password:

The GUI is being developed in the [rclone/rclone-webui-react repository](https://github.com/rclone/rclone-webui-react).

Bug reports and contributions very welcome welcome :-)
Bug reports and contributions are very welcome :-)

If you have questions then please ask them on the [rclone forum](https://forum.rclone.org/).
@@ -56,7 +56,14 @@ Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.

    rclone config

## macOS installation from precompiled binary ##
## macOS installation with brew ##

    brew install rclone

## macOS installation from precompiled binary, using curl ##

To avoid problems with macOS gatekeeper requiring the binary to be signed and
notarized it is enough to download with `curl`.

Download the latest version of rclone.

@@ -81,6 +88,19 @@ Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.

    rclone config

## macOS installation from precompiled binary, using a web browser ##

When downloading a binary with a web browser, the browser will set the macOS
gatekeeper quarantine attribute. Starting from Catalina, when attempting to run
`rclone`, a pop-up will appear saying:

    “rclone” cannot be opened because the developer cannot be verified.
    macOS cannot verify that this app is free from malware.

The simplest fix is to run

    xattr -d com.apple.quarantine rclone

## Install with docker ##

The rclone project maintains a [docker image for rclone](https://hub.docker.com/r/rclone/rclone).
@@ -11,7 +11,7 @@ Paths are specified as `remote:path`

Paths may be as deep as required, eg `remote:directory/subdirectory`.

To configure Jottacloud you will need to enter your username and password and select a mountpoint.
To configure Jottacloud you will need to generate a personal security token in the Jottacloud web interface. You will find the option to do so in your [account security settings](https://www.jottacloud.com/web/secure). Note that the web interface may refer to this token as a JottaCli token.

Here is an example of how to make a remote called `remote`. First run:

@@ -42,16 +42,8 @@ n) No
y/n> n
Remote config

Do you want to create a machine specific API key?

Rclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.

y) Yes
n) No
y/n> y
Username> 0xC4KE@gmail.com
Your Jottacloud password is only required during setup and will not be stored.
password:
Generate a personal login token here: https://www.jottacloud.com/web/secure
Login Token> <your token here>

Do you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?

@@ -74,11 +66,10 @@ Mountpoints> 1
[jotta]
type = jottacloud
user = 0xC4KE@gmail.com
client_id = .....
client_secret = ........
token = {........}
device = Jotta
mountpoint = Archive
configVersion = 1
--------------------
y) Yes this is OK
e) Edit this remote
@@ -102,7 +93,7 @@ To copy a local directory to an Jottacloud directory called backup
### Devices and Mountpoints ###

The official Jottacloud client registers a device for each computer you install it on and then creates a mountpoint for each folder you select for Backup.
The web interface uses a special device called Jotta for the Archive, Sync and Shared mountpoints. In most cases you'll want to use the Jotta/Archive device/mounpoint however if you want to access files uploaded by the official rclone provides the option to select other devices and mountpoints during config.
The web interface uses a special device called Jotta for the Archive, Sync and Shared mountpoints. In most cases you'll want to use the Jotta/Archive device/mountpoint, however if you want to access files uploaded by any of the official clients rclone provides the option to select other devices and mountpoints during config.

### --fast-list ###
@@ -340,6 +340,7 @@ Authentication is required for this call.

### config/get: Get a remote in the config file. {#config/get}

Parameters:

- name - name of remote to get

See the [config dump command](/commands/rclone_config_dump/) command for more information on the above.
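For example, fetching a remote over the remote control API might look like this (an illustrative invocation, not from the original docs):

```
rclone rc config/get name=myremote
```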
@@ -482,6 +483,7 @@ If group is not provided then summed up stats for all groups will be
|
||||
returned.
|
||||
|
||||
Parameters
|
||||
|
||||
- group - name of the stats group (string)
|
||||
|
||||
Returns the following values:
|
||||
@@ -519,12 +521,12 @@ The value for "eta" is null if an eta cannot be determined.
|
||||
|
||||
### core/stats-reset: Reset stats. {#core/stats-reset}
|
||||
|
||||
This clears counters and errors for all stats or specific stats group if group
|
||||
is provided.
|
||||
This clears counters, errors and finished transfers for all stats or specific
|
||||
stats group if group is provided.
|
||||
|
||||
Parameters
|
||||
|
||||
- group - name of the stats group (string)
|
||||
```
|
||||
|
||||
### core/transferred: Returns stats about completed transfers. {#core/transferred}
|
||||
|
||||
@@ -538,6 +540,7 @@ returned.
|
||||
Note only the last 100 completed transfers are returned.
|
||||
|
||||
Parameters
|
||||
|
||||
- group - name of the stats group (string)
|
||||
|
||||
Returns the following values:
|
||||
@@ -561,6 +564,7 @@ Returns the following values:
|
||||
### core/version: Shows the current version of rclone and the go runtime. {#core/version}
|
||||
|
||||
This shows the current version of go and the go runtime
|
||||
|
||||
- version - rclone version, eg "v1.44"
|
||||
- decomposed - version number as [major, minor, patch, subpatch]
|
||||
- note patch and subpatch will be 999 for a git compiled version
|
||||
@@ -569,19 +573,60 @@ This shows the current version of go and the go runtime
|
||||
- arch - cpu architecture in use according to Go
|
||||
- goVersion - version of Go runtime in use
|
||||
|
||||
### debug/set-block-profile-rate: Set runtime.SetBlockProfileRate for blocking profiling. {#debug/set-block-profile-rate}
|
||||
|
||||
SetBlockProfileRate controls the fraction of goroutine blocking events
|
||||
that are reported in the blocking profile. The profiler aims to sample
|
||||
an average of one blocking event per rate nanoseconds spent blocked.
|
||||
|
||||
To include every blocking event in the profile, pass rate = 1. To turn
|
||||
off profiling entirely, pass rate <= 0.
|
||||
|
||||
After calling this you can use this to see the blocking profile:
|
||||
|
||||
go tool pprof http://localhost:5572/debug/pprof/block
|
||||
|
||||
Parameters
|
||||
|
||||
- rate - int
|
||||
|
||||
### debug/set-mutex-profile-fraction: Set runtime.SetMutexProfileFraction for mutex profiling. {#debug/set-mutex-profile-fraction}
|
||||
|
||||
SetMutexProfileFraction controls the fraction of mutex contention
|
||||
events that are reported in the mutex profile. On average 1/rate
|
||||
events are reported. The previous rate is returned.
|
||||
|
||||
To turn off profiling entirely, pass rate 0. To just read the current
|
||||
rate, pass rate < 0. (For n>1 the details of sampling may change.)
|
||||
|
||||
Once this is set you can look use this to profile the mutex contention:
|
||||
|
||||
go tool pprof http://localhost:5572/debug/pprof/mutex
|
||||
|
||||
Parameters
|
||||
|
||||
- rate - int
|
||||
|
||||
Results
|
||||
|
||||
- previousRate - int
|
||||
|
||||
### job/list: Lists the IDs of the running jobs {#job/list}
|
||||
|
||||
Parameters - None
|
||||
|
||||
Results
|
||||
|
||||
- jobids - array of integer job ids
|
||||
|
||||
### job/status: Reads the status of the job ID {#job/status}
|
||||
|
||||
Parameters
|
||||
|
||||
- jobid - id of the job (integer)
|
||||
|
||||
Results
|
||||
|
||||
- finished - boolean
|
||||
- duration - time in seconds that the job ran for
|
||||
- endTime - time the job finished (eg "2018-10-26T18:50:20.528746884+01:00")
|
||||
@@ -596,6 +641,7 @@ Results

### job/stop: Stop the running job {#job/stop}

Parameters

- jobid - id of the job (integer)

### operations/about: Return the space used on the remote {#operations/about}

@@ -1189,13 +1235,20 @@ You can see a summary of profiles available at http://localhost:5572/debug/pprof

Here is how to use some of them:

* Memory: `go tool pprof http://localhost:5572/debug/pprof/heap`
* Go routines: `curl http://localhost:5572/debug/pprof/goroutine?debug=1`
* 30-second CPU profile: `go tool pprof http://localhost:5572/debug/pprof/profile`
* 5-second execution trace: `wget http://localhost:5572/debug/pprof/trace?seconds=5`
- Memory: `go tool pprof http://localhost:5572/debug/pprof/heap`
- Go routines: `curl http://localhost:5572/debug/pprof/goroutine?debug=1`
- 30-second CPU profile: `go tool pprof http://localhost:5572/debug/pprof/profile`
- 5-second execution trace: `wget http://localhost:5572/debug/pprof/trace?seconds=5`
- Goroutine blocking profile
  - Enable first with: `rclone rc debug/set-block-profile-rate rate=1` ([docs](#debug/set-block-profile-rate))
  - `go tool pprof http://localhost:5572/debug/pprof/block`
- Contended mutexes:
  - Enable first with: `rclone rc debug/set-mutex-profile-fraction rate=1` ([docs](#debug/set-mutex-profile-fraction))
  - `go tool pprof http://localhost:5572/debug/pprof/mutex`

See the [net/http/pprof docs](https://golang.org/pkg/net/http/pprof/)
for more info on how to use the profiling and for a general overview
see [the Go team's blog post on profiling go programs](https://blog.golang.org/profiling-go-programs).

The profiling hook is [zero overhead unless it is used](https://stackoverflow.com/q/26545159/164234).
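The endpoints above come from Go's standard net/http/pprof package, so a plain HTTP client works too. A hedged sketch that fetches the human-readable goroutine dump without `go tool pprof` (assuming the rc server is listening on localhost:5572):

```go
package main

import (
	"io"
	"net/http"
	"os"
)

func main() {
	// debug=1 asks the pprof handler for text output instead of
	// the binary profile format.
	resp, err := http.Get("http://localhost:5572/debug/pprof/goroutine?debug=1")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	_, _ = io.Copy(os.Stdout, resp.Body)
}
```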
@@ -16,6 +16,7 @@ The S3 backend can be used with a number of different providers:

* {{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
* {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
* {{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
* {{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
* {{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}

Paths are specified as `remote:bucket` (or `remote:` for the `lsd`

@@ -135,8 +136,11 @@ Choose a number from below, or type in your own value
   / Asia Pacific (Mumbai)
13 | Needs location constraint ap-south-1.
   \ "ap-south-1"
   / Asia Pacific (Hong Kong) Region
14 | Needs location constraint ap-east-1.
   \ "ap-east-1"
   / South America (Sao Paulo) Region
14 | Needs location constraint sa-east-1.
15 | Needs location constraint sa-east-1.
   \ "sa-east-1"
region> 1
Endpoint for S3 API.
@@ -170,7 +174,9 @@ Choose a number from below, or type in your own value
   \ "ap-northeast-2"
13 / Asia Pacific (Mumbai)
   \ "ap-south-1"
14 / South America (Sao Paulo) Region.
14 / Asia Pacific (Hong Kong)
   \ "ap-east-1"
15 / South America (Sao Paulo) Region.
   \ "sa-east-1"
location_constraint> 1
Canned ACL used when creating buckets and/or storing objects in S3.
@@ -264,8 +270,8 @@ The modified time is stored as metadata on the object as
`X-Amz-Meta-Mtime` as floating point since the epoch accurate to 1 ns.

If the modification time needs to be updated rclone will attempt to perform a server
side copy to update the modification if the object can be copied in a single part.
In the case the object is larger than 5Gb or is in Glacier or Glacier Deep Archive
side copy to update the modification if the object can be copied in a single part.
In the case the object is larger than 5Gb or is in Glacier or Glacier Deep Archive
storage the object will be uploaded rather than copied.

#### Restricted filename characters

@@ -352,6 +358,7 @@ The different authentication methods are tried in this order:
  - `AWS_PROFILE` to control which profile to use.
- Or, run `rclone` in an ECS task with an IAM role (AWS only).
- Or, run `rclone` on an EC2 instance with an IAM role (AWS only).
- Or, run `rclone` in an EKS pod with an IAM role that is associated with a service account (AWS only).

If none of these options actually ends up providing `rclone` with AWS
credentials then S3 interaction will be non-authenticated (see below).

@@ -555,6 +562,9 @@ Region to connect to.
- "ap-south-1"
  - Asia Pacific (Mumbai)
  - Needs location constraint ap-south-1.
- "ap-east-1"
  - Asia Pacific (Hong Kong) Region
  - Needs location constraint ap-east-1.
- "sa-east-1"
  - South America (Sao Paulo) Region
  - Needs location constraint sa-east-1.

@@ -774,6 +784,8 @@ Used when creating buckets only.
  - Asia Pacific (Seoul)
- "ap-south-1"
  - Asia Pacific (Mumbai)
- "ap-east-1"
  - Asia Pacific (Hong Kong)
- "sa-east-1"
  - South America (Sao Paulo) Region.

@@ -1123,6 +1135,21 @@ WARNING: Storing parts of an incomplete multipart upload counts towards space us
- Type: bool
- Default: false

#### --s3-list-chunk

Size of listing chunk (response list for each ListObject S3 request).

This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification.
Most services truncate the response list to 1000 objects even if more than that is requested.
In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html).
In Ceph, this can be increased with the "rgw list buckets max chunk" option.

- Config: list_chunk
- Env Var: RCLONE_S3_LIST_CHUNK
- Type: int
- Default: 1000

<!--- autogenerated options stop -->

### Anonymous access to public buckets ###
6
docs/static/css/custom.css
vendored
@@ -43,7 +43,11 @@ h3:hover .header-link,
h4:hover .header-link,
h5:hover .header-link,
h6:hover .header-link {
    opacity: 1;
    opacity: 1;
}
h2, h3, h4, h5, h6 {
    padding-top: 55px;
    margin-top: -44px;
}

/* Fix spacing between menu items */
@@ -70,6 +70,10 @@ func newAccountSizeName(stats *StatsInfo, in io.ReadCloser, size int64, name str

// WithBuffer - If the file is above a certain size it adds an Async reader
func (acc *Account) WithBuffer() *Account {
    // if already have a buffer then just return
    if acc.withBuf {
        return acc
    }
    acc.withBuf = true
    var buffers int
    if acc.size >= int64(fs.Config.BufferSize) || acc.size == -1 {
@@ -118,14 +122,16 @@ func (acc *Account) StopBuffering() {
// async buffer (if any) and re-adding it
func (acc *Account) UpdateReader(in io.ReadCloser) {
    acc.mu.Lock()
    if acc.withBuf {
    withBuf := acc.withBuf
    if withBuf {
        acc.StopBuffering()
        acc.withBuf = false
    }
    acc.in = in
    acc.close = in
    acc.origIn = in
    acc.closed = false
    if acc.withBuf {
    if withBuf {
        acc.WithBuffer()
    }
    acc.mu.Unlock()
@@ -378,6 +384,7 @@ func (acc *Account) RemoteStats() (out rc.Params) {
        percentageDone = int(100 * float64(a) / float64(b))
    }
    out["percentage"] = percentageDone
    out["group"] = acc.stats.group

    return out
}
@@ -13,8 +13,8 @@ import (
    "github.com/rclone/rclone/fs/rc"
)

// Maximum number of completed transfers in startedTransfers list
const maxCompletedTransfers = 100
// MaxCompletedTransfers specifies maximum number of completed transfers in startedTransfers list
var MaxCompletedTransfers = 100

// StatsInfo accounts all transfers
type StatsInfo struct {
@@ -40,6 +40,7 @@ type StatsInfo struct {
    startedTransfers []*Transfer   // currently active transfers
    oldTimeRanges    timeRanges    // a merged list of time ranges for the transfers
    oldDuration      time.Duration // duration of transfers we have culled
    group            string
}

// NewStats creates an initialised StatsInfo
@@ -291,7 +292,7 @@ func (s *StatsInfo) String() string {
        }
    }

    _, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s",
    _, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s\n",
        dateString,
        fs.SizeSuffix(s.bytes),
        fs.SizeSuffix(totalSize).Unit("Bytes"),
@@ -312,16 +313,23 @@ func (s *StatsInfo) String() string {
            errorDetails = " (no need to retry)"
        }

        _, _ = fmt.Fprintf(buf, `
Errors: %10d%s
Checks: %10d / %d, %s
Transferred: %10d / %d, %s
Elapsed time: %10v
`,
            s.errors, errorDetails,
            s.checks, totalChecks, percent(s.checks, totalChecks),
            s.transfers, totalTransfer, percent(s.transfers, totalTransfer),
            dtRounded)
        // Add only non zero stats
        if s.errors != 0 {
            _, _ = fmt.Fprintf(buf, "Errors: %10d%s\n",
                s.errors, errorDetails)
        }
        if s.checks != 0 || totalChecks != 0 {
            _, _ = fmt.Fprintf(buf, "Checks: %10d / %d, %s\n",
                s.checks, totalChecks, percent(s.checks, totalChecks))
        }
        if s.deletes != 0 {
            _, _ = fmt.Fprintf(buf, "Deleted: %10d\n", s.deletes)
        }
        if s.transfers != 0 || totalTransfer != 0 {
            _, _ = fmt.Fprintf(buf, "Transferred: %10d / %d, %s\n",
                s.transfers, totalTransfer, percent(s.transfers, totalTransfer))
        }
        _, _ = fmt.Fprintf(buf, "Elapsed time: %10v\n", dtRounded)
    }

    // checking and transferring have their own locking so unlock
@@ -331,10 +339,10 @@ Elapsed time: %10v
    // Add per transfer stats if required
    if !fs.Config.StatsOneLine {
        if !s.checking.empty() {
            _, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking.String(s.inProgress))
            _, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking.String(s.inProgress, s.transferring))
        }
        if !s.transferring.empty() {
            _, _ = fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring.String(s.inProgress))
            _, _ = fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring.String(s.inProgress, nil))
        }
    }

@@ -474,14 +482,16 @@ func (s *StatsInfo) Errored() bool {
}

// Error adds a single error into the stats, assigns lastError and eventually sets fatalError or retryError
func (s *StatsInfo) Error(err error) {
    if err == nil {
        return
func (s *StatsInfo) Error(err error) error {
    if err == nil || fserrors.IsCounted(err) {
        return err
    }
    s.mu.Lock()
    defer s.mu.Unlock()
    s.errors++
    s.lastError = err
    err = fserrors.FsError(err)
    fserrors.Count(err)
    switch {
    case fserrors.IsFatalError(err):
        s.fatalError = true
@@ -494,6 +504,7 @@ func (s *StatsInfo) Error(err error) {
    case !fserrors.IsNoRetryError(err):
        s.retryError = true
    }
    return err
}
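The shape of the change: Error now hands back a countable wrapper, and the IsCounted guard makes a second call a no-op. A self-contained sketch of that idea (a simplified stand-in, not the fserrors implementation itself):

```go
package main

import (
	"errors"
	"fmt"
)

// countable mimics the CountableError idea: the wrapper remembers
// whether it has already been added to the stats.
type countable struct {
	error
	counted bool
}

type stats struct{ errors int }

// Error counts err once and returns the wrapper; feeding the wrapper
// back in is a no-op, which is what lets call sites safely write
// err = s.Error(err) at several levels of the call stack.
func (s *stats) Error(err error) error {
	if err == nil {
		return nil
	}
	if c, ok := err.(*countable); ok && c.counted {
		return err
	}
	s.errors++
	return &countable{error: err, counted: true}
}

func main() {
	s := &stats{}
	err := s.Error(errors.New("boom"))
	err = s.Error(err) // already counted - not counted again
	_ = err
	fmt.Println(s.errors) // prints 1
}
```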
// RetryAfter returns the time to retry after if it is set. It will
@@ -623,11 +634,15 @@ func (s *StatsInfo) RemoveTransfer(transfer *Transfer) {
    s.mu.Unlock()
}

// PruneTransfers makes sure there aren't too many old transfers
// PruneTransfers makes sure there aren't too many old transfers by removing
// single finished transfer.
func (s *StatsInfo) PruneTransfers() {
    if MaxCompletedTransfers < 0 {
        return
    }
    s.mu.Lock()
    // remove a transfer from the start if we are over quota
    if len(s.startedTransfers) > maxCompletedTransfers+fs.Config.Transfers {
    if len(s.startedTransfers) > MaxCompletedTransfers+fs.Config.Transfers {
        for i, tr := range s.startedTransfers {
            if tr.IsDone() {
                s.removeTransfer(tr, i)

@@ -13,7 +13,15 @@ const globalStats = "global_stats"

var groups *statsGroups

func listStats(ctx context.Context, in rc.Params) (rc.Params, error) {
func init() {
    // Init stats container
    groups = newStatsGroups()

    // Set the function pointer up in fs
    fs.CountError = GlobalStats().Error
}

func rcListStats(ctx context.Context, in rc.Params) (rc.Params, error) {
    out := make(rc.Params)

    out["groups"] = groups.names()
@@ -21,7 +29,30 @@ func listStats(ctx context.Context, in rc.Params) (rc.Params, error) {
    return out, nil
}

func remoteStats(ctx context.Context, in rc.Params) (rc.Params, error) {
func init() {
    rc.Add(rc.Call{
        Path:  "core/group-list",
        Fn:    rcListStats,
        Title: "Returns list of stats.",
        Help: `
This returns list of stats groups currently in memory.

Returns the following values:
` + "```" + `
{
    "groups": an array of group names:
        [
            "group1",
            "group2",
            ...
        ]
}
` + "```" + `
`,
    })
}

func rcRemoteStats(ctx context.Context, in rc.Params) (rc.Params, error) {
    // Check to see if we should filter by group.
    group, err := in.GetString("group")
    if rc.NotErrParamNotFound(err) {
@@ -34,50 +65,10 @@ func remoteStats(ctx context.Context, in rc.Params) (rc.Params, error) {
    return groups.sum().RemoteStats()
}

func transferredStats(ctx context.Context, in rc.Params) (rc.Params, error) {
    // Check to see if we should filter by group.
    group, err := in.GetString("group")
    if rc.NotErrParamNotFound(err) {
        return rc.Params{}, err
    }

    out := make(rc.Params)
    if group != "" {
        out["transferred"] = StatsGroup(group).Transferred()
    } else {
        out["transferred"] = groups.sum().Transferred()
    }

    return out, nil
}

func resetStats(ctx context.Context, in rc.Params) (rc.Params, error) {
    // Check to see if we should filter by group.
    group, err := in.GetString("group")
    if rc.NotErrParamNotFound(err) {
        return rc.Params{}, err
    }

    if group != "" {
        groups.get(group).ResetCounters()
        groups.get(group).ResetErrors()
    } else {
        groups.clear()
    }

    return rc.Params{}, nil
}

func init() {
    // Init stats container
    groups = newStatsGroups()

    // Set the function pointer up in fs
    fs.CountError = GlobalStats().Error

    rc.Add(rc.Call{
        Path:  "core/stats",
        Fn:    remoteStats,
        Fn:    rcRemoteStats,
        Title: "Returns stats about current transfers.",
        Help: `
This returns all available stats:
@@ -88,6 +79,7 @@ If group is not provided then summed up stats for all groups will be
returned.

Parameters

- group - name of the stats group (string)

Returns the following values:
@@ -124,10 +116,29 @@ Values for "transferring", "checking" and "lastError" are only assigned if data
The value for "eta" is null if an eta cannot be determined.
`,
    })
}

func rcTransferredStats(ctx context.Context, in rc.Params) (rc.Params, error) {
    // Check to see if we should filter by group.
    group, err := in.GetString("group")
    if rc.NotErrParamNotFound(err) {
        return rc.Params{}, err
    }

    out := make(rc.Params)
    if group != "" {
        out["transferred"] = StatsGroup(group).Transferred()
    } else {
        out["transferred"] = groups.sum().Transferred()
    }

    return out, nil
}

func init() {
    rc.Add(rc.Call{
        Path:  "core/transferred",
        Fn:    transferredStats,
        Fn:    rcTransferredStats,
        Title: "Returns stats about completed transfers.",
        Help: `
This returns stats about completed transfers:
@@ -140,6 +151,7 @@ returned.
Note only the last 100 completed transfers are returned.

Parameters

- group - name of the stats group (string)

Returns the following values:
@@ -161,39 +173,67 @@ Returns the following values:
` + "```" + `
`,
    })

    rc.Add(rc.Call{
        Path:  "core/group-list",
        Fn:    listStats,
        Title: "Returns list of stats.",
        Help: `
This returns list of stats groups currently in memory.

Returns the following values:
` + "```" + `
{
    "groups": an array of group names:
        [
            "group1",
            "group2",
            ...
        ]
}
` + "```" + `
`,
    })

func rcResetStats(ctx context.Context, in rc.Params) (rc.Params, error) {
    // Check to see if we should filter by group.
    group, err := in.GetString("group")
    if rc.NotErrParamNotFound(err) {
        return rc.Params{}, err
    }

    if group != "" {
        stats := groups.get(group)
        stats.ResetErrors()
        stats.ResetCounters()
    } else {
        groups.reset()
    }

    return rc.Params{}, nil
}

func init() {
    rc.Add(rc.Call{
        Path:  "core/stats-reset",
        Fn:    resetStats,
        Fn:    rcResetStats,
        Title: "Reset stats.",
        Help: `
This clears counters and errors for all stats or specific stats group if group
is provided.
This clears counters, errors and finished transfers for all stats or specific
stats group if group is provided.

Parameters

- group - name of the stats group (string)
`,
    })
}

func rcDeleteStats(ctx context.Context, in rc.Params) (rc.Params, error) {
    // Group name required because we only do single group.
    group, err := in.GetString("group")
    if rc.NotErrParamNotFound(err) {
        return rc.Params{}, err
    }

    if group != "" {
        groups.delete(group)
    }

    return rc.Params{}, nil
}

func init() {
    rc.Add(rc.Call{
        Path:  "core/stats-delete",
        Fn:    rcDeleteStats,
        Title: "Delete stats group.",
        Help: `
This deletes entire stats group

Parameters

- group - name of the stats group (string)
` + "```" + `
`,
    })
}
@@ -243,6 +283,7 @@ func GlobalStats() *StatsInfo {
// NewStatsGroup creates new stats under named group.
func NewStatsGroup(group string) *StatsInfo {
    stats := NewStats()
    stats.group = group
    groups.set(group, stats)
    return stats
}
@@ -269,13 +310,13 @@ func (sg *statsGroups) set(group string, stats *StatsInfo) {
    // Limit number of groups kept in memory.
    if len(sg.order) >= fs.Config.MaxStatsGroups {
        group := sg.order[0]
        fs.LogPrintf(fs.LogLevelInfo, nil, "Max number of stats groups reached removing %s", group)
        //fs.LogPrintf(fs.LogLevelInfo, nil, "Max number of stats groups reached removing %s", group)
        delete(sg.m, group)
        r := (len(sg.order) - fs.Config.MaxStatsGroups) + 1
        sg.order = sg.order[r:]
    }

    // Exclude global stats from
    // Exclude global stats from listing
    if group != globalStats {
        sg.order = append(sg.order, group)
    }
@@ -299,31 +340,36 @@ func (sg *statsGroups) names() []string {
    return sg.order
}

// get gets the stats for group, or nil if not found
// sum returns aggregate stats that contains summation of all groups.
func (sg *statsGroups) sum() *StatsInfo {
    sg.mu.Lock()
    defer sg.mu.Unlock()

    sum := NewStats()
    for _, stats := range sg.m {
        sum.bytes += stats.bytes
        sum.errors += stats.errors
        sum.fatalError = sum.fatalError || stats.fatalError
        sum.retryError = sum.retryError || stats.retryError
        sum.checks += stats.checks
        sum.transfers += stats.transfers
        sum.deletes += stats.deletes
        sum.checking.merge(stats.checking)
        sum.transferring.merge(stats.transferring)
        sum.inProgress.merge(stats.inProgress)
        if sum.lastError == nil && stats.lastError != nil {
            sum.lastError = stats.lastError
        stats.mu.RLock()
        {
            sum.bytes += stats.bytes
            sum.errors += stats.errors
            sum.fatalError = sum.fatalError || stats.fatalError
            sum.retryError = sum.retryError || stats.retryError
            sum.checks += stats.checks
            sum.transfers += stats.transfers
            sum.deletes += stats.deletes
            sum.checking.merge(stats.checking)
            sum.transferring.merge(stats.transferring)
            sum.inProgress.merge(stats.inProgress)
            if sum.lastError == nil && stats.lastError != nil {
                sum.lastError = stats.lastError
            }
            sum.startedTransfers = append(sum.startedTransfers, stats.startedTransfers...)
        }
        sum.startedTransfers = append(sum.startedTransfers, stats.startedTransfers...)
        stats.mu.RUnlock()
    }
    return sum
}

func (sg *statsGroups) clear() {
func (sg *statsGroups) reset() {
    sg.mu.Lock()
    defer sg.mu.Unlock()

@@ -335,3 +381,25 @@ func (sg *statsGroups) clear() {
    sg.m = make(map[string]*StatsInfo)
    sg.order = nil
}

// delete removes all references to the group.
func (sg *statsGroups) delete(group string) {
    sg.mu.Lock()
    defer sg.mu.Unlock()
    stats := sg.m[group]
    if stats == nil {
        return
    }
    stats.ResetErrors()
    stats.ResetCounters()
    delete(sg.m, group)

    // Remove group reference from the ordering slice.
    tmp := sg.order[:0]
    for _, g := range sg.order {
        if g != group {
            tmp = append(tmp, g)
        }
    }
    sg.order = tmp
}
104
fs/accounting/stats_groups_test.go
Normal file
@@ -0,0 +1,104 @@
package accounting

import (
    "fmt"
    "runtime"
    "testing"
)

func TestStatsGroupOperations(t *testing.T) {

    t.Run("empty group returns nil", func(t *testing.T) {
        t.Parallel()
        sg := newStatsGroups()
        sg.get("invalid-group")
    })

    t.Run("set assigns stats to group", func(t *testing.T) {
        t.Parallel()
        stats := NewStats()
        sg := newStatsGroups()
        sg.set("test", stats)
        sg.set("test1", stats)
        if len(sg.m) != len(sg.names()) || len(sg.m) != 2 {
            t.Fatalf("Expected two stats got %d, %d", len(sg.m), len(sg.order))
        }
    })

    t.Run("get returns correct group", func(t *testing.T) {
        t.Parallel()
        stats := NewStats()
        sg := newStatsGroups()
        sg.set("test", stats)
        sg.set("test1", stats)
        got := sg.get("test")
        if got != stats {
            t.Fatal("get returns incorrect stats")
        }
    })

    t.Run("sum returns correct values", func(t *testing.T) {
        t.Parallel()
        stats1 := NewStats()
        stats1.bytes = 5
        stats1.errors = 5
        stats2 := NewStats()
        sg := newStatsGroups()
        sg.set("test1", stats1)
        sg.set("test2", stats2)
        sum := sg.sum()
        if sum.bytes != stats1.bytes+stats2.bytes {
            t.Fatalf("sum() => bytes %d, expected %d", sum.bytes, stats1.bytes+stats2.bytes)
        }
        if sum.errors != stats1.errors+stats2.errors {
            t.Fatalf("sum() => errors %d, expected %d", sum.errors, stats1.errors+stats2.errors)
        }
    })

    t.Run("delete removes stats", func(t *testing.T) {
        t.Parallel()
        stats := NewStats()
        sg := newStatsGroups()
        sg.set("test", stats)
        sg.set("test1", stats)
        sg.delete("test1")
        if sg.get("test1") != nil {
            t.Fatal("stats not deleted")
        }
        if len(sg.m) != len(sg.names()) || len(sg.m) != 1 {
            t.Fatalf("Expected two stats got %d, %d", len(sg.m), len(sg.order))
        }
    })

    t.Run("memory is reclaimed", func(t *testing.T) {
        var (
            count      = 1000
            start, end runtime.MemStats
            sg         = newStatsGroups()
        )

        runtime.GC()
        runtime.ReadMemStats(&start)

        for i := 0; i < count; i++ {
            sg.set(fmt.Sprintf("test-%d", i), NewStats())
        }

        for i := 0; i < count; i++ {
            sg.delete(fmt.Sprintf("test-%d", i))
        }

        runtime.GC()
        runtime.ReadMemStats(&end)

        t.Log(fmt.Sprintf("%+v\n%+v", start, end))
        diff := percentDiff(start.HeapObjects, end.HeapObjects)
        if diff > 1 || diff < 0 {
            t.Errorf("HeapObjects = %d, expected %d", end.HeapObjects, start.HeapObjects)
        }
    })
}

func percentDiff(start, end uint64) uint64 {
    return (start - end) * 100 / start
}
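One caveat worth noting about percentDiff as written: with uint64 operands, `start - end` wraps around when the heap grew (end > start), and since the return type is unsigned the `diff < 0` comparison in the test above can never be true. A hedged signed variant that keeps the comparison meaningful:

```go
// percentDiffSigned does the arithmetic in int64 so that a grown heap
// yields a negative difference instead of wrapping around.
func percentDiffSigned(start, end uint64) int64 {
	return (int64(start) - int64(end)) * 100 / int64(start)
}
```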
@@ -78,7 +78,7 @@ func TestStatsError(t *testing.T) {
    t0 := time.Now()
    t1 := t0.Add(time.Second)

    s.Error(nil)
    _ = s.Error(nil)
    assert.Equal(t, int64(0), s.GetErrors())
    assert.False(t, s.HadFatalError())
    assert.False(t, s.HadRetryError())
@@ -86,7 +86,7 @@ func TestStatsError(t *testing.T) {
    assert.Equal(t, nil, s.GetLastError())
    assert.False(t, s.Errored())

    s.Error(io.EOF)
    _ = s.Error(io.EOF)
    assert.Equal(t, int64(1), s.GetErrors())
    assert.False(t, s.HadFatalError())
    assert.True(t, s.HadRetryError())
@@ -95,7 +95,7 @@ func TestStatsError(t *testing.T) {
    assert.True(t, s.Errored())

    e := fserrors.ErrorRetryAfter(t0)
    s.Error(e)
    _ = s.Error(e)
    assert.Equal(t, int64(2), s.GetErrors())
    assert.False(t, s.HadFatalError())
    assert.True(t, s.HadRetryError())
@@ -103,14 +103,14 @@ func TestStatsError(t *testing.T) {
    assert.Equal(t, e, s.GetLastError())

    err := errors.Wrap(fserrors.ErrorRetryAfter(t1), "potato")
    s.Error(err)
    err = s.Error(err)
    assert.Equal(t, int64(3), s.GetErrors())
    assert.False(t, s.HadFatalError())
    assert.True(t, s.HadRetryError())
    assert.Equal(t, t1, s.RetryAfter())
    assert.Equal(t, t1, fserrors.RetryAfterErrorTime(err))

    s.Error(fserrors.FatalError(io.EOF))
    _ = s.Error(fserrors.FatalError(io.EOF))
    assert.Equal(t, int64(4), s.GetErrors())
    assert.True(t, s.HadFatalError())
    assert.True(t, s.HadRetryError())
@@ -124,7 +124,7 @@ func TestStatsError(t *testing.T) {
    assert.Equal(t, nil, s.GetLastError())
    assert.False(t, s.Errored())

    s.Error(fserrors.NoRetryError(io.EOF))
    _ = s.Error(fserrors.NoRetryError(io.EOF))
    assert.Equal(t, int64(1), s.GetErrors())
    assert.False(t, s.HadFatalError())
    assert.False(t, s.HadRetryError())
@@ -382,28 +382,52 @@ func TestTimeRangeDuration(t *testing.T) {
}

func TestPruneTransfers(t *testing.T) {
    max := maxCompletedTransfers + fs.Config.Transfers
    for _, test := range []struct {
        Name                     string
        Transfers                int
        Limit                    int
        ExpectedStartedTransfers int
    }{
        {
            Name:                     "Limited number of StartedTransfers",
            Limit:                    100,
            Transfers:                200,
            ExpectedStartedTransfers: 100 + fs.Config.Transfers,
        },
        {
            Name:                     "Unlimited number of StartedTransfers",
            Limit:                    -1,
            Transfers:                200,
            ExpectedStartedTransfers: 200,
        },
    } {
        t.Run(test.Name, func(t *testing.T) {
            prevLimit := MaxCompletedTransfers
            MaxCompletedTransfers = test.Limit
            defer func() { MaxCompletedTransfers = prevLimit }()

            s := NewStats()
            for i := int64(1); i <= int64(test.Transfers); i++ {
                s.AddTransfer(&Transfer{
                    startedAt:   time.Unix(i, 0),
                    completedAt: time.Unix(i+1, 0),
                })
            }

            s.mu.Lock()
            assert.Equal(t, time.Duration(test.Transfers)*time.Second, s.totalDuration())
            assert.Equal(t, test.Transfers, len(s.startedTransfers))
            s.mu.Unlock()

            for i := 0; i < test.Transfers; i++ {
                s.PruneTransfers()
            }

            s.mu.Lock()
            assert.Equal(t, time.Duration(test.Transfers)*time.Second, s.totalDuration())
            assert.Equal(t, test.ExpectedStartedTransfers, len(s.startedTransfers))
            s.mu.Unlock()

    s := NewStats()
    for i := int64(1); i <= int64(max+100); i++ {
        s.AddTransfer(&Transfer{
            startedAt:   time.Unix(i, 0),
            completedAt: time.Unix(i+1, 0),
        })
    }

    s.mu.Lock()
    assert.Equal(t, time.Duration(max+100)*time.Second, s.totalDuration())
    assert.Equal(t, max+100, len(s.startedTransfers))
    s.mu.Unlock()

    for i := 0; i < 200; i++ {
        s.PruneTransfers()
    }

    s.mu.Lock()
    assert.Equal(t, time.Duration(max+100)*time.Second, s.totalDuration())
    assert.Equal(t, max, len(s.startedTransfers))
    s.mu.Unlock()

}
@@ -63,12 +63,21 @@ func (ss *stringSet) count() int {
    return len(ss.items)
}

// String returns string representation of set items.
func (ss *stringSet) String(progress *inProgress) string {
// String returns string representation of set items excluding any in
// exclude (if set).
func (ss *stringSet) String(progress *inProgress, exclude *stringSet) string {
    ss.mu.RLock()
    defer ss.mu.RUnlock()
    strngs := make([]string, 0, len(ss.items))
    for name := range ss.items {
        if exclude != nil {
            exclude.mu.RLock()
            _, found := exclude.items[name]
            exclude.mu.RUnlock()
            if found {
                continue
            }
        }
        var out string
        if acc := progress.get(name); acc != nil {
            out = acc.String()

@@ -18,6 +18,7 @@ type TransferSnapshot struct {
    StartedAt   time.Time `json:"started_at"`
    CompletedAt time.Time `json:"completed_at,omitempty"`
    Error       error     `json:"-"`
    Group       string    `json:"group"`
}

// MarshalJSON implements json.Marshaler interface.
@@ -26,6 +27,7 @@ func (as TransferSnapshot) MarshalJSON() ([]byte, error) {
    if as.Error != nil {
        err = as.Error.Error()
    }

    type Alias TransferSnapshot
    return json.Marshal(&struct {
        Error string `json:"error"`
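The `type Alias TransferSnapshot` line is the standard trick for custom JSON marshalling: the local alias type keeps the same fields but none of the methods, so marshalling it inside MarshalJSON cannot recurse back into MarshalJSON. A self-contained sketch of the pattern (simplified names, not the rclone types):

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type Snapshot struct {
	Name  string `json:"name"`
	Error error  `json:"-"` // not marshalled directly
}

func (s Snapshot) MarshalJSON() ([]byte, error) {
	msg := ""
	if s.Error != nil {
		msg = s.Error.Error()
	}
	type Alias Snapshot // same fields, no MarshalJSON method
	return json.Marshal(&struct {
		Error string `json:"error"`
		Alias
	}{Error: msg, Alias: Alias(s)})
}

func main() {
	b, _ := json.Marshal(Snapshot{Name: "x", Error: errors.New("boom")})
	fmt.Println(string(b)) // {"error":"boom","name":"x"}
}
```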
@@ -84,7 +86,7 @@ func newTransferRemoteSize(stats *StatsInfo, remote string, size int64, checking
// Must be called after transfer is finished to run proper cleanups.
func (tr *Transfer) Done(err error) {
    if err != nil {
        tr.stats.Error(err)
        err = tr.stats.Error(err)

        tr.mu.Lock()
        tr.err = err
@@ -176,5 +178,6 @@ func (tr *Transfer) Snapshot() TransferSnapshot {
        StartedAt:   tr.startedAt,
        CompletedAt: tr.completedAt,
        Error:       tr.err,
        Group:       tr.stats.group,
    }
}

@@ -174,6 +174,9 @@ func (a *AsyncReader) WriteTo(w io.Writer) (n int64, err error) {
    n = 0
    for {
        err = a.fill()
        if err == io.EOF {
            return n, nil
        }
        if err != nil {
            return n, err
        }
@@ -183,6 +186,10 @@ func (a *AsyncReader) WriteTo(w io.Writer) (n int64, err error) {
        if err != nil {
            return n, err
        }
        if a.cur.err == io.EOF {
            a.err = a.cur.err
            return n, err
        }
        if a.cur.err != nil {
            a.err = a.cur.err
            return n, a.cur.err

@@ -60,12 +60,12 @@ func TestAsyncWriteTo(t *testing.T) {

    var dst = &bytes.Buffer{}
    n, err := io.Copy(dst, ar)
    assert.Equal(t, io.EOF, err)
    require.NoError(t, err)
    assert.Equal(t, int64(10), n)

    // Should still return EOF
    // Should still not return any errors
    n, err = io.Copy(dst, ar)
    assert.Equal(t, io.EOF, err)
    require.NoError(t, err)
    assert.Equal(t, int64(0), n)

    err = ar.Close()

@@ -7,6 +7,7 @@ import (
    "sync"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/hash"
)

// io related errors returned by ChunkedReader
@@ -215,12 +216,12 @@ func (cr *ChunkedReader) openRange() error {
    var err error
    if length <= 0 {
        if offset == 0 {
            rc, err = cr.o.Open(cr.ctx)
            rc, err = cr.o.Open(cr.ctx, &fs.HashesOption{Hashes: hash.Set(hash.None)})
        } else {
            rc, err = cr.o.Open(cr.ctx, &fs.RangeOption{Start: offset, End: -1})
            rc, err = cr.o.Open(cr.ctx, &fs.HashesOption{Hashes: hash.Set(hash.None)}, &fs.RangeOption{Start: offset, End: -1})
        }
    } else {
        rc, err = cr.o.Open(cr.ctx, &fs.RangeOption{Start: offset, End: offset + length - 1})
        rc, err = cr.o.Open(cr.ctx, &fs.HashesOption{Hashes: hash.Set(hash.None)}, &fs.RangeOption{Start: offset, End: offset + length - 1})
    }
    if err != nil {
        return err
@@ -32,7 +32,7 @@ var (
    //
    // This is a function pointer to decouple the config
    // implementation from the fs
    CountError = func(err error) {}
    CountError = func(err error) error { return nil }

    // ConfigProvider is the config key used for provider options
    ConfigProvider = "provider"
@@ -67,6 +67,7 @@ type ConfigInfo struct {
    IgnoreChecksum  bool
    IgnoreCaseSync  bool
    NoTraverse      bool
    NoCheckDest     bool
    NoUpdateModTime bool
    DataRateUnit    string
    CompareDest     string

@@ -62,6 +62,9 @@ const (

    // ConfigAuthorize indicates that we just want "rclone authorize"
    ConfigAuthorize = "config_authorize"

    // ConfigAuthNoBrowser indicates that we do not want to open browser
    ConfigAuthNoBrowser = "config_auth_no_browser"
)

// Global
@@ -572,7 +575,7 @@ func SetValueAndSave(name, key, value string) (err error) {
    _, err = reloadedConfigFile.GetSection(name)
    if err != nil {
        // Section doesn't exist yet so ignore reload
        return err
        return nil
    }
    // Update the config file with the reloaded version
    configFile = reloadedConfigFile
@@ -635,11 +638,16 @@ func ReadNonEmptyLine(prompt string) string {
    return result
}

// Command - choose one
func Command(commands []string) byte {
// CommandDefault - choose one. If return is pressed then it will
// chose the defaultIndex if it is >= 0
func CommandDefault(commands []string, defaultIndex int) byte {
    opts := []string{}
    for _, text := range commands {
        fmt.Printf("%c) %s\n", text[0], text[1:])
    for i, text := range commands {
        def := ""
        if i == defaultIndex {
            def = " (default)"
        }
        fmt.Printf("%c) %s%s\n", text[0], text[1:], def)
        opts = append(opts, text[:1])
    }
    optString := strings.Join(opts, "")
@@ -647,6 +655,9 @@ func Command(commands []string) byte {
    for {
        fmt.Printf("%s> ", optHelp)
        result := strings.ToLower(ReadLine())
        if len(result) == 0 && defaultIndex >= 0 {
            return optString[defaultIndex]
        }
        if len(result) != 1 {
            continue
        }
@@ -657,11 +668,20 @@ func Command(commands []string) byte {
    }
}

// Command - choose one
func Command(commands []string) byte {
    return CommandDefault(commands, -1)
}

// Confirm asks the user for Yes or No and returns true or false
//
// If AutoConfirm is set, it will return true
func Confirm() bool {
    return Command([]string{"yYes", "nNo"}) == 'y'
// If the user presses enter then the Default will be used
func Confirm(Default bool) bool {
    defaultIndex := 0
    if !Default {
        defaultIndex = 1
    }
    return CommandDefault([]string{"yYes", "nNo"}, defaultIndex) == 'y'
}
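The effect of the new defaultIndex parameter, in miniature: pressing return on an empty line now selects the flagged default instead of looping. A small sketch of the selection logic under those assumptions (not the full rclone prompt code):

```go
package main

import "fmt"

// pick mirrors CommandDefault's core: empty input selects the default
// option when defaultIndex >= 0, otherwise the caller re-prompts.
func pick(commands []string, defaultIndex int, input string) (byte, bool) {
	opts := make([]byte, 0, len(commands))
	for _, text := range commands {
		opts = append(opts, text[0])
	}
	if input == "" && defaultIndex >= 0 {
		return opts[defaultIndex], true
	}
	for _, o := range opts {
		if len(input) == 1 && input[0] == o {
			return o, true
		}
	}
	return 0, false // no match - caller re-prompts
}

func main() {
	c, _ := pick([]string{"yYes", "nNo"}, 0, "") // bare return
	fmt.Printf("%c\n", c)                        // y - the default
}
```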
// ConfirmWithConfig asks the user for Yes or No and returns true or
@@ -688,7 +708,7 @@ func ConfirmWithConfig(m configmap.Getter, configName string, Default bool) bool
        fmt.Printf("Auto confirm is set: answering %s, override by setting config parameter %s=%v\n", answer, configName, !Default)
        return Default
    }
    return Confirm()
    return Confirm(Default)
}

// Choose one of the defaults or type a new string if newOk is set
@@ -797,7 +817,7 @@ func ShowRemote(name string) {
// OkRemote prints the contents of the remote and ask if it is OK
func OkRemote(name string) bool {
    ShowRemote(name)
    switch i := Command([]string{"yYes this is OK", "eEdit this remote", "dDelete this remote"}); i {
    switch i := CommandDefault([]string{"yYes this is OK", "eEdit this remote", "dDelete this remote"}, 0); i {
    case 'y':
        return true
    case 'e':
@@ -867,12 +887,14 @@ func ChooseOption(o *fs.Option, name string) string {
    fmt.Println(o.Help)
    if o.IsPassword {
        actions := []string{"yYes type in my own password", "gGenerate random password"}
        defaultAction := -1
        if !o.Required {
            defaultAction = len(actions)
            actions = append(actions, "nNo leave this optional password blank")
        }
        var password string
        var err error
        switch i := Command(actions); i {
        switch i := CommandDefault(actions, defaultAction); i {
        case 'y':
            password = ChangePassword("the")
        case 'g':
@@ -887,7 +909,7 @@ func ChooseOption(o *fs.Option, name string) string {
            fmt.Printf("Use this password? Please note that an obscured version of this \npassword (and not the " +
                "password itself) will be stored under your \nconfiguration file, so keep this generated password " +
                "in a safe place.\n")
            if Confirm() {
            if Confirm(true) {
                break
            }
        }
@@ -1062,12 +1084,17 @@ func fsOption() *fs.Option {
    return o
}

// NewRemoteName asks the user for a name for a remote
// NewRemoteName asks the user for a name for a new remote
func NewRemoteName() (name string) {
    for {
        fmt.Printf("name> ")
        name = ReadLine()
        err := fspath.CheckConfigName(name)
        _, err := getConfigData().GetSection(name)
        if err == nil {
            fmt.Printf("Remote %q already exists.\n", name)
            continue
        }
        err = fspath.CheckConfigName(name)
        switch {
        case name == "":
            fmt.Printf("Can't use empty name.\n")
@@ -1092,7 +1119,7 @@ func editOptions(ri *fs.RegInfo, name string, isNew bool) {
            break
        }
        fmt.Printf("Edit advanced config? (y/n)\n")
        if !Confirm() {
        if !Confirm(false) {
            break
        }
    }
@@ -1107,7 +1134,7 @@ func editOptions(ri *fs.RegInfo, name string, isNew bool) {
        if !isNew {
            fmt.Printf("Value %q = %q\n", option.Name, FileGet(name, option.Name))
            fmt.Printf("Edit? (y/n)>\n")
            if !Confirm() {
            if !Confirm(false) {
                continue
            }
        }
@@ -1299,7 +1326,7 @@ func SetPassword() {
//
//     rclone authorize "fs name"
//     rclone authorize "fs name" "client id" "client secret"
func Authorize(args []string) {
func Authorize(args []string, noAutoBrowser bool) {
    defer suppressConfirm()()
    switch len(args) {
    case 1, 3:
@@ -1319,10 +1346,15 @@ func Authorize(args []string) {

    // Indicate that we are running rclone authorize
    getConfigData().SetValue(name, ConfigAuthorize, "true")
    if noAutoBrowser {
        getConfigData().SetValue(name, ConfigAuthNoBrowser, "true")
    }

    if len(args) == 3 {
        getConfigData().SetValue(name, ConfigClientID, args[1])
        getConfigData().SetValue(name, ConfigClientSecret, args[2])
    }

    m := fs.ConfigMap(f, name)
    f.Config(name, m)
}

@@ -115,14 +115,6 @@ func TestCRUD(t *testing.T) {
    assert.Equal(t, "true", FileGet("asdf", "bool"))
    assert.Equal(t, "secret", obscure.MustReveal(FileGet("asdf", "pass")))

    // no-op rename, asdf → asdf
    RenameRemote("asdf")

    assert.Equal(t, []string{"asdf"}, configFile.GetSectionList())
    assert.Equal(t, "config_test_remote", FileGet("asdf", "type"))
    assert.Equal(t, "true", FileGet("asdf", "bool"))
    assert.Equal(t, "secret", obscure.MustReveal(FileGet("asdf", "pass")))

    // delete remote
    DeleteRemote("asdf")
    assert.Equal(t, []string{}, configFile.GetSectionList())
@@ -163,6 +155,28 @@ func TestChooseOption(t *testing.T) {
    assert.Equal(t, "", FileGet("test", "pass"))
}

func TestNewRemoteName(t *testing.T) {
    defer testConfigFile(t, "crud.conf")()

    // script for creating remote
    ReadLine = makeReadLine([]string{
        "config_test_remote", // type
        "true",               // bool value
        "n",                  // not required
        "y",                  // looks good, save
    })
    NewRemote("test")

    ReadLine = makeReadLine([]string{
        "test",           // already exists
        "",               // empty string not allowed
        "bad@characters", // bad characters
        "newname",        // OK
    })

    assert.Equal(t, "newname", NewRemoteName())
}

func TestCreateUpatePasswordRemote(t *testing.T) {
    defer testConfigFile(t, "update.conf")()
@@ -68,9 +68,10 @@ func AddFlags(flagSet *pflag.FlagSet) {
    flags.BoolVarP(flagSet, &fs.Config.IgnoreChecksum, "ignore-checksum", "", fs.Config.IgnoreChecksum, "Skip post copy check of checksums.")
    flags.BoolVarP(flagSet, &fs.Config.IgnoreCaseSync, "ignore-case-sync", "", fs.Config.IgnoreCaseSync, "Ignore case when synchronizing")
    flags.BoolVarP(flagSet, &fs.Config.NoTraverse, "no-traverse", "", fs.Config.NoTraverse, "Don't traverse destination file system on copy.")
    flags.BoolVarP(flagSet, &fs.Config.NoCheckDest, "no-check-dest", "", fs.Config.NoCheckDest, "Don't check the destination, copy regardless.")
    flags.BoolVarP(flagSet, &fs.Config.NoUpdateModTime, "no-update-modtime", "", fs.Config.NoUpdateModTime, "Don't update destination mod-time if files identical.")
    flags.StringVarP(flagSet, &fs.Config.CompareDest, "compare-dest", "", fs.Config.CompareDest, "use DIR to server side copy flies from.")
    flags.StringVarP(flagSet, &fs.Config.CopyDest, "copy-dest", "", fs.Config.CopyDest, "Compare dest to DIR also.")
    flags.StringVarP(flagSet, &fs.Config.CompareDest, "compare-dest", "", fs.Config.CompareDest, "Include additional server-side path during comparison.")
    flags.StringVarP(flagSet, &fs.Config.CopyDest, "copy-dest", "", fs.Config.CopyDest, "Implies --compare-dest but also copies files from path into destination.")
    flags.StringVarP(flagSet, &fs.Config.BackupDir, "backup-dir", "", fs.Config.BackupDir, "Make backups into hierarchy based in DIR.")
    flags.StringVarP(flagSet, &fs.Config.Suffix, "suffix", "", fs.Config.Suffix, "Suffix to add to changed files.")
    flags.BoolVarP(flagSet, &fs.Config.SuffixKeepExtension, "suffix-keep-extension", "", fs.Config.SuffixKeepExtension, "Preserve the extension when using --suffix.")

@@ -37,6 +37,7 @@ func init() {
        AuthRequired: true,
        Help: `
Parameters:

- name - name of remote to get

See the [config dump command](/commands/rclone_config_dump/) command for more information on the above.

@@ -178,6 +178,53 @@ func IsNoRetryError(err error) (isNoRetry bool) {
    return
}

// NoLowLevelRetrier is an optional interface for error as to whether
// the operation should not be retried at a low level.
//
// NoLowLevelRetry errors won't be retried by low level retry loops.
type NoLowLevelRetrier interface {
    error
    NoLowLevelRetry() bool
}

// wrappedNoLowLevelRetryError is an error wrapped so it will satisfy the
// NoLowLevelRetrier interface and return true
type wrappedNoLowLevelRetryError struct {
    error
}

// NoLowLevelRetry interface
func (err wrappedNoLowLevelRetryError) NoLowLevelRetry() bool {
    return true
}

// Check interface
var _ NoLowLevelRetrier = wrappedNoLowLevelRetryError{error(nil)}

// NoLowLevelRetryError makes an error which indicates the sync
// shouldn't be low level retried.
func NoLowLevelRetryError(err error) error {
    return wrappedNoLowLevelRetryError{err}
}

// Cause returns the underlying error
func (err wrappedNoLowLevelRetryError) Cause() error {
    return err.error
}

// IsNoLowLevelRetryError returns true if err conforms to the NoLowLevelRetry
// interface and calling the NoLowLevelRetry method returns true.
func IsNoLowLevelRetryError(err error) (isNoLowLevelRetry bool) {
    errors.Walk(err, func(err error) bool {
        if r, ok := err.(NoLowLevelRetrier); ok {
            isNoLowLevelRetry = r.NoLowLevelRetry()
            return true
        }
        return false
    })
    return
}
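Usage of the new wrapper, as a sketch against the API added in this diff (the error text is a placeholder): marking an error this way makes fserrors.ShouldRetry return false for it, so the low level retry loop gives up immediately (see the ShouldRetry hunk further down).

```go
package main

import (
	"errors"
	"fmt"

	"github.com/rclone/rclone/fs/fserrors"
)

func main() {
	// Wrap an error that a backend knows is pointless to retry.
	err := fserrors.NoLowLevelRetryError(errors.New("permanent condition"))

	fmt.Println(fserrors.IsNoLowLevelRetryError(err)) // true
	fmt.Println(fserrors.ShouldRetry(err))            // false - skipped by low level retries
}
```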
// RetryAfter is an optional interface for error as to whether the
// operation should be retried after a given delay
//
@@ -230,6 +277,64 @@ func IsRetryAfterError(err error) bool {
    return !RetryAfterErrorTime(err).IsZero()
}

// CountableError is an optional interface for error. It stores a boolean
// which signifies if the error has already been counted or not
type CountableError interface {
    error
    Count()
    IsCounted() bool
}

// wrappedFatalError is an error wrapped so it will satisfy the
// Retrier interface and return true
type wrappedCountableError struct {
    error
    isCounted bool
}

// CountableError interface
func (err *wrappedCountableError) Count() {
    err.isCounted = true
}

// CountableError interface
func (err *wrappedCountableError) IsCounted() bool {
    return err.isCounted
}

func (err *wrappedCountableError) Cause() error {
    return err.error
}

// IsCounted returns true if err conforms to the CountableError interface
// and has already been counted
func IsCounted(err error) bool {
    if r, ok := err.(CountableError); ok {
        return r.IsCounted()
    }
    return false
}

// Count sets the isCounted variable on the error if it conforms to the
// CountableError interface
func Count(err error) {
    if r, ok := err.(CountableError); ok {
        r.Count()
    }
}

// Check interface
var _ CountableError = &wrappedCountableError{error: error(nil)}

// FsError makes an error which can keep a record that it is already counted
// or not
func FsError(err error) error {
    if err == nil {
        err = errors.New("countable error")
    }
    return &wrappedCountableError{error: err}
}
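How the two package-level helpers combine with FsError, sketched against the API above (placeholder error text):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/rclone/rclone/fs/fserrors"
)

func main() {
	err := fserrors.FsError(errors.New("boom"))
	fmt.Println(fserrors.IsCounted(err)) // false - not counted yet

	fserrors.Count(err)
	fmt.Println(fserrors.IsCounted(err)) // true - StatsInfo.Error would now skip it
}
```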
// Cause is a souped up errors.Cause which can unwrap some standard
// library errors too. It returns true if any of the intermediate
// errors had a Timeout() or Temporary() method which returned true.
@@ -287,6 +392,11 @@ func ShouldRetry(err error) bool {
        return false
    }

    // If error has been marked to NoLowLevelRetry then don't retry
    if IsNoLowLevelRetryError(err) {
        return false
    }

    // Find root cause if available
    retriable, err := Cause(err)
    if retriable {

@@ -318,8 +318,8 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error
    // Get transactions per second token first if limiting
    if tpsBucket != nil {
        tbErr := tpsBucket.Wait(req.Context())
        if tbErr != nil {
            fs.Errorf(nil, "HTTP token bucket error: %v", err)
        if tbErr != nil && tbErr != context.Canceled {
            fs.Errorf(nil, "HTTP token bucket error: %v", tbErr)
        }
    }
    // Force user agent
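The RoundTrip fix above also stops logging a token-bucket Wait that failed only because the request's context went away. A sketch of the same pattern with golang.org/x/time/rate, which is the kind of limiter tpsBucket is (an assumption for this sketch):

```go
package main

import (
	"context"
	"log"

	"golang.org/x/time/rate"
)

func main() {
	limiter := rate.NewLimiter(rate.Limit(10), 1) // ~10 ops/second, burst 1

	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate the caller abandoning the request

	// A cancelled context is an expected way for Wait to return;
	// only log the genuinely unexpected failures.
	if err := limiter.Wait(ctx); err != nil && err != context.Canceled {
		log.Printf("token bucket error: %v", err)
	}
}
```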
@@ -66,7 +66,7 @@ func init() {
    MD5 = RegisterHash("MD5", 32, md5.New)
    SHA1 = RegisterHash("SHA-1", 40, sha1.New)
    Whirlpool = RegisterHash("Whirlpool", 128, whirlpool.New)
    CRC32 = RegisterHash("CRC32", 8, func() hash.Hash { return crc32.NewIEEE() })
    CRC32 = RegisterHash("CRC-32", 8, func() hash.Hash { return crc32.NewIEEE() })
}

// Supported returns a set of all the supported hashes by

@@ -10,16 +10,24 @@ import (
    "strings"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/flags"
)

// Flags
var (
    logFile        = flags.StringP("log-file", "", "", "Log everything to this file")
    logFormat      = flags.StringP("log-format", "", "date,time", "Comma separated list of log format options")
    useSyslog      = flags.BoolP("syslog", "", false, "Use Syslog for logging")
    syslogFacility = flags.StringP("syslog-facility", "", "DAEMON", "Facility for syslog, eg KERN,USER,...")
)
// Options contains options for the remote control server
type Options struct {
    File           string // Log everything to this file
    Format         string // Comma separated list of log format options
    UseSyslog      bool   // Use Syslog for logging
    SyslogFacility string // Facility for syslog, eg KERN,USER,...
}

// DefaultOpt is the default values used for Opt
var DefaultOpt = Options{
    Format:         "date,time",
    SyslogFacility: "DAEMON",
}

// Opt is the options for the logger
var Opt = DefaultOpt

// fnName returns the name of the calling +2 function
func fnName() string {
@@ -79,7 +87,7 @@ func Stack(o interface{}, info string) {

// InitLogging start the logging as per the command line flags
func InitLogging() {
    flagsStr := "," + *logFormat + ","
    flagsStr := "," + Opt.Format + ","
    var flags int
    if strings.Contains(flagsStr, ",date,") {
        flags |= log.Ldate
@@ -102,8 +110,8 @@ func InitLogging() {
    log.SetFlags(flags)

    // Log file output
    if *logFile != "" {
        f, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
    if Opt.File != "" {
        f, err := os.OpenFile(Opt.File, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
        if err != nil {
            log.Fatalf("Failed to open log file: %v", err)
        }
@@ -116,8 +124,8 @@ func InitLogging() {
    }

    // Syslog output
    if *useSyslog {
        if *logFile != "" {
    if Opt.UseSyslog {
        if Opt.File != "" {
            log.Fatalf("Can't use --syslog and --log-file together")
        }
        startSysLog()
@@ -126,5 +134,5 @@ func InitLogging() {

// Redirected returns true if the log has been redirected from stdout
func Redirected() bool {
    return *useSyslog || *logFile != ""
    return Opt.UseSyslog || Opt.File != ""
}
19
fs/log/logflags/logflags.go
Normal file
@@ -0,0 +1,19 @@
|
||||
// Package logflags implements command line flags to set up the log
|
||||
package logflags
|
||||
|
||||
import (
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/fs/log"
|
||||
"github.com/rclone/rclone/fs/rc"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// AddFlags adds the log flags to the flagSet
|
||||
func AddFlags(flagSet *pflag.FlagSet) {
|
||||
rc.AddOption("log", &log.Opt)
|
||||
|
||||
flags.StringVarP(flagSet, &log.Opt.File, "log-file", "", log.Opt.File, "Log everything to this file")
|
||||
flags.StringVarP(flagSet, &log.Opt.Format, "log-format", "", log.Opt.Format, "Comma separated list of log format options")
|
||||
flags.BoolVarP(flagSet, &log.Opt.UseSyslog, "syslog", "", log.Opt.UseSyslog, "Use Syslog for logging")
|
||||
flags.StringVarP(flagSet, &log.Opt.SyslogFacility, "syslog-facility", "", log.Opt.SyslogFacility, "Facility for syslog, eg KERN,USER,...")
|
||||
}
|
||||
@@ -32,9 +32,9 @@ var (
|
||||
|
||||
// Starts syslog
|
||||
func startSysLog() bool {
|
||||
facility, ok := syslogFacilityMap[*syslogFacility]
|
||||
facility, ok := syslogFacilityMap[Opt.SyslogFacility]
|
||||
if !ok {
|
||||
log.Fatalf("Unknown syslog facility %q - man syslog for list", *syslogFacility)
|
||||
log.Fatalf("Unknown syslog facility %q - man syslog for list", Opt.SyslogFacility)
|
||||
}
|
||||
Me := path.Base(os.Args[0])
|
||||
w, err := syslog.New(syslog.LOG_NOTICE|facility, Me)
|
||||
|
||||
@@ -30,6 +30,7 @@ type March struct {
|
||||
SrcIncludeAll bool // don't include all files in the src
|
||||
DstIncludeAll bool // don't include all files in the destination
|
||||
Callback Marcher // object to call with results
|
||||
NoCheckDest bool // transfer all objects regardless without checking dst
|
||||
// internal state
|
||||
srcListDir listDirFn // function to call to list a directory in the src
|
||||
dstListDir listDirFn // function to call to list a directory in the dst
|
||||
@@ -188,6 +189,7 @@ func (m *March) Run() error {
|
||||
srcDepth: srcDepth - 1,
|
||||
dstRemote: m.Dir,
|
||||
dstDepth: dstDepth - 1,
|
||||
noDst: m.NoCheckDest,
|
||||
}
|
||||
go func() {
|
||||
// when the context is cancelled discard the remaining jobs
|
||||
@@ -393,20 +395,20 @@ func (m *March) processJob(job listDirJob) ([]listDirJob, error) {
|
||||
wg.Wait()
|
||||
if srcListErr != nil {
|
||||
fs.Errorf(job.srcRemote, "error reading source directory: %v", srcListErr)
|
||||
fs.CountError(srcListErr)
|
||||
srcListErr = fs.CountError(srcListErr)
|
||||
return nil, srcListErr
|
||||
}
|
||||
if dstListErr == fs.ErrorDirNotFound {
|
||||
// Copy the stuff anyway
|
||||
} else if dstListErr != nil {
|
||||
fs.Errorf(job.dstRemote, "error reading destination directory: %v", dstListErr)
|
||||
fs.CountError(dstListErr)
|
||||
dstListErr = fs.CountError(dstListErr)
|
||||
return nil, dstListErr
|
||||
}
|
||||
|
||||
// If NoTraverse is set, then try to find a matching object
|
||||
// for each item in the srcList
|
||||
if m.NoTraverse {
|
||||
if m.NoTraverse && !m.NoCheckDest {
|
||||
for _, src := range srcList {
|
||||
if srcObj, ok := src.(fs.Object); ok {
|
||||
leaf := path.Base(srcObj.Remote())
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/walk"
-	"github.com/spf13/pflag"
 )

 // dedupeRename renames the objs slice to different names
@@ -34,7 +33,7 @@ outer:
 		_, err := f.NewObject(ctx, newName)
 		for ; err != fs.ErrorObjectNotFound; suffix++ {
 			if err != nil {
-				fs.CountError(err)
+				err = fs.CountError(err)
 				fs.Errorf(o, "Failed to check for existing object: %v", err)
 				continue outer
 			}
@@ -48,7 +47,7 @@ outer:
 		if !fs.Config.DryRun {
 			newObj, err := doMove(ctx, o, newName)
 			if err != nil {
-				fs.CountError(err)
+				err = fs.CountError(err)
 				fs.Errorf(o, "Failed to rename: %v", err)
 				continue
 			}
@@ -196,9 +195,6 @@ func (x *DeduplicateMode) Type() string {
 	return "string"
 }

-// Check it satisfies the interface
-var _ pflag.Value = (*DeduplicateMode)(nil)
-
 // dedupeFindDuplicateDirs scans f for duplicate directories
 func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) ([][]fs.Directory, error) {
 	dirs := map[string][]fs.Directory{}
@@ -211,12 +207,18 @@ func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) ([][]fs.Directory, er
 	if err != nil {
 		return nil, errors.Wrap(err, "find duplicate dirs")
 	}
-	duplicateDirs := [][]fs.Directory{}
-	for _, ds := range dirs {
+	// make sure parents are before children
+	duplicateNames := []string{}
+	for name, ds := range dirs {
 		if len(ds) > 1 {
-			duplicateDirs = append(duplicateDirs, ds)
+			duplicateNames = append(duplicateNames, name)
 		}
 	}
+	sort.Strings(duplicateNames)
+	duplicateDirs := [][]fs.Directory{}
+	for _, name := range duplicateNames {
+		duplicateDirs = append(duplicateDirs, dirs[name])
+	}
 	return duplicateDirs, nil
 }

@@ -235,7 +237,8 @@ func dedupeMergeDuplicateDirs(ctx context.Context, f fs.Fs, duplicateDirs [][]fs
 		fs.Infof(dirs[0], "Merging contents of duplicate directories")
 		err := mergeDirs(ctx, dirs)
 		if err != nil {
-			return errors.Wrap(err, "merge duplicate dirs")
+			err = fs.CountError(err)
+			fs.Errorf(nil, "merge duplicate dirs: %v", err)
 		}
 	} else {
 		fs.Infof(dirs[0], "NOT Merging contents of duplicate directories as --dry-run")
@@ -251,23 +254,16 @@ func dedupeMergeDuplicateDirs(ctx context.Context, f fs.Fs, duplicateDirs [][]fs
 func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error {
 	fs.Infof(f, "Looking for duplicates using %v mode.", mode)

-	// Find duplicate directories first and fix them - repeat
-	// until all fixed
-	for {
-		duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f)
-		if err != nil {
-			return err
-		}
-		if len(duplicateDirs) == 0 {
-			break
-		}
+	// Find duplicate directories first and fix them
+	duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f)
+	if err != nil {
+		return err
+	}
+	if len(duplicateDirs) != 0 {
 		err = dedupeMergeDuplicateDirs(ctx, f, duplicateDirs)
 		if err != nil {
 			return err
 		}
-		if fs.Config.DryRun {
-			break
-		}
 	}

 	// find a hash to use
@@ -275,7 +271,7 @@ func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error {

 	// Now find duplicate files
 	files := map[string][]fs.Object{}
-	err := walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
+	err = walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
 		entries.ForObject(func(o fs.Object) {
 			remote := o.Remote()
 			files[remote] = append(files[remote], o)
@@ -10,10 +10,14 @@ import (
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/fstest"
+	"github.com/spf13/pflag"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )

+// Check flag satisfies the interface
+var _ pflag.Value = (*operations.DeduplicateMode)(nil)
+
 func skipIfCantDedupe(t *testing.T, f fs.Fs) {
 	if !f.Features().DuplicateFiles {
 		t.Skip("Can't test deduplicate - no duplicate files possible")
@@ -63,7 +63,7 @@ func checkHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object, ht hash.
 	g.Go(func() (err error) {
 		srcHash, err = src.Hash(ctx, ht)
 		if err != nil {
-			fs.CountError(err)
+			err = fs.CountError(err)
 			fs.Errorf(src, "Failed to calculate src hash: %v", err)
 		}
 		return err
@@ -71,7 +71,7 @@ func checkHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object, ht hash.
 	g.Go(func() (err error) {
 		dstHash, err = dst.Hash(ctx, ht)
 		if err != nil {
-			fs.CountError(err)
+			err = fs.CountError(err)
 			fs.Errorf(dst, "Failed to calculate dst hash: %v", err)
 		}
 		return err
@@ -234,7 +234,7 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt)
 		}
 		return false
 	} else if err != nil {
-		fs.CountError(err)
+		err = fs.CountError(err)
 		fs.Errorf(dst, "Failed to set modification time: %v", err)
 	} else {
 		fs.Infof(src, "Updated modification time in destination")
@@ -402,13 +402,14 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
 		// Retry if err returned a retry error
 		if fserrors.IsRetryError(err) || fserrors.ShouldRetry(err) {
 			fs.Debugf(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries)
+			tr.Reset() // skip incomplete accounting - will be overwritten by retry
 			continue
 		}
 		// otherwise finish
 		break
 	}
 	if err != nil {
-		fs.CountError(err)
+		err = fs.CountError(err)
 		fs.Errorf(src, "Failed to copy: %v", err)
 		return newDst, err
 	}
@@ -417,7 +418,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
 	if sizeDiffers(src, dst) {
 		err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
 		fs.Errorf(dst, "%v", err)
-		fs.CountError(err)
+		err = fs.CountError(err)
 		removeFailedCopy(ctx, dst)
 		return newDst, err
 	}
@@ -429,7 +430,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
 		if !equal {
 			err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
 			fs.Errorf(dst, "%v", err)
-			fs.CountError(err)
+			err = fs.CountError(err)
 			removeFailedCopy(ctx, dst)
 			return newDst, err
 		}
@@ -492,7 +493,7 @@ func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.
 	case fs.ErrorCantMove:
 		fs.Debugf(src, "Can't move, switching to copy")
 	default:
-		fs.CountError(err)
+		err = fs.CountError(err)
 		fs.Errorf(src, "Couldn't move: %v", err)
 		return newDst, err
 	}
@@ -558,8 +559,8 @@ func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs
 		err = dst.Remove(ctx)
 	}
 	if err != nil {
-		fs.CountError(err)
 		fs.Errorf(dst, "Couldn't %s: %v", action, err)
+		err = fs.CountError(err)
 	} else if !fs.Config.DryRun {
 		fs.Infof(dst, actioned)
 	}
@@ -685,7 +686,7 @@ func checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHas
 	if !same {
 		err = errors.Errorf("%v differ", ht)
 		fs.Errorf(src, "%v", err)
-		fs.CountError(err)
+		_ = fs.CountError(err)
 		return true, false
 	}
 	return false, false
@@ -716,11 +717,14 @@ func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) {
 		}
 		err := errors.Errorf("File not in %v", c.fsrc)
 		fs.Errorf(dst, "%v", err)
-		fs.CountError(err)
+		_ = fs.CountError(err)
 		atomic.AddInt32(&c.differences, 1)
 		atomic.AddInt32(&c.srcFilesMissing, 1)
 	case fs.Directory:
 		// Do the same thing to the entire contents of the directory
+		if c.oneway {
+			return false
+		}
 		return true
 	default:
 		panic("Bad object in DirEntries")
@@ -734,7 +738,7 @@ func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) {
 	case fs.Object:
 		err := errors.Errorf("File not in %v", c.fdst)
 		fs.Errorf(src, "%v", err)
-		fs.CountError(err)
+		_ = fs.CountError(err)
 		atomic.AddInt32(&c.differences, 1)
 		atomic.AddInt32(&c.dstFilesMissing, 1)
 	case fs.Directory:
@@ -756,7 +760,6 @@ func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (di
 	if sizeDiffers(src, dst) {
 		err = errors.Errorf("Sizes differ")
 		fs.Errorf(src, "%v", err)
-		fs.CountError(err)
 		return true, false
 	}
 	if fs.Config.SizeOnly {
@@ -776,15 +779,17 @@ func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse b
 			atomic.AddInt32(&c.differences, 1)
 		} else {
 			atomic.AddInt32(&c.matches, 1)
-			fs.Debugf(dstX, "OK")
-		}
-		if noHash {
-			atomic.AddInt32(&c.noHashes, 1)
+			if noHash {
+				atomic.AddInt32(&c.noHashes, 1)
+				fs.Debugf(dstX, "OK - could not check hash")
+			} else {
+				fs.Debugf(dstX, "OK")
+			}
 		}
 	} else {
 		err := errors.Errorf("is file on %v but directory on %v", c.fsrc, c.fdst)
 		fs.Errorf(src, "%v", err)
-		fs.CountError(err)
+		_ = fs.CountError(err)
 		atomic.AddInt32(&c.differences, 1)
 		atomic.AddInt32(&c.dstFilesMissing, 1)
 	}
@@ -796,7 +801,7 @@ func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse b
 	}
 	err := errors.Errorf("is file on %v but directory on %v", c.fdst, c.fsrc)
 	fs.Errorf(dst, "%v", err)
-	fs.CountError(err)
+	_ = fs.CountError(err)
 	atomic.AddInt32(&c.differences, 1)
 	atomic.AddInt32(&c.srcFilesMissing, 1)

@@ -923,7 +928,7 @@ func CheckDownload(ctx context.Context, fdst, fsrc fs.Fs, oneway bool) error {
 	check := func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool) {
 		differ, err := CheckIdentical(ctx, a, b)
 		if err != nil {
-			fs.CountError(err)
+			err = fs.CountError(err)
 			fs.Errorf(a, "Failed to download: %v", err)
 			return true, true
 		}
@@ -1070,7 +1075,7 @@ func Mkdir(ctx context.Context, f fs.Fs, dir string) error {
 	fs.Debugf(fs.LogDirName(f, dir), "Making directory")
 	err := f.Mkdir(ctx, dir)
 	if err != nil {
-		fs.CountError(err)
+		err = fs.CountError(err)
 		return err
 	}
 	return nil
@@ -1091,7 +1096,7 @@ func TryRmdir(ctx context.Context, f fs.Fs, dir string) error {
 func Rmdir(ctx context.Context, f fs.Fs, dir string) error {
 	err := TryRmdir(ctx, f, dir)
 	if err != nil {
-		fs.CountError(err)
+		err = fs.CountError(err)
 		return err
 	}
 	return err
@@ -1124,7 +1129,7 @@ func Purge(ctx context.Context, f fs.Fs, dir string) error {
 		err = Rmdirs(ctx, f, dir, false)
 	}
 	if err != nil {
-		fs.CountError(err)
+		err = fs.CountError(err)
 		return err
 	}
 	return nil
@@ -1167,7 +1172,7 @@ func listToChan(ctx context.Context, f fs.Fs, dir string) fs.ObjectsChan {
 		})
 		if err != nil && err != fs.ErrorDirNotFound {
 			err = errors.Wrap(err, "failed to list")
-			fs.CountError(err)
+			err = fs.CountError(err)
 			fs.Errorf(nil, "%v", err)
 		}
 	}()
@@ -1223,7 +1228,7 @@ func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error {
 		}
 		in, err := o.Open(ctx, options...)
 		if err != nil {
-			fs.CountError(err)
+			err = fs.CountError(err)
 			fs.Errorf(o, "Failed to open: %v", err)
 			return
 		}
@@ -1236,7 +1241,7 @@ func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error {
 		defer mu.Unlock()
 		_, err = io.Copy(w, in)
 		if err != nil {
-			fs.CountError(err)
+			err = fs.CountError(err)
 			fs.Errorf(o, "Failed to send to output: %v", err)
 		}
 	})
@@ -1263,7 +1268,7 @@ func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser,
 	src := object.NewStaticObjectInfo(dstFileName, modTime, int64(readCounter.BytesRead()), false, hash.Sums(), fdst)
 	if !Equal(ctx, src, dst) {
 		err = errors.Errorf("corrupted on transfer")
-		fs.CountError(err)
+		err = fs.CountError(err)
 		fs.Errorf(dst, "%v", err)
 		return err
 	}
@@ -1338,7 +1343,7 @@ func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error {
 	dirEmpty[dir] = !leaveRoot
 	err := walk.Walk(ctx, f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
 		if err != nil {
-			fs.CountError(err)
+			err = fs.CountError(err)
 			fs.Errorf(f, "Failed to list %q: %v", dirPath, err)
 			return nil
 		}
@@ -1385,7 +1390,7 @@ func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error {
 		dir := toDelete[i]
 		err := TryRmdir(ctx, f, dir)
 		if err != nil {
-			fs.CountError(err)
+			err = fs.CountError(err)
 			fs.Errorf(dir, "Failed to rmdir: %v", err)
 			return err
 		}
@@ -1703,11 +1708,14 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
 	}

 	// Find dst object if it exists
-	dstObj, err := fdst.NewObject(ctx, dstFileName)
-	if err == fs.ErrorObjectNotFound {
-		dstObj = nil
-	} else if err != nil {
-		return err
+	var dstObj fs.Object
+	if !fs.Config.NoCheckDest {
+		dstObj, err = fdst.NewObject(ctx, dstFileName)
+		if err == fs.ErrorObjectNotFound {
+			dstObj = nil
+		} else if err != nil {
+			return err
+		}
 	}

 	// Special case for changing case of a file on a case insensitive remote
@@ -7,6 +7,7 @@ import (

 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/fserrors"
 )

 // reOpen is a wrapper for an object reader which reopens the stream on error
@@ -104,7 +105,7 @@ func (h *reOpen) Read(p []byte) (n int, err error) {
 		h.err = err
 	}
 	h.read += int64(n)
-	if err != nil && err != io.EOF {
+	if err != nil && err != io.EOF && !fserrors.IsNoLowLevelRetryError(err) {
 		// close underlying stream
 		h.opened = false
 		_ = h.rc.Close()
@@ -168,6 +168,7 @@ func init() {
 		Title: "Shows the current version of rclone and the go runtime.",
 		Help: `
 This shows the current version of go and the go runtime
+
 - version - rclone version, eg "v1.44"
 - decomposed - version number as [major, minor, patch, subpatch]
     - note patch and subpatch will be 999 for a git compiled version
@@ -260,3 +261,77 @@ func rcQuit(ctx context.Context, in Params) (out Params, err error) {

 	return nil, nil
 }
+
+func init() {
+	Add(Call{
+		Path:  "debug/set-mutex-profile-fraction",
+		Fn:    rcSetMutexProfileFraction,
+		Title: "Set runtime.SetMutexProfileFraction for mutex profiling.",
+		Help: `
+SetMutexProfileFraction controls the fraction of mutex contention
+events that are reported in the mutex profile. On average 1/rate
+events are reported. The previous rate is returned.
+
+To turn off profiling entirely, pass rate 0. To just read the current
+rate, pass rate < 0. (For n>1 the details of sampling may change.)
+
+Once this is set you can look use this to profile the mutex contention:
+
+    go tool pprof http://localhost:5572/debug/pprof/mutex
+
+Parameters
+
+- rate - int
+
+Results
+
+- previousRate - int
+`,
+	})
+}
+
+// Terminates app
+func rcSetMutexProfileFraction(ctx context.Context, in Params) (out Params, err error) {
+	rate, err := in.GetInt64("rate")
+	if err != nil {
+		return nil, err
+	}
+	previousRate := runtime.SetMutexProfileFraction(int(rate))
+	out = make(Params)
+	out["previousRate"] = previousRate
+	return out, nil
+}
+
+func init() {
+	Add(Call{
+		Path:  "debug/set-block-profile-rate",
+		Fn:    rcSetBlockProfileRate,
+		Title: "Set runtime.SetBlockProfileRate for blocking profiling.",
+		Help: `
+SetBlockProfileRate controls the fraction of goroutine blocking events
+that are reported in the blocking profile. The profiler aims to sample
+an average of one blocking event per rate nanoseconds spent blocked.
+
+To include every blocking event in the profile, pass rate = 1. To turn
+off profiling entirely, pass rate <= 0.
+
+After calling this you can use this to see the blocking profile:
+
+    go tool pprof http://localhost:5572/debug/pprof/block
+
+Parameters
+
+- rate - int
+`,
+	})
+}
+
+// Terminates app
+func rcSetBlockProfileRate(ctx context.Context, in Params) (out Params, err error) {
+	rate, err := in.GetInt64("rate")
+	if err != nil {
+		return nil, err
+	}
+	runtime.SetBlockProfileRate(int(rate))
+	return nil, nil
+}
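Taken together, the two new calls expose Go's runtime profiling knobs over the rc. With the rc server enabled (for example via rclone rcd, or any command run with --rc), they can be driven from the command line; these invocations are illustrative and assume the default rc address localhost:5572:

    rclone rc debug/set-mutex-profile-fraction rate=10
    go tool pprof http://localhost:5572/debug/pprof/mutex

    rclone rc debug/set-block-profile-rate rate=1
    go tool pprof http://localhost:5572/debug/pprof/block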

@@ -237,9 +237,11 @@ func init() {
 		Fn:    rcJobStatus,
 		Title: "Reads the status of the job ID",
 		Help: `Parameters
+
 - jobid - id of the job (integer)

 Results
+
 - finished - boolean
 - duration - time in seconds that the job ran for
 - endTime - time the job finished (eg "2018-10-26T18:50:20.528746884+01:00")
@@ -282,6 +284,7 @@ func init() {
 		Help: `Parameters - None

 Results
+
 - jobids - array of integer job ids
 `,
 	})
@@ -300,6 +303,7 @@ func init() {
 		Fn:    rcJobStop,
 		Title: "Stop the running job",
 		Help: `Parameters
+
 - jobid - id of the job (integer)
 `,
 	})
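For the job calls documented above, an illustrative rc session (assuming a running rc server and at least one async job; the jobid is an example) looks like:

    rclone rc job/list
    rclone rc job/status jobid=1
    rclone rc job/stop jobid=1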

@@ -31,6 +31,7 @@ type syncCopyMove struct {
 	ctx           context.Context // internal context for controlling go-routines
 	cancel        func()          // cancel the context
 	noTraverse    bool            // if set don't traverse the dst
+	noCheckDest   bool            // if set transfer all objects regardless without checking dst
 	deletersWg    sync.WaitGroup  // for delete before go routine
 	deleteFilesCh chan fs.Object  // channel to receive deletes if delete before
 	trackRenames  bool            // set if we should do server side renames
@@ -82,6 +83,7 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
 		dstEmptyDirs:  make(map[string]fs.DirEntry),
 		srcEmptyDirs:  make(map[string]fs.DirEntry),
 		noTraverse:    fs.Config.NoTraverse,
+		noCheckDest:   fs.Config.NoCheckDest,
 		toBeChecked:   newPipe(accounting.Stats(ctx).SetCheckQueue, fs.Config.MaxBacklog),
 		toBeUploaded:  newPipe(accounting.Stats(ctx).SetTransferQueue, fs.Config.MaxBacklog),
 		deleteFilesCh: make(chan fs.Object, fs.Config.Checkers),
@@ -95,6 +97,17 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
 		fs.Errorf(nil, "Ignoring --no-traverse with sync")
 		s.noTraverse = false
 	}
+	if s.noCheckDest {
+		if s.deleteMode != fs.DeleteModeOff {
+			return nil, errors.New("can't use --no-check-dest with sync: use copy instead")
+		}
+		if fs.Config.Immutable {
+			return nil, errors.New("can't use --no-check-dest with --immutable")
+		}
+		if s.backupDir != nil {
+			return nil, errors.New("can't use --no-check-dest with --backup-dir")
+		}
+	}
 	if s.trackRenames {
 		// Don't track renames for remotes without server-side move support.
 		if !operations.CanServerSideMove(fdst) {
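The validation block above defines the contract of the new --no-check-dest flag: the destination is never listed or checked, so it cannot be combined with sync's deletions, --immutable, or --backup-dir, all of which need to inspect the destination. An illustrative use (the paths are examples):

    rclone copy --no-check-dest /path/to/src remote:dst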
@@ -667,6 +680,7 @@ func (s *syncCopyMove) run() error {
 		NoTraverse:    s.noTraverse,
 		Callback:      s,
 		DstIncludeAll: filter.Active.Opt.DeleteExcluded,
+		NoCheckDest:   s.noCheckDest,
 	}
 	s.processError(m.Run())

@@ -926,7 +940,7 @@ func MoveDir(ctx context.Context, fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, cop
 		fs.Infof(fdst, "Server side directory move succeeded")
 		return nil
 	default:
-		fs.CountError(err)
+		err = fs.CountError(err)
 		fs.Errorf(fdst, "Server side directory move failed: %v", err)
 		return err
 	}

@@ -490,7 +490,7 @@ func TestSyncIgnoreErrors(t *testing.T) {
 	)

 	accounting.GlobalStats().ResetCounters()
-	fs.CountError(errors.New("boom"))
+	_ = fs.CountError(errors.New("boom"))
 	assert.NoError(t, Sync(context.Background(), r.Fremote, r.Flocal, false))

 	fstest.CheckListingWithPrecision(
@@ -800,7 +800,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
 	)

 	accounting.GlobalStats().ResetCounters()
-	fs.CountError(errors.New("boom"))
+	_ = fs.CountError(errors.New("boom"))
 	err := Sync(context.Background(), r.Fremote, r.Flocal, false)
 	assert.Equal(t, fs.ErrorNotDeleting, err)

@@ -1763,5 +1763,7 @@ func TestAbort(t *testing.T) {
 	accounting.GlobalStats().ResetCounters()

 	err := Sync(context.Background(), r.Fremote, r.Flocal, false)
-	assert.Equal(t, accounting.ErrorMaxTransferLimitReached, err)
+	expectedErr := fserrors.FsError(accounting.ErrorMaxTransferLimitReached)
+	fserrors.Count(expectedErr)
+	assert.Equal(t, expectedErr, err)
 }

@@ -1,4 +1,4 @@
 package fs

 // Version of rclone
-var Version = "v1.50.0"
+var Version = "v1.50.2-DEV"
@@ -159,7 +159,7 @@ func listRwalk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLe
 		// Carry on listing but return the error at the end
 		if err != nil {
 			listErr = err
-			fs.CountError(err)
+			err = fs.CountError(err)
 			fs.Errorf(path, "error listing: %v", err)
 			return nil
 		}
@@ -404,7 +404,7 @@ func walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel i
 			// NB once we have passed entries to fn we mustn't touch it again
 			if err != nil && err != ErrorSkipDir {
 				traversing.Done()
-				fs.CountError(err)
+				err = fs.CountError(err)
 				fs.Errorf(job.remote, "error listing: %v", err)
 				closeQuit()
 				// Send error to error channel if space

@@ -10,7 +10,9 @@ import (

 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
+	_ "github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/filter"
+	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fstest/mockdir"
 	"github.com/rclone/rclone/fstest/mockfs"
 	"github.com/rclone/rclone/fstest/mockobject"
@@ -18,6 +20,15 @@ import (
 	"github.com/stretchr/testify/require"
 )

+var errDirNotFound, errorBoom error
+
+func init() {
+	errDirNotFound = fserrors.FsError(fs.ErrorDirNotFound)
+	fserrors.Count(errDirNotFound)
+	errorBoom = fserrors.FsError(errors.New("boom"))
+	fserrors.Count(errorBoom)
+}
+
 type (
 	listResult struct {
 		entries fs.DirEntries
@@ -196,12 +207,12 @@ func TestWalkREmptySkip(t *testing.T) { testWalkEmptySkip(t).WalkR() }
 func testWalkNotFound(t *testing.T) *listDirs {
 	return newListDirs(t, nil, true,
 		listResults{
-			"": {err: fs.ErrorDirNotFound},
+			"": {err: errDirNotFound},
 		},
 		errorMap{
-			"": fs.ErrorDirNotFound,
+			"": errDirNotFound,
 		},
-		fs.ErrorDirNotFound,
+		errDirNotFound,
 	)
 }
 func TestWalkNotFound(t *testing.T) { testWalkNotFound(t).Walk() }
@@ -211,7 +222,7 @@ func TestWalkNotFoundMaskError(t *testing.T) {
 	// this doesn't work for WalkR
 	newListDirs(t, nil, true,
 		listResults{
-			"": {err: fs.ErrorDirNotFound},
+			"": {err: errDirNotFound},
 		},
 		errorMap{
 			"": nil,
@@ -224,7 +235,7 @@ func TestWalkNotFoundSkipError(t *testing.T) {
 	// this doesn't work for WalkR
 	newListDirs(t, nil, true,
 		listResults{
-			"": {err: fs.ErrorDirNotFound},
+			"": {err: errDirNotFound},
 		},
 		errorMap{
 			"": ErrorSkipDir,
@@ -342,7 +353,7 @@ func testWalkSkip(t *testing.T) *listDirs {
 func TestWalkSkip(t *testing.T)  { testWalkSkip(t).Walk() }
 func TestWalkRSkip(t *testing.T) { testWalkSkip(t).WalkR() }

-func testWalkErrors(t *testing.T) *listDirs {
+func walkErrors(t *testing.T, expectedErr error) *listDirs {
 	lr := listResults{}
 	em := errorMap{}
 	de := make(fs.DirEntries, 10)
@@ -357,13 +368,20 @@ func testWalkErrors(t *testing.T) *listDirs {
 	return newListDirs(t, nil, true,
 		lr,
 		em,
-		fs.ErrorDirNotFound,
+		expectedErr,
 	).NoCheckMaps()
 }
-func TestWalkErrors(t *testing.T)  { testWalkErrors(t).Walk() }
-func TestWalkRErrors(t *testing.T) { testWalkErrors(t).WalkR() }

-var errorBoom = errors.New("boom")
+func testWalkErrors(t *testing.T) *listDirs {
+	return walkErrors(t, errDirNotFound)
+}
+
+func testWalkRErrors(t *testing.T) *listDirs {
+	return walkErrors(t, fs.ErrorDirNotFound)
+}
+
+func TestWalkErrors(t *testing.T)  { testWalkErrors(t).Walk() }
+func TestWalkRErrors(t *testing.T) { testWalkRErrors(t).WalkR() }

 func makeTree(level int, terminalErrors bool) (listResults, errorMap) {
 	lr := listResults{}
@@ -18,6 +18,7 @@ import (
 	"path/filepath"
 	"reflect"
 	"sort"
+	"strconv"
 	"strings"
 	"testing"
 	"time"
@@ -1571,36 +1572,39 @@ func Run(t *testing.T, opt *Opt) {
 			t.Skip("FS has no PutStream interface")
 		}

-		file := fstest.Item{
-			ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
-			Path:    "piped data.txt",
-			Size:    -1, // use unknown size during upload
+		for _, contentSize := range []int{0, 100} {
+			t.Run(strconv.Itoa(contentSize), func(t *testing.T) {
+				file := fstest.Item{
+					ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
+					Path:    "piped data.txt",
+					Size:    -1, // use unknown size during upload
+				}
+
+				var (
+					err        error
+					obj        fs.Object
+					uploadHash *hash.MultiHasher
+				)
+				retry(t, "PutStream", func() error {
+					contents := random.String(contentSize)
+					buf := bytes.NewBufferString(contents)
+					uploadHash = hash.NewMultiHasher()
+					in := io.TeeReader(buf, uploadHash)
+
+					file.Size = -1
+					obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
+					obj, err = remote.Features().PutStream(ctx, in, obji)
+					return err
+				})
+				file.Hashes = uploadHash.Sums()
+				file.Size = int64(contentSize) // use correct size when checking
+				file.Check(t, obj, remote.Precision())
+				// Re-read the object and check again
+				obj = findObject(ctx, t, remote, file.Path)
+				file.Check(t, obj, remote.Precision())
+				require.NoError(t, obj.Remove(ctx))
+			})
 		}
-
-		var (
-			err         error
-			obj         fs.Object
-			uploadHash  *hash.MultiHasher
-			contentSize = 100
-		)
-		retry(t, "PutStream", func() error {
-			contents := random.String(contentSize)
-			buf := bytes.NewBufferString(contents)
-			uploadHash = hash.NewMultiHasher()
-			in := io.TeeReader(buf, uploadHash)
-
-			file.Size = -1
-			obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
-			obj, err = remote.Features().PutStream(ctx, in, obji)
-			return err
-		})
-		file.Hashes = uploadHash.Sums()
-		file.Size = int64(contentSize) // use correct size when checking
-		file.Check(t, obj, remote.Precision())
-		// Re-read the object and check again
-		obj = findObject(ctx, t, remote, file.Path)
-		file.Check(t, obj, remote.Precision())
-		require.NoError(t, obj.Remove(ctx))
 	})

 // TestInternal calls InternalTest() on the Fs
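The PutStream hunk above replaces a single fixed-size case with table-driven subtests. A self-contained sketch of the same t.Run pattern, using only the standard library (the test body is a placeholder, not the fstests logic):

package example

import (
	"strconv"
	"testing"
)

// TestPutStreamSizes mirrors the loop the hunk introduces: one t.Run
// subtest per content size, so the zero-byte and 100-byte cases pass
// or fail independently and are reported under their own names.
func TestPutStreamSizes(t *testing.T) {
	for _, contentSize := range []int{0, 100} {
		t.Run(strconv.Itoa(contentSize), func(t *testing.T) {
			data := make([]byte, contentSize)
			if len(data) != contentSize {
				t.Fatalf("want %d bytes, got %d", contentSize, len(data))
			}
		})
	}
}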
52 go.mod
@@ -1,36 +1,35 @@
 module github.com/rclone/rclone

 require (
-	bazil.org/fuse v0.0.0-20180421153158-65cc252bf669
-	cloud.google.com/go v0.44.3 // indirect
+	bazil.org/fuse v0.0.0-20191225233854-3a99aca11732
+	cloud.google.com/go v0.47.0 // indirect
 	github.com/Azure/azure-pipeline-go v0.2.2
 	github.com/Azure/azure-storage-blob-go v0.8.0
 	github.com/Azure/go-autorest/autorest/adal v0.6.0 // indirect
 	github.com/Unknwon/goconfig v0.0.0-20190425194916-3dba17dd7b9e
 	github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4
 	github.com/abbot/go-http-auth v0.4.0
-	github.com/anacrolix/dms v1.0.0
+	github.com/anacrolix/dms v1.1.0
 	github.com/atotto/clipboard v0.1.2
-	github.com/aws/aws-sdk-go v1.23.8
-	github.com/billziss-gh/cgofuse v1.1.0
-	github.com/coreos/bbolt v1.3.3
+	github.com/aws/aws-sdk-go v1.25.31
+	github.com/billziss-gh/cgofuse v1.2.0
 	github.com/djherbis/times v1.2.0
 	github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible
-	github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 // indirect
-	github.com/goftp/server v0.0.0-20190712054601-1149070ae46b
+	github.com/etcd-io/bbolt v1.3.3
+	github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect
 	github.com/google/go-cmp v0.3.1 // indirect
 	github.com/google/go-querystring v1.0.0 // indirect
 	github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect
 	github.com/hashicorp/golang-lru v0.5.3 // indirect
-	github.com/jlaffaye/ftp v0.0.0-20190721194432-7cd8b0bcf3fc
+	github.com/jlaffaye/ftp v0.0.0-20191025175106-a59fe673c9b2
 	github.com/jzelinskie/whirlpool v0.0.0-20170603002051-c19460b8caa6
 	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
 	github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
 	github.com/koofr/go-httpclient v0.0.0-20190818202018-e0dc8fd921dc
 	github.com/koofr/go-koofrclient v0.0.0-20190724113126-8e5366da203a
-	github.com/mattn/go-colorable v0.1.2
+	github.com/mattn/go-colorable v0.1.4
 	github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb // indirect
-	github.com/mattn/go-runewidth v0.0.4
+	github.com/mattn/go-isatty v0.0.11-0.20191112051248-2a2f0ea997f9 // indirect
+	github.com/mattn/go-runewidth v0.0.6
 	github.com/mitchellh/go-homedir v1.1.0
 	github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
 	github.com/ncw/swift v1.0.49
@@ -49,24 +48,27 @@ require (
 	github.com/smartystreets/assertions v1.0.1 // indirect
 	github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 // indirect
 	github.com/spf13/cobra v0.0.5
-	github.com/spf13/pflag v1.0.3
+	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.4.0
-	github.com/t3rm1n4l/go-mega v0.0.0-20190528125457-55e675378686
+	github.com/t3rm1n4l/go-mega v0.0.0-20191014094753-e8695d78299a
 	github.com/xanzy/ssh-agent v0.2.1
-	github.com/youmark/pkcs8 v0.0.0-20181201043747-70daafe5d78a
-	github.com/yunify/qingstor-sdk-go/v3 v3.0.2
+	github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60
+	github.com/yunify/qingstor-sdk-go/v3 v3.1.1
 	go.etcd.io/bbolt v1.3.3 // indirect
-	golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586
-	golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7
+	go.opencensus.io v0.22.2 // indirect
+	goftp.io/server v0.0.0-20190812052725-72a57b186803
+	golang.org/x/crypto v0.0.0-20191108234033-bd318be0434a
+	golang.org/x/net v0.0.0-20191109021931-daa7c04131f5
 	golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
-	golang.org/x/sync v0.0.0-20190423024810-112230192c58
-	golang.org/x/sys v0.0.0-20190826163724-acd9dae8e8cc
+	golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
+	golang.org/x/sys v0.0.0-20191210023423-ac6580df4449
 	golang.org/x/text v0.3.2
-	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
-	google.golang.org/api v0.9.0
-	google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 // indirect
-	google.golang.org/grpc v1.23.0 // indirect
-	gopkg.in/yaml.v2 v2.2.2
+	golang.org/x/time v0.0.0-20191024005414-555d28b269f0
+	google.golang.org/api v0.13.0
+	google.golang.org/appengine v1.6.5 // indirect
+	google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a // indirect
+	google.golang.org/grpc v1.25.1 // indirect
+	gopkg.in/yaml.v2 v2.2.5
 )

 go 1.13
135 go.sum
@@ -1,12 +1,24 @@
-bazil.org/fuse v0.0.0-20180421153158-65cc252bf669 h1:FNCRpXiquG1aoyqcIWVFmpTSKVcx2bQD38uZZeGtdlw=
-bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+bazil.org/fuse v0.0.0-20191225233854-3a99aca11732 h1:gaB1+kZCJDExjlrdy37gIwxV0M7v81EzIFKQZ5o5YV0=
+bazil.org/fuse v0.0.0-20191225233854-3a99aca11732/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
 cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.3 h1:0sMegbmn/8uTwpNkB0q9cLEpZ2W5a6kl+wtBQgPWBJQ=
-cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.47.0 h1:1JUtpcY9E7+eTospEwWS2QXP3DEn7poB3E2j0jN74mM=
+cloud.google.com/go v0.47.0/go.mod h1:5p3Ky/7f3N10VBkhuR5LFtddroTiMyjZV/Kj5qOQFxU=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+gitea.com/goftp/file-driver v0.0.0-20190712091345-f79c2ed973f8/go.mod h1:ghdogu0Da3rwYCSJ20JPgTiMcDpzeRbzvuFIOOW3G7w=
+gitea.com/goftp/file-driver v0.0.0-20190812052443-efcdcba68b34 h1:3wshUWDKHcy8hrNafCS4rtuAdON2KYsuznc05zdHTrQ=
+gitea.com/goftp/file-driver v0.0.0-20190812052443-efcdcba68b34/go.mod h1:6+f1gclV97PmaVmE4YJbH3KIKnl+r3/HWR0zD/z1CG4=
 github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
 github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
 github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
@@ -37,8 +49,8 @@ github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4 h1:mK1/QgFPU4osbhjJ26B1w7
 github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg=
 github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=
 github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM=
-github.com/anacrolix/dms v1.0.0 h1:4vs/X5AdF0eRqFXg+EbNUdvY7JUz/a4U84v+VAEa7V8=
-github.com/anacrolix/dms v1.0.0/go.mod h1:1TQoem5yf/k/DiVLFFQi+JFQ6GZeKxmJfwGr3goLmFQ=
+github.com/anacrolix/dms v1.1.0 h1:vbBXZS7T5FaZm+9p1pdmVVo9tN3qdc27bKSETdeT3xo=
+github.com/anacrolix/dms v1.1.0/go.mod h1:msPKAoppoNRfrYplJqx63FZ+VipDZ4Xsj3KzIQxyU7k=
 github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
 github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
 github.com/anacrolix/ffprobe v1.0.0/go.mod h1:BIw+Bjol6CWjm/CRWrVLk2Vy+UYlkgmBZ05vpSYqZPw=
@@ -47,15 +59,14 @@ github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/atotto/clipboard v0.1.2 h1:YZCtFu5Ie8qX2VmVTBnrqLSiU9XOWwqNRmdT3gIQzbY=
 github.com/atotto/clipboard v0.1.2/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
-github.com/aws/aws-sdk-go v1.23.8 h1:G/azJoBN0pnhB3B+0eeC4yyVFYIIad6bbzg6wwtImqk=
-github.com/aws/aws-sdk-go v1.23.8/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/billziss-gh/cgofuse v1.1.0 h1:tATn9ZDvuPcOVlvR4tJitGHgAqy1y18+4mKmRfdfjec=
-github.com/billziss-gh/cgofuse v1.1.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
+github.com/aws/aws-sdk-go v1.25.31 h1:14mdh3HsTgRekePPkYcCbAaEXJknc3mN7f4XfsiMMDA=
+github.com/aws/aws-sdk-go v1.25.31/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/billziss-gh/cgofuse v1.2.0 h1:FMdQSygSBpD4yEPENJcmvfCdmNWMVkPLlD7wWdl/7IA=
+github.com/billziss-gh/cgofuse v1.2.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
 github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
 github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=
-github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -72,16 +83,24 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3
 github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible h1:9jnukMIowLSo3SY7+GTwxmYJv4QC0LxXbo97zHWCyoc=
 github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
 github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=
+github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
 github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 h1:cC0Hbb+18DJ4i6ybqDybvj4wdIDS4vnD0QEci98PgM8=
-github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9/go.mod h1:GpOj6zuVBG3Inr9qjEnuVTgBlk2lZ1S9DcoFiXWyKss=
-github.com/goftp/server v0.0.0-20190712054601-1149070ae46b h1:2rRhW1AEs/240C6fpmgGFKlTnh/339r2Cg+ahrkSodo=
-github.com/goftp/server v0.0.0-20190712054601-1149070ae46b/go.mod h1:k/SS6VWkxY7dHPhoMQ8IdRu8L4lQtmGbhyXGg+vCnXE=
+github.com/goftp/server v0.0.0-20190304020633-eabccc535b5a/go.mod h1:k/SS6VWkxY7dHPhoMQ8IdRu8L4lQtmGbhyXGg+vCnXE=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE=
+github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -104,6 +123,7 @@ github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
@@ -114,16 +134,15 @@ github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORR
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
 github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
 github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jlaffaye/ftp v0.0.0-20190721194432-7cd8b0bcf3fc h1:Mc2Gk3kF0Uqx+cI97pN0gbgZb0DVW2L+htrZSKkOmtE=
-github.com/jlaffaye/ftp v0.0.0-20190721194432-7cd8b0bcf3fc/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
+github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
+github.com/jlaffaye/ftp v0.0.0-20191025175106-a59fe673c9b2 h1:WY3P4euRv9s8F2rpZUK1jnk4ZMiV3O2ltdnoZK/GTUU=
+github.com/jlaffaye/ftp v0.0.0-20191025175106-a59fe673c9b2/go.mod h1:PwUeyujmhaGohgOf0kJKxPfk3HcRv8QD/wAUN44go4k=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
@@ -134,6 +153,7 @@ github.com/jzelinskie/whirlpool v0.0.0-20170603002051-c19460b8caa6 h1:RyOL4+OIUc
 github.com/jzelinskie/whirlpool v0.0.0-20170603002051-c19460b8caa6/go.mod h1:KmHnJWQrgEvbuy0vcvj00gtMqbvNn1L+3YUZLK/B92c=
 github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
 github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
@@ -150,18 +170,18 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
-github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-gtk v0.0.0-20190405072524-4deadb416788/go.mod h1:PwzwfeB5syFHXORC3MtPylVcjIoTDT/9cvkKpEndGVI=
+github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
 github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
 github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb h1:hXqqXzQtJbENrsb+rsIqkVqcg4FUJL0SQFGw08Dgivw=
 github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
-github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-pointer v0.0.0-20180825124634-49522c3f3791/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc=
-github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
-github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-isatty v0.0.11-0.20191112051248-2a2f0ea997f9 h1:tM1L+QoyOIq/0KiBQ4y/jUW0jxB5kz35bz+PSoQYjq8=
+github.com/mattn/go-isatty v0.0.11-0.20191112051248-2a2f0ea997f9/go.mod h1:cxQpGCW53krnBJYXw0m6SYdk+OIHR4jbEstSUj/+MQ4=
+github.com/mattn/go-runewidth v0.0.6 h1:V2iyH+aX9C5fsYCpK60U8BYIvmhqxuOL3JZcqc1NB7k=
+github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
@@ -170,6 +190,7 @@ github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2 h1:VlXvEx6JbFp7F9iz92zX
 github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2/go.mod h1:MLIrzg7gp/kzVBxRE1olT7CWYMCklcUWU+ekoxOD9x0=
 github.com/ncw/swift v1.0.49 h1:eQaKIjSt/PXLKfYgzg01nevmO+CMXfXGRhB1gOhDs7E=
 github.com/ncw/swift v1.0.49/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
 github.com/nsf/termbox-go v0.0.0-20190817171036-93860e161317 h1:hhGN4SFXgXo61Q4Sjj/X9sBjyeSa2kdpaOzCO+8EVQw=
 github.com/nsf/termbox-go v0.0.0-20190817171036-93860e161317/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
 github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd h1:+iAPaTbi1gZpcpDwe/BW1fx7Xoesv69hLNGPheoyhBs=
@@ -191,10 +212,12 @@ github.com/pkg/sftp v1.10.1 h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc=
 github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/putdotio/go-putio v0.0.0-20190822121956-19b9c636c877 h1:sKIa5MAIViLAnQbEo+uiDi2FMowy8KcdZW8XZpmyNxs=
 github.com/putdotio/go-putio v0.0.0-20190822121956-19b9c636c877/go.mod h1:EWtDL88jJLLWZzywr0QaPO+mGP8gFpvl8dcox8qTk3Y=
 github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46 h1:w2CpS5muK+jyydnmlkqpAhzKmHmMBzBkfYUDjQNS1Dk=
 github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46/go.mod h1:U2bmx0hDj8EyDdcxmD5t3XHDnBFnyNNc22n1R4008eM=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
@@ -217,6 +240,8 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -228,39 +253,58 @@ github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709 h1:Ko2LQMrRU+Oy
 github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/t3rm1n4l/go-mega v0.0.0-20190528125457-55e675378686 h1:U7mF+tjDK9zWoxCU+kBNa1XT7WZMF5bjwtRpjeIkSYw=
-github.com/t3rm1n4l/go-mega v0.0.0-20190528125457-55e675378686/go.mod h1:XWL4vDyd3JKmJx+hZWUVgCNmmhZ2dTBcaNDcxH465s0=
+github.com/t3rm1n4l/go-mega v0.0.0-20191014094753-e8695d78299a h1:9VwG6wBA1jd6oOCnmQ/OaKM1GRfChadtH5N3bx1oSKE=
+github.com/t3rm1n4l/go-mega v0.0.0-20191014094753-e8695d78299a/go.mod h1:XWL4vDyd3JKmJx+hZWUVgCNmmhZ2dTBcaNDcxH465s0=
 github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
 github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
 github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
 github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
 github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/youmark/pkcs8 v0.0.0-20181201043747-70daafe5d78a h1:wRlvyDgRuJOLgD2vcuBUbEduzTkcN7quLip1EnX/Dl4=
-github.com/youmark/pkcs8 v0.0.0-20181201043747-70daafe5d78a/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
-github.com/yunify/qingstor-sdk-go/v3 v3.0.2 h1:2pL3tEj6eEESsHKrqsLZ5D+OkHEhYfsW1xwYRcHCgZs=
-github.com/yunify/qingstor-sdk-go/v3 v3.0.2/go.mod h1:KciFNuMu6F4WLk9nGwwK69sCGKLCdd9f97ac/wfumS4=
+github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60 h1:Ud2neINE1YFEwrcJ4EqnbRZlm9R3T8SuFKeqjIw7k44=
+github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/yunify/qingstor-sdk-go/v3 v3.1.1 h1:jQkY9N+zSL8h8CqgrDQpXe8/mqJOx8vgGjk6O//RA/4=
+github.com/yunify/qingstor-sdk-go/v3 v3.1.1/go.mod h1:KciFNuMu6F4WLk9nGwwK69sCGKLCdd9f97ac/wfumS4=
 go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+goftp.io/server v0.0.0-20190712054601-1149070ae46b/go.mod h1:xreggPYu7ZuNe9PfbxiQca7bYGwU44IvlCCg3KzWJtQ=
+goftp.io/server v0.0.0-20190812034929-9b3874d17690/go.mod h1:99FISrRpwKfaL4Ey/dX8N48WToveng/s2OXR5sJ3cnc=
+goftp.io/server v0.0.0-20190812052725-72a57b186803 h1:I2IgXYRuOZ6LceE7VY6aSnYuUy6Wot3WFhqI5WsAHXQ=
+goftp.io/server v0.0.0-20190812052725-72a57b186803/go.mod h1:eDjthxa5tFTS2JVry2jHt1g9y3J0Vgu2Nd+lmNWev7Y=
 golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191108234033-bd318be0434a h1:R/qVym5WAxsZWQqZCwDY/8sdVKV1m1WgU4/S5IRQAzc=
+golang.org/x/crypto v0.0.0-20191108234033-bd318be0434a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
 golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
 golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
 golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
 golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
 golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -274,8 +318,8 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191109021931-daa7c04131f5 h1:bHNaocaoJxYBo5cw41UyTMLjYlb8wPY7+WFrnklbHOM=
+golang.org/x/net v0.0.0-20191109021931-daa7c04131f5/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
@@ -286,6 +330,8 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -300,8 +346,11 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826163724-acd9dae8e8cc h1:Cgiu447JccQnHt7K/DbJbw1DbXAUHwOtU7ObeOCVsc4=
-golang.org/x/sys v0.0.0-20190826163724-acd9dae8e8cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056 h1:dHtDnRWQtSx0Hjq9kvKFpBh9uPPKfQN70NZZmvssGwk=
+golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ=
|
||||
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
@@ -309,6 +358,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
@@ -320,18 +371,28 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191010171213-8abd42400456/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0 h1:Q3Ui3V3/CVinFWFiW39Iw0kMuVrRzYX0wN6OPFp0lTA=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
@@ -340,15 +401,22 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191009194640-548a555dbc03/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
@@ -356,8 +424,11 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
|
||||
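For readers skimming the go.sum hunks above: each entry has the form "module version hash". A bare version hashes the module's whole file tree, while a "version/go.mod" entry hashes only that module's go.mod file, which is why most upgrades change the lines in pairs. A minimal, hypothetical Go sketch of splitting one entry into its fields (parseGoSumLine is illustrative only, not part of rclone or the Go toolchain):

package main

import (
	"fmt"
	"strings"
)

// parseGoSumLine splits a go.sum entry of the form "module version hash".
// Entries whose version ends in "/go.mod" cover only the go.mod file;
// bare versions cover the whole module tree.
func parseGoSumLine(line string) (module, version, hash string, ok bool) {
	fields := strings.Fields(line)
	if len(fields) != 3 {
		return "", "", "", false
	}
	return fields[0], fields[1], fields[2], true
}

func main() {
	m, v, h, ok := parseGoSumLine("gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=")
	if ok {
		fmt.Printf("module=%s version=%s hash=%s\n", m, strings.TrimSuffix(v, "/go.mod"), h)
	}
}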
@@ -386,6 +386,8 @@ func doConfig(id, name string, m configmap.Mapper, oauthConfig *oauth2.Config, o
	oauthConfig, changed := overrideCredentials(name, m, oauthConfig)
	authorizeOnlyValue, ok := m.Get(config.ConfigAuthorize)
	authorizeOnly := ok && authorizeOnlyValue != "" // set if being run by "rclone authorize"
	authorizeNoAutoBrowserValue, ok := m.Get(config.ConfigAuthNoBrowser)
	authorizeNoAutoBrowser := ok && authorizeNoAutoBrowserValue != ""

	// See if already have a token
	tokenString, ok := m.Get("token")
@@ -470,9 +472,13 @@ func doConfig(id, name string, m configmap.Mapper, oauthConfig *oauth2.Config, o
		authURL = "http://" + bindAddress + "/auth?state=" + state
	}

	// Open the URL for the user to visit
	_ = open.Start(authURL)
	fmt.Printf("If your browser doesn't open automatically go to the following link: %s\n", authURL)
	if !authorizeNoAutoBrowser && oauthConfig.RedirectURL != TitleBarRedirectURL {
		// Open the URL for the user to visit
		_ = open.Start(authURL)
		fmt.Printf("If your browser doesn't open automatically go to the following link: %s\n", authURL)
	} else {
		fmt.Printf("Please go to the following link: %s\n", authURL)
	}
	fmt.Printf("Log in and authorize rclone for access\n")

	// Read the code via the webserver or manually
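The doConfig hunks above show the substance of this change: a new authorizeNoAutoBrowser switch is read from the config map, and the previously unconditional open.Start call is now gated on it. A self-contained sketch of the same branch logic, with openBrowser standing in for the open.Start helper the real code imports (all names here are illustrative, not rclone's API):

package main

import "fmt"

// openBrowser stands in for open.Start from
// github.com/skratchdot/open-golang; here it only reports what it would do.
func openBrowser(url string) error {
	fmt.Println("(would open browser at)", url)
	return nil
}

// promptAuth mirrors the patched logic: auto-open the browser unless the
// user asked not to, or unless the flow uses the title-bar redirect.
func promptAuth(authURL string, noAutoBrowser bool, redirectURL, titleBarRedirectURL string) {
	if !noAutoBrowser && redirectURL != titleBarRedirectURL {
		// Open the URL for the user to visit
		_ = openBrowser(authURL)
		fmt.Printf("If your browser doesn't open automatically go to the following link: %s\n", authURL)
	} else {
		fmt.Printf("Please go to the following link: %s\n", authURL)
	}
	fmt.Printf("Log in and authorize rclone for access\n")
}

func main() {
	// With noAutoBrowser set, only the manual link is printed.
	promptAuth("http://127.0.0.1:53682/auth?state=xyz", true, "http://127.0.0.1:53682/", "urn:ietf:wg:oauth:2.0:oob")
}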
2
vendor/bazil.org/fuse/LICENSE
generated
vendored
@@ -1,4 +1,4 @@
Copyright (c) 2013-2015 Tommi Virtanen.
Copyright (c) 2013-2019 Tommi Virtanen.
Copyright (c) 2009, 2011, 2012 The Go Authors.
All rights reserved.
Some files were not shown because too many files have changed in this diff.