Mirror of https://github.com/rclone/rclone.git
Synced 2026-01-28 07:13:39 +00:00

Compare commits: fix-azure-...fix-2926-p

100 Commits
5f37c6f258 a2341cc412 9685be64cd 39f5059d48 a30e80564d 8e107b9657 21a0693b79 4846d9393d fc4f20d52f 60558b5d37
5990573ccd bd11d3cb62 5e5578d2c3 1318c6aec8 f29757de3b f397c35935 f365230aea ff0b8e10af 8d16a5693c 781142a73f
f471a7e3f5 d7a1fd2a6b 7782eda88e d08453d402 71e98ea584 42d997f639 571b4c060b ff72059a94 2e6ef4f6ec 0ec6dd9f4b
0b7fdf16a2 5edfd31a6d 7ee7bc87ae 1433558c01 0458b961c5 c1998c4efe 49da220b65 554ee0d963 2d2533a08a 733b072d4f
2d01a65e36 b8280521a5 60e6af2605 9d16822c63 38a0946071 95e52e1ac3 51ab1c940a 6f30427357 3220acc729 3c97933416
039e2a9649 1c01d0b84a 39eac7a765 082a7065b1 f7b08a6982 37e32d8c80 f2a1b991de 4128e696d6 7e7f3de355 1f6a1cd26d
2cfe2354df 13387c0838 5babf2dc5c 9012d7c6c1 df1faa9a8f 3de7ad5223 9cb3a68c38 c1dd76788d 5ee1816a71 63b51c6742
e7684b7ed5 dda23baf42 8575abf599 feea0532cd d3e8ae1820 91a9a959a2 04eae51d11 8fb707e16d 4138d5aa75 fc654a4cec
26b5f55cba 3f572e6bf2 941ad6bc62 5d1d93e163 35fba5bfdd 887834da91 107293c80e e3c4ebd59a d99ffde7c0 198c34ce21
0eba88bbfe aeea4430d5 4b15c4215c 50452207d9 01fcad9b9c eb41253764 89625e54cf 58f7141c96 e56c6402a7 d0eb8ddc30
@@ -13,10 +13,10 @@ jobs:
- run:
name: Cross-compile rclone
command: |
docker pull billziss/xgo-cgofuse
docker pull rclone/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
--image=billziss/xgo-cgofuse \
--image=rclone/xgo-cgofuse \
--targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
.

@@ -29,6 +29,19 @@ jobs:
command: |
mkdir -p /tmp/rclone.dist
cp -R rclone-* /tmp/rclone.dist
mkdir build
cp -R rclone-* build/

- run:
name: Build rclone
command: |
go version
go build

- run:
name: Upload artifacts
command: |
make circleci_upload

- store_artifacts:
path: /tmp/rclone.dist

@@ -351,6 +351,12 @@ Unit tests

Integration tests

* Add your backend to `fstest/test_all/config.yaml`
* Once you've done that then you can use the integration test framework from the project root:
* go install ./...
* test_all -backend remote

Or if you want to run the integration tests manually:

* Make sure integration tests pass with
* `cd fs/operations`
* `go test -v -remote TestRemote:`
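For context, the integration-test entry point a backend adds usually looks like the minimal sketch below, modeled on the qingstor and http tests further down this diff. The package name `mybackend` and the remote name `TestRemote:` are placeholders for whatever the new backend is actually called.

```go
// Placeholder package name; a real backend uses its own package and remote name.
package mybackend_test

import (
	"testing"

	"github.com/ncw/rclone/backend/mybackend" // hypothetical backend package
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs the shared integration test suite against the remote
// configured as "TestRemote:" in the rclone config file.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestRemote:",
		NilObject:  (*mybackend.Object)(nil),
	})
}
```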
@@ -372,4 +378,3 @@ Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
* `cmd/cmd.go` - the main help for rclone

@@ -1,14 +1,17 @@

# Maintainers guide for rclone #

Current active maintainers of rclone are
Current active maintainers of rclone are:

* Nick Craig-Wood @ncw
* Stefan Breunig @breunigs
* Ishuah Kariuki @ishuah
* Remus Bunduc @remusb - cache subsystem maintainer
* Fabian Möller @B4dM4n
* Alex Chen @Cnly
* Sandeep Ummadi @sandeepkru
| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |

**This is a work in progress Draft**
Makefile

@@ -185,6 +185,13 @@ ifndef BRANCH_PATH
endif
@echo Beta release ready at $(BETA_URL)

circleci_upload:
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifndef BRANCH_PATH
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
@echo Beta release ready at $(BETA_URL)/testbuilds

BUILD_FLAGS := -exclude "^(windows|darwin)/"
ifeq ($(TRAVIS_OS_NAME),osx)
BUILD_FLAGS := -include "^darwin/" -cgo

@@ -20,6 +20,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and

## Storage providers

* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
@@ -91,4 +92,4 @@ License
-------

This is free software under the terms of MIT the license (check the
[COPYING file](/rclone/COPYING) included in this package).
[COPYING file](/COPYING) included in this package).
@@ -21,7 +21,7 @@ import (
"strings"
"time"

"github.com/ncw/go-acd"
acd "github.com/ncw/go-acd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"

@@ -1,6 +1,6 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system

// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
// +build !plan9,!solaris,go1.8

package azureblob

@@ -22,12 +22,14 @@ import (
"sync"
"time"

"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"

@@ -135,6 +137,7 @@ type Fs struct {
root string // the path we are working on if any
opt Options // parsed config options
features *fs.Features // optional features
client *http.Client // http client we are using
svcURL *azblob.ServiceURL // reference to serviceURL
cntURL *azblob.ContainerURL // reference to containerURL
container string // the container we are working on

@@ -272,6 +275,38 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return
}

// httpClientFactory creates a Factory object that sends HTTP requests
// to a rclone's http.Client.
//
// copied from azblob.newDefaultHTTPClientFactory
func httpClientFactory(client *http.Client) pipeline.Factory {
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
r, err := client.Do(request.WithContext(ctx))
if err != nil {
err = pipeline.NewError(err, "HTTP request failed")
}
return pipeline.NewHTTPResponse(r), err
}
})
}

// newPipeline creates a Pipeline using the specified credentials and options.
//
// this code was copied from azblob.NewPipeline
func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
// Closest to API goes first; closest to the wire goes last
factories := []pipeline.Factory{
azblob.NewTelemetryPolicyFactory(o.Telemetry),
azblob.NewUniqueRequestIDPolicyFactory(),
azblob.NewRetryPolicyFactory(o.Retry),
c,
pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
azblob.NewRequestLogPolicyFactory(o.RequestLog),
}
return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
}

// NewFs contstructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -307,6 +342,23 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
}

f := &Fs{
name: name,
opt: *opt,
container: container,
root: directory,
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant).SetPacer(pacer.S3Pacer),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
client: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
SetTier: true,
GetTier: true,
}).Fill(f)

var (
u *url.URL
serviceURL azblob.ServiceURL

@@ -323,7 +375,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
}
pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
containerURL = serviceURL.NewContainerURL(container)
case opt.SASURL != "":

@@ -332,7 +384,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to parse SAS URL")
}
// use anonymous credentials in case of sas url
pipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
// Check if we have container level SAS or account level sas
parts := azblob.NewBlobURLParts(*u)
if parts.ContainerName != "" {

@@ -349,24 +401,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
default:
return nil, errors.New("Need account+key or connectionString or sasURL")
}
f.svcURL = &serviceURL
f.cntURL = &containerURL

f := &Fs{
name: name,
opt: *opt,
container: container,
root: directory,
svcURL: &serviceURL,
cntURL: &containerURL,
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
SetTier: true,
GetTier: true,
}).Fill(f)
if f.root != "" {
f.root += "/"
// Check to see if the (container,directory) is actually an existing file

@@ -380,8 +417,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
_, err := f.NewObject(remote)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
// File doesn't exist or is a directory so return old f
f.root = oldRoot
return f, nil
}

@@ -437,6 +474,21 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
}

// Returns whether file is a directory marker or not
func isDirectoryMarker(size int64, metadata azblob.Metadata, remote string) bool {
// Directory markers are 0 length
if size == 0 {
// Note that metadata with hdi_isfolder = true seems to be a
// defacto standard for marking blobs as directories.
endsWithSlash := strings.HasSuffix(remote, "/")
if endsWithSlash || remote == "" || metadata["hdi_isfolder"] == "true" {
return true
}

}
return false
}

// listFn is called from list to handle an object
type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
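As an illustration of the convention the new isDirectoryMarker helper encodes, the standalone sketch below reproduces the same check outside the backend. The `hdi_isfolder` metadata key and the zero-length rule come from the diff above; the rest (plain map type, example names) is illustrative.

```go
package main

import (
	"fmt"
	"strings"
)

// isDirMarker mirrors isDirectoryMarker above: a zero-length blob counts as a
// directory marker if its name ends in "/", it is the container root, or it
// carries the hdi_isfolder=true metadata used to mark pseudo-directories.
func isDirMarker(size int64, metadata map[string]string, remote string) bool {
	if size != 0 {
		return false
	}
	return strings.HasSuffix(remote, "/") || remote == "" || metadata["hdi_isfolder"] == "true"
}

func main() {
	fmt.Println(isDirMarker(0, map[string]string{"hdi_isfolder": "true"}, "photos")) // true
	fmt.Println(isDirMarker(0, nil, "photos/"))                                      // true
	fmt.Println(isDirMarker(42, nil, "photos/img.jpg"))                              // false
}
```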
@@ -472,6 +524,7 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
MaxResults: int32(maxResults),
}
ctx := context.Background()
directoryMarkers := map[string]struct{}{}
for marker := (azblob.Marker{}); marker.NotDone(); {
var response *azblob.ListBlobsHierarchySegmentResponse
err := f.pacer.Call(func() (bool, error) {

@@ -501,13 +554,23 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
continue
}
remote := file.Name[len(f.root):]
// Check for directory
isDirectory := strings.HasSuffix(remote, "/")
if isDirectory {
remote = remote[:len(remote)-1]
if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
err = fn(remote, file, true)
if err != nil {
return err
}
// Keep track of directory markers. If recursing then
// there will be no Prefixes so no need to keep track
if !recurse {
directoryMarkers[remote] = struct{}{}
}
continue // skip directory marker
}
// Send object
err = fn(remote, file, isDirectory)
err = fn(remote, file, false)
if err != nil {
return err
}

@@ -520,6 +583,10 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
continue
}
remote = remote[len(f.root):]
// Don't send if already sent as a directory marker
if _, found := directoryMarkers[remote]; found {
continue
}
// Send object
err = fn(remote, nil, true)
if err != nil {

@@ -923,27 +990,37 @@ func (o *Object) setMetadata(metadata azblob.Metadata) {
// o.md5
// o.meta
func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetPropertiesResponse) (err error) {
metadata := info.NewMetadata()
size := info.ContentLength()
if isDirectoryMarker(size, metadata, o.remote) {
return fs.ErrorNotAFile
}
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
// this as base64 encoded string.
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
o.mimeType = info.ContentType()
o.size = info.ContentLength()
o.size = size
o.modTime = time.Time(info.LastModified())
o.accessTier = azblob.AccessTierType(info.AccessTier())
o.setMetadata(info.NewMetadata())
o.setMetadata(metadata)

return nil
}

func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
metadata := info.Metadata
size := *info.Properties.ContentLength
if isDirectoryMarker(size, metadata, o.remote) {
return fs.ErrorNotAFile
}
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
// this as base64 encoded string.
o.md5 = base64.StdEncoding.EncodeToString(info.Properties.ContentMD5)
o.mimeType = *info.Properties.ContentType
o.size = *info.Properties.ContentLength
o.size = size
o.modTime = info.Properties.LastModified
o.accessTier = info.Properties.AccessTier
o.setMetadata(info.Metadata)
o.setMetadata(metadata)
return nil
}

@@ -1,4 +1,4 @@
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
// +build !plan9,!solaris,go1.8

package azureblob

@@ -1,6 +1,6 @@
// Test AzureBlob filesystem interface

// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
// +build !plan9,!solaris,go1.8

package azureblob

@@ -1,6 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build freebsd netbsd openbsd plan9 solaris !go1.8
// +build plan9 solaris !go1.8

package azureblob
@@ -136,6 +136,7 @@ type AuthorizeAccountResponse struct {
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"`

@@ -368,6 +368,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
// If this is a key limited to a single bucket, it must exist already
if f.bucket != "" && f.info.Allowed.BucketID != "" {
allowedBucket := f.info.Allowed.BucketName
if allowedBucket == "" {
return nil, errors.New("bucket that application key is restricted to no longer exists")
}
if allowedBucket != f.bucket {
return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
}
f.markBucketOK()
f.setBucketID(f.info.Allowed.BucketID)
}

@@ -980,6 +987,12 @@ func (f *Fs) purge(oldOnly bool) error {
errReturn = err
}
}
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
if time.Since(time.Time(timestamp)).Hours() > 24 {
return true
}
return false
}

// Delete Config.Transfers in parallel
toBeDeleted := make(chan *api.File, fs.Config.Transfers)

@@ -1003,6 +1016,9 @@ func (f *Fs) purge(oldOnly bool) error {
if object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
toBeDeleted <- object
} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
toBeDeleted <- object
} else {
fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
}
backend/cache/plex.go (vendored)

@@ -15,7 +15,7 @@ import (
"time"

"github.com/ncw/rclone/fs"
"github.com/patrickmn/go-cache"
cache "github.com/patrickmn/go-cache"
"golang.org/x/net/websocket"
)

backend/cache/storage_memory.go (vendored)

@@ -8,7 +8,7 @@ import (
"time"

"github.com/ncw/rclone/fs"
"github.com/patrickmn/go-cache"
cache "github.com/patrickmn/go-cache"
"github.com/pkg/errors"
)
@@ -41,6 +41,7 @@ var (
ErrorBadDecryptControlChar = errors.New("bad decryption - contains control chars")
ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize")
ErrorTooShortAfterDecode = errors.New("too short after base32 decode")
ErrorTooLongAfterDecode = errors.New("too long after base32 decode")
ErrorEncryptedFileTooShort = errors.New("file is too short to be encrypted")
ErrorEncryptedFileBadHeader = errors.New("file has truncated block header")
ErrorEncryptedBadMagic = errors.New("not an encrypted file - bad magic string")

@@ -284,6 +285,9 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
// not possible if decodeFilename() working correctly
return "", ErrorTooShortAfterDecode
}
if len(rawCiphertext) > 2048 {
return "", ErrorTooLongAfterDecode
}
paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt)
plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext)
if err != nil {

@@ -194,6 +194,10 @@ func TestEncryptSegment(t *testing.T) {

func TestDecryptSegment(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := make([]byte, 3328)
for i := range longName {
longName[i] = 'a'
}
c, _ := newCipher(NameEncryptionStandard, "", "", true)
for _, test := range []struct {
in string

@@ -201,6 +205,7 @@ func TestDecryptSegment(t *testing.T) {
}{
{"64=", ErrorBadBase32Encoding},
{"!", base32.CorruptInputError(0)},
{string(longName), ErrorTooLongAfterDecode},
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
@@ -1,4 +1,7 @@
// Package drive interfaces with the Google Drive object storage system

// +build go1.9

package drive

// FIXME need to deal with some corner cases

@@ -122,6 +125,29 @@ var (
_linkTemplates map[string]*template.Template // available link types
)

// Parse the scopes option returning a slice of scopes
func driveScopes(scopesString string) (scopes []string) {
if scopesString == "" {
scopesString = defaultScope
}
for _, scope := range strings.Split(scopesString, ",") {
scope = strings.TrimSpace(scope)
scopes = append(scopes, scopePrefix+scope)
}
return scopes
}

// Returns true if one of the scopes was "drive.appfolder"
func driveScopesContainsAppFolder(scopes []string) bool {
for _, scope := range scopes {
if scope == scopePrefix+"drive.appfolder" {
return true
}

}
return false
}

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{

@@ -136,18 +162,14 @@ func init() {
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
return
}

// Fill in the scopes
if opt.Scope == "" {
opt.Scope = defaultScope
}
driveConfig.Scopes = nil
for _, scope := range strings.Split(opt.Scope, ",") {
driveConfig.Scopes = append(driveConfig.Scopes, scopePrefix+strings.TrimSpace(scope))
// Set the root_folder_id if using drive.appfolder
if scope == "drive.appfolder" {
m.Set("root_folder_id", "appDataFolder")
}
driveConfig.Scopes = driveScopes(opt.Scope)
// Set the root_folder_id if using drive.appfolder
if driveScopesContainsAppFolder(driveConfig.Scopes) {
m.Set("root_folder_id", "appDataFolder")
}

if opt.ServiceAccountFile == "" {
err = oauthutil.Config("drive", name, m, driveConfig)
if err != nil {

@@ -696,12 +718,16 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er

// Figure out if the user wants to use a team drive
func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
// Stop if we are running non-interactive config
if fs.Config.AutoConfirm {
return nil
}
if opt.TeamDriveID == "" {
fmt.Printf("Configure this as a team drive?\n")
} else {
fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
}
if !config.ConfirmWithDefault(false) {
if !config.Confirm() {
return nil
}
client, err := createOAuthClient(opt, name, m)

@@ -753,7 +779,8 @@ func newPacer() *pacer.Pacer {
}

func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, driveConfig.Scopes...)
scopes := driveScopes(opt.Scope)
conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
if err != nil {
return nil, errors.Wrap(err, "error processing credentials")
}

@@ -1,3 +1,5 @@
// +build go1.9

package drive

import (
@@ -20,6 +22,31 @@ import (
"google.golang.org/api/drive/v3"
)

func TestDriveScopes(t *testing.T) {
for _, test := range []struct {
in string
want []string
wantFlag bool
}{
{"", []string{
"https://www.googleapis.com/auth/drive",
}, false},
{" drive.file , drive.readonly", []string{
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive.readonly",
}, false},
{" drive.file , drive.appfolder", []string{
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive.appfolder",
}, true},
} {
got := driveScopes(test.in)
assert.Equal(t, test.want, got, test.in)
gotFlag := driveScopesContainsAppFolder(got)
assert.Equal(t, test.wantFlag, gotFlag, test.in)
}
}

/*
var additionalMimeTypes = map[string]string{
"application/vnd.ms-excel.sheet.macroenabled.12": ".xlsm",

@@ -1,4 +1,7 @@
// Test Drive filesystem interface

// +build go1.9

package drive

import (

backend/drive/drive_unsupported.go (Normal file)

@@ -0,0 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build !go1.9

package drive

@@ -8,6 +8,8 @@
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS

// +build go1.9

package drive

import (
@@ -31,6 +31,7 @@ import (
"time"

"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"

@@ -203,7 +204,16 @@ func shouldRetry(err error) (bool, error) {
return false, err
}
baseErrString := errors.Cause(err).Error()
// FIXME there is probably a better way of doing this!
// handle any official Retry-After header from Dropbox's SDK first
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
time.Sleep(time.Duration(e.RateLimitError.RetryAfter) * time.Second)
}
return true, err
}
// Keep old behaviour for backward compatibility
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
return true, err
}

@@ -646,7 +646,21 @@ func (f *ftpReadCloser) Read(p []byte) (n int, err error) {

// Close the FTP reader and return the connection to the pool
func (f *ftpReadCloser) Close() error {
err := f.rc.Close()
var err error
errchan := make(chan error, 1)
go func() {
errchan <- f.rc.Close()
}()
// Wait for Close for up to 60 seconds
timer := time.NewTimer(60 * time.Second)
select {
case err = <-errchan:
timer.Stop()
case <-timer.C:
// if timer fired assume no error but connection dead
fs.Errorf(f.f, "Timeout when waiting for connection Close")
return nil
}
// if errors while reading or closing, dump the connection
if err != nil || f.err != nil {
_ = f.c.Quit()
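The FTP change above follows a general Go pattern: run a potentially blocking Close in a goroutine and stop waiting after a deadline. The sketch below shows that pattern in isolation; the 60-second figure comes from the diff, while the helper name, error value, and the slowCloser type are illustrative only (the real backend logs and returns nil on timeout rather than returning an error).

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"time"
)

// closeWithTimeout runs c.Close in a goroutine and gives up after d, so a
// wedged connection cannot block the caller indefinitely.
func closeWithTimeout(c io.Closer, d time.Duration) error {
	errchan := make(chan error, 1)
	go func() { errchan <- c.Close() }()
	timer := time.NewTimer(d)
	select {
	case err := <-errchan:
		timer.Stop()
		return err
	case <-timer.C:
		return errors.New("timeout waiting for Close")
	}
}

type slowCloser struct{}

func (slowCloser) Close() error { time.Sleep(2 * time.Second); return nil }

func main() {
	fmt.Println(closeWithTimeout(slowCloser{}, time.Second)) // timeout waiting for Close
}
```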
@@ -1,4 +1,7 @@
// Package googlecloudstorage provides an interface to Google Cloud Storage

// +build go1.9

package googlecloudstorage

/*

@@ -1,4 +1,7 @@
// Test GoogleCloudStorage filesystem interface

// +build go1.9

package googlecloudstorage_test

import (

@@ -0,0 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build !go1.9

package googlecloudstorage

@@ -193,7 +193,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
}
err := o.stat()
if err != nil {
return nil, errors.Wrap(err, "Stat failed")
return nil, err
}
return o, nil
}

@@ -416,6 +416,9 @@ func (o *Object) url() string {
func (o *Object) stat() error {
url := o.url()
res, err := o.fs.httpClient.Head(url)
if err == nil && res.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
err = statusError(res, err)
if err != nil {
return errors.Wrap(err, "failed to stat")

@@ -144,6 +144,11 @@ func TestNewObject(t *testing.T) {

dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))

// check object not found
o, err = f.NewObject("not found.txt")
assert.Nil(t, o)
assert.Equal(t, fs.ErrorObjectNotFound, err)
}

func TestOpen(t *testing.T) {
@@ -5,6 +5,7 @@ import (
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"

@@ -21,6 +22,7 @@ import (
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/file"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
)

@@ -651,7 +653,7 @@ func (o *Object) Hash(r hash.Type) (string, error) {
o.fs.objectHashesMu.Unlock()

if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
in, err := os.Open(o.path)
in, err := file.Open(o.path)
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")
}

@@ -741,6 +743,7 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
}

n, err = file.in.Read(p)
log.Printf("*** Read result n=%d, err=%v, len(p)=%d", n, err, len(p))
if n > 0 {
// Hash routines never return an error
_, _ = file.hash.Write(p[:n])

@@ -780,7 +783,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
}
}

fd, err := os.Open(o.path)
fd, err := file.Open(o.path)
if err != nil {
return
}

@@ -826,7 +829,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return err
}

out, err := os.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
out, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}

@@ -1,13 +1,13 @@
package local

import (
"os"
"path"
"testing"
"time"

"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/lib/file"
"github.com/ncw/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

@@ -38,7 +38,7 @@ func TestUpdatingCheck(t *testing.T) {
filePath := "sub dir/local test"
r.WriteFile(filePath, "content", time.Now())

fd, err := os.Open(path.Join(r.LocalName, filePath))
fd, err := file.Open(path.Join(r.LocalName, filePath))
if err != nil {
t.Fatalf("failed opening file %q: %v", filePath, err)
}
@@ -285,6 +285,7 @@ type AsyncOperationStatus struct {

// GetID returns a normalized ID of the item
// If DriveID is known it will be prefixed to the ID with # seperator
// Can be parsed using onedrive.parseNormalizedID(normalizedID)
func (i *Item) GetID() string {
if i.IsRemote() && i.RemoteItem.ID != "" {
return i.RemoteItem.ParentReference.DriveID + "#" + i.RemoteItem.ID

@@ -75,9 +75,8 @@ func init() {
return
}

// Are we running headless?
if automatic, _ := m.Get(config.ConfigAutomatic); automatic != "" {
// Yes, okay we are done
// Stop if we are running non-interactive config
if fs.Config.AutoConfirm {
return
}

@@ -199,7 +198,7 @@ func init() {

fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
// This does not work, YET :)
if !config.Confirm() {
if !config.ConfirmWithConfig(m, "config_drive_ok", true) {
log.Fatalf("Cancelled by user")
}

@@ -334,20 +333,10 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
var opts rest.Opts
if len(path) == 0 {
opts = rest.Opts{
Method: "GET",
Path: "/root",
}
} else {
opts = rest.Opts{
Method: "GET",
Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
}
}
// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
// if `relPath` == "", it reads the metadata for the item with that ID.
func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
opts := newOptsCall(normalizedID, "GET", ":/"+rest.URLPathEscape(replaceReservedChars(relPath)))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
@@ -356,6 +345,72 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Respon
return info, resp, err
}

// readMetaDataForPath reads the metadata from the path (relative to the absolute root)
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
firstSlashIndex := strings.IndexRune(path, '/')

if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
var opts rest.Opts
if len(path) == 0 {
opts = rest.Opts{
Method: "GET",
Path: "/root",
}
} else {
opts = rest.Opts{
Method: "GET",
Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
}
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
})
return info, resp, err
}

// The following branch handles the case when we're using OneDrive Personal and the path is in a folder.
// For OneDrive Personal, we need to consider the "shared with me" folders.
// An item in such a folder can only be addressed by its ID relative to the sharer's driveID or
// by its path relative to the folder's ID relative to the sharer's driveID.
// Note: A "shared with me" folder can only be placed in the sharee's absolute root.
// So we read metadata relative to a suitable folder's normalized ID.
var dirCacheFoundRoot bool
var rootNormalizedID string
if f.dirCache != nil {
var ok bool
if rootNormalizedID, ok = f.dirCache.Get(""); ok {
dirCacheFoundRoot = true
}
}

relPath, insideRoot := getRelativePathInsideBase(f.root, path)
var firstDir, baseNormalizedID string
if !insideRoot || !dirCacheFoundRoot {
// We do not have the normalized ID in dirCache for our query to base on. Query it manually.
firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
info, resp, err := f.readMetaDataForPath(firstDir)
if err != nil {
return info, resp, err
}
baseNormalizedID = info.GetID()
} else {
if f.root != "" {
// Read metadata based on root
baseNormalizedID = rootNormalizedID
} else {
// Read metadata based on firstDir
firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
baseNormalizedID, err = f.dirCache.FindDir(firstDir, false)
if err != nil {
return nil, nil, err
}
}
}

return f.readMetaDataForPathRelativeToID(baseNormalizedID, relPath)
}

// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
// Decode error response
@@ -514,18 +569,11 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
parent, ok := f.dirCache.GetInv(pathID)
_, ok := f.dirCache.GetInv(pathID)
if !ok {
return "", false, errors.New("couldn't find parent ID")
}
path := leaf
if parent != "" {
path = parent + "/" + path
}
if f.dirCache.FoundRoot() {
path = f.rootSlash() + path
}
info, resp, err := f.readMetaDataForPath(path)
info, resp, err := f.readMetaDataForPathRelativeToID(pathID, leaf)
if err != nil {
if resp != nil && resp.StatusCode == http.StatusNotFound {
return "", false, nil

@@ -867,13 +915,13 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
opts.NoResponse = true

id, _, _ := parseDirID(directoryID)
id, dstDriveID, _ := parseNormalizedID(directoryID)

replacedLeaf := replaceReservedChars(leaf)
copyReq := api.CopyItemRequest{
Name: &replacedLeaf,
ParentReference: api.ItemReference{
DriveID: f.driveID,
DriveID: dstDriveID,
ID: id,
},
}

@@ -940,15 +988,23 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
return nil, err
}

id, dstDriveID, _ := parseNormalizedID(directoryID)
_, srcObjDriveID, _ := parseNormalizedID(srcObj.id)

if dstDriveID != srcObjDriveID {
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
// "Items cannot be moved between Drives using this request."
return nil, fs.ErrorCantMove
}

// Move the object
opts := newOptsCall(srcObj.id, "PATCH", "")

id, _, _ := parseDirID(directoryID)

move := api.MoveItemRequest{
Name: replaceReservedChars(leaf),
ParentReference: &api.ItemReference{
ID: id,
DriveID: dstDriveID,
ID: id,
},
// We set the mod time too as it gets reset otherwise
FileSystemInfo: &api.FileSystemInfoFacet{

@@ -1024,7 +1080,20 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
if err != nil {
return err
}
parsedDstDirID, _, _ := parseDirID(dstDirectoryID)
parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)

// Find ID of src
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
if err != nil {
return err
}
_, srcDriveID, _ := parseNormalizedID(srcID)

if dstDriveID != srcDriveID {
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
// "Items cannot be moved between Drives using this request."
return fs.ErrorCantDirMove
}

// Check destination does not exist
if dstRemote != "" {

@@ -1038,14 +1107,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}
}

// Find ID of src
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
if err != nil {
return err
}

// Get timestamps of src so they can be preserved
srcInfo, _, err := srcFs.readMetaDataForPath(srcPath)
srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(srcID, "")
if err != nil {
return err
}

@@ -1055,7 +1118,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
move := api.MoveItemRequest{
Name: replaceReservedChars(leaf),
ParentReference: &api.ItemReference{
ID: parsedDstDirID,
DriveID: dstDriveID,
ID: parsedDstDirID,
},
// We set the mod time too as it gets reset otherwise
FileSystemInfo: &api.FileSystemInfoFacet{
@@ -1122,7 +1186,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
if err != nil {
return "", err
}
opts := newOptsCall(info.ID, "POST", "/createLink")
opts := newOptsCall(info.GetID(), "POST", "/createLink")

share := api.CreateShareLinkRequest{
Type: "view",

@@ -1270,13 +1334,13 @@ func (o *Object) ModTime() time.Time {
// setModTime sets the modification time of the local fs object
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
var opts rest.Opts
_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
_, drive, rootURL := parseDirID(directoryID)
leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
trueDirID, drive, rootURL := parseNormalizedID(directoryID)
if drive != "" {
opts = rest.Opts{
Method: "PATCH",
RootURL: rootURL,
Path: "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()),
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf),
}
} else {
opts = rest.Opts{

@@ -1344,7 +1408,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUploadResponse, err error) {
leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
id, drive, rootURL := parseDirID(directoryID)
id, drive, rootURL := parseNormalizedID(directoryID)
var opts rest.Opts
if drive != "" {
opts = rest.Opts{

@@ -1477,13 +1541,13 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (
fs.Debugf(o, "Starting singlepart upload")
var resp *http.Response
var opts rest.Opts
_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
_, drive, rootURL := parseDirID(directoryID)
leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
trueDirID, drive, rootURL := parseNormalizedID(directoryID)
if drive != "" {
opts = rest.Opts{
Method: "PUT",
RootURL: rootURL,
Path: "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf) + ":/content",
ContentLength: &size,
Body: in,
}

@@ -1566,8 +1630,8 @@ func (o *Object) ID() string {
return o.id
}

func newOptsCall(id string, method string, route string) (opts rest.Opts) {
id, drive, rootURL := parseDirID(id)
func newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
id, drive, rootURL := parseNormalizedID(normalizedID)

if drive != "" {
return rest.Opts{
@@ -1582,7 +1646,10 @@ func newOptsCall(id string, method string, route string) (opts rest.Opts) {
}
}

func parseDirID(ID string) (string, string, string) {
// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
// and returns itemID, driveID, rootURL.
// Such a normalized ID can come from (*Item).GetID()
func parseNormalizedID(ID string) (string, string, string) {
if strings.Index(ID, "#") >= 0 {
s := strings.Split(ID, "#")
return s[1], s[0], graphURL + "/drives"
@@ -1590,6 +1657,21 @@ func parseDirID(ID string) (string, string, string) {
return ID, "", ""
}

// getRelativePathInsideBase checks if `target` is inside `base`. If so, it
// returns a relative path for `target` based on `base` and a boolean `true`.
// Otherwise returns "", false.
func getRelativePathInsideBase(base, target string) (string, bool) {
if base == "" {
return target, true
}

baseSlash := base + "/"
if strings.HasPrefix(target+"/", baseSlash) {
return target[len(baseSlash):], true
}
return "", false
}

// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
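To make the behaviour of the two new onedrive helpers concrete, the standalone sketch below reproduces their logic (omitting the rootURL third return value of the real parseNormalizedID) and shows a few sample inputs. The drive ID value and helper names here are purely illustrative.

```go
package main

import (
	"fmt"
	"strings"
)

// parseNormalizedID splits "driveID#itemID" into (itemID, driveID); a plain
// itemID comes back with an empty driveID, mirroring the function above.
func parseNormalizedID(id string) (itemID, driveID string) {
	if strings.Contains(id, "#") {
		s := strings.SplitN(id, "#", 2)
		return s[1], s[0]
	}
	return id, ""
}

// relativePathInsideBase returns target's path relative to base and whether
// target actually lies inside base, mirroring getRelativePathInsideBase above.
func relativePathInsideBase(base, target string) (string, bool) {
	if base == "" {
		return target, true
	}
	baseSlash := base + "/"
	if strings.HasPrefix(target+"/", baseSlash) {
		return target[len(baseSlash):], true
	}
	return "", false
}

func main() {
	fmt.Println(parseNormalizedID("driveABC#01XYZ"))            // 01XYZ driveABC
	fmt.Println(parseNormalizedID("01XYZ"))                     // 01XYZ (empty driveID)
	fmt.Println(relativePathInsideBase("shared", "shared/a/b")) // a/b true
	fmt.Println(relativePathInsideBase("shared", "other/a"))    //  false
}
```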
@@ -72,14 +72,51 @@ func init() {
Help: "Number of connection retries.",
Default: 3,
Advanced: true,
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload

Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.

When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.

Note that "--qingstor-upload-concurrency" chunks of this size are buffered
in memory per transfer.

If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

If you are uploading small numbers of large file over high speed link
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 4,
Advanced: true,
}},
})
}

// Constants
const (
listLimitSize = 1000 // Number of items to read at once
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
listLimitSize = 1000 // Number of items to read at once
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
minChunkSize = fs.SizeSuffix(minMultiPartSize)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
)

// Globals

@@ -92,12 +129,15 @@ func timestampToTime(tp int64) time.Time {

// Options defines the configuration for this backend
type Options struct {
EnvAuth bool `config:"env_auth"`
AccessKeyID string `config:"access_key_id"`
SecretAccessKey string `config:"secret_access_key"`
Endpoint string `config:"endpoint"`
Zone string `config:"zone"`
ConnectionRetries int `config:"connection_retries"`
EnvAuth bool `config:"env_auth"`
AccessKeyID string `config:"access_key_id"`
SecretAccessKey string `config:"secret_access_key"`
Endpoint string `config:"endpoint"`
Zone string `config:"zone"`
ConnectionRetries int `config:"connection_retries"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
}

// Fs represents a remote qingstor server

@@ -227,6 +267,36 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
return qs.Init(cf)
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}

func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}

func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -235,6 +305,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "qingstor: chunk size")
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "qingstor: upload cutoff")
}
bucket, key, err := qsParsePath(root)
if err != nil {
return nil, err

@@ -913,16 +991,24 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
mimeType := fs.MimeType(src)

req := uploadInput{
body: in,
qsSvc: o.fs.svc,
bucket: o.fs.bucket,
zone: o.fs.zone,
key: key,
mimeType: mimeType,
body: in,
qsSvc: o.fs.svc,
bucket: o.fs.bucket,
zone: o.fs.zone,
key: key,
mimeType: mimeType,
partSize: int64(o.fs.opt.ChunkSize),
concurrency: o.fs.opt.UploadConcurrency,
}
uploader := newUploader(&req)

err = uploader.upload()
size := src.Size()
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
if multipart {
err = uploader.upload()
} else {
err = uploader.singlePartUpload(in, size)
}
if err != nil {
return err
}
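The decision added to Update above can be summarised as a one-line rule: unknown sizes and anything at or over the cutoff go multipart, everything else is sent as a single part. The sketch below restates that rule with the 200M default cutoff from this diff; function and constant names are illustrative, not the backend's API.

```go
package main

import "fmt"

// chooseMultipart mirrors the new Update logic: size < 0 means the size is
// unknown (stream it as multipart); otherwise compare against the cutoff.
func chooseMultipart(size, uploadCutoff int64) bool {
	return size < 0 || size >= uploadCutoff
}

func main() {
	const cutoff = 200 * 1024 * 1024 // matches defaultUploadCutoff in the diff
	fmt.Println(chooseMultipart(-1, cutoff))            // true  (unknown size)
	fmt.Println(chooseMultipart(50*1024*1024, cutoff))  // false (single part)
	fmt.Println(chooseMultipart(512*1024*1024, cutoff)) // true  (multipart)
}
```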
@@ -2,12 +2,12 @@
|
||||
|
||||
// +build !plan9
|
||||
|
||||
package qingstor_test
|
||||
package qingstor
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/qingstor"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -15,6 +15,19 @@ import (
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestQingStor:",
|
||||
NilObject: (*qingstor.Object)(nil),
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: minChunkSize,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
|
||||
@@ -152,11 +152,11 @@ func (u *uploader) init() {
|
||||
}
|
||||
|
||||
// singlePartUpload upload a single object that contentLength less than "defaultUploadPartSize"
|
||||
func (u *uploader) singlePartUpload(buf io.ReadSeeker) error {
|
||||
func (u *uploader) singlePartUpload(buf io.Reader, size int64) error {
|
||||
bucketInit, _ := u.bucketInit()
|
||||
|
||||
req := qs.PutObjectInput{
|
||||
ContentLength: &u.readerPos,
|
||||
ContentLength: &size,
|
||||
ContentType: &u.cfg.mimeType,
|
||||
Body: buf,
|
||||
}
|
||||
@@ -179,13 +179,13 @@ func (u *uploader) upload() error {
|
||||
// Do one read to determine if we have more than one part
|
||||
reader, _, err := u.nextReader()
|
||||
if err == io.EOF { // single part
|
||||
fs.Debugf(u, "Tried to upload a singile object to QingStor")
|
||||
return u.singlePartUpload(reader)
|
||||
fs.Debugf(u, "Uploading as single part object to QingStor")
|
||||
return u.singlePartUpload(reader, u.readerPos)
|
||||
} else if err != nil {
|
||||
return errors.Errorf("read upload data failed: %s", err)
|
||||
}
|
||||
|
||||
fs.Debugf(u, "Treied to upload a multi-part object to QingStor")
|
||||
fs.Debugf(u, "Uploading as multi-part object to QingStor")
|
||||
mu := multiUploader{uploader: u}
|
||||
return mu.multiPartUpload(reader)
|
||||
}
|
||||
@@ -261,7 +261,7 @@ func (mu *multiUploader) initiate() error {
|
||||
req := qs.InitiateMultipartUploadInput{
|
||||
ContentType: &mu.cfg.mimeType,
|
||||
}
|
||||
fs.Debugf(mu, "Tried to initiate a multi-part upload")
|
||||
fs.Debugf(mu, "Initiating a multi-part upload")
|
||||
rsp, err := bucketInit.InitiateMultipartUpload(mu.cfg.key, &req)
|
||||
if err == nil {
|
||||
mu.uploadID = rsp.UploadID
|
||||
@@ -279,12 +279,12 @@ func (mu *multiUploader) send(c chunk) error {
|
||||
ContentLength: &c.size,
|
||||
Body: c.buffer,
|
||||
}
|
||||
fs.Debugf(mu, "Tried to upload a part to QingStor that partNumber %d and partSize %d", c.partNumber, c.size)
|
||||
fs.Debugf(mu, "Uploading a part to QingStor with partNumber %d and partSize %d", c.partNumber, c.size)
|
||||
_, err := bucketInit.UploadMultipart(mu.cfg.key, &req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fs.Debugf(mu, "Upload part finished that partNumber %d and partSize %d", c.partNumber, c.size)
|
||||
fs.Debugf(mu, "Done uploading part partNumber %d and partSize %d", c.partNumber, c.size)
|
||||
|
||||
mu.mtx.Lock()
|
||||
defer mu.mtx.Unlock()
|
||||
@@ -304,7 +304,7 @@ func (mu *multiUploader) list() error {
|
||||
req := qs.ListMultipartInput{
|
||||
UploadID: mu.uploadID,
|
||||
}
|
||||
fs.Debugf(mu, "Tried to list a multi-part")
|
||||
fs.Debugf(mu, "Reading multi-part details")
|
||||
rsp, err := bucketInit.ListMultipart(mu.cfg.key, &req)
|
||||
if err == nil {
|
||||
mu.objectParts = rsp.ObjectParts
|
||||
@@ -331,7 +331,7 @@ func (mu *multiUploader) complete() error {
|
||||
ObjectParts: mu.objectParts,
|
||||
ETag: &md5String,
|
||||
}
|
||||
fs.Debugf(mu, "Tried to complete a multi-part")
|
||||
fs.Debugf(mu, "Completing multi-part object")
|
||||
_, err = bucketInit.CompleteMultipartUpload(mu.cfg.key, &req)
|
||||
if err == nil {
|
||||
fs.Debugf(mu, "Complete multi-part finished")
|
||||
@@ -348,7 +348,7 @@ func (mu *multiUploader) abort() error {
|
||||
req := qs.AbortMultipartUploadInput{
|
||||
UploadID: uploadID,
|
||||
}
|
||||
fs.Debugf(mu, "Tried to abort a multi-part")
|
||||
fs.Debugf(mu, "Aborting multi-part object %q", *uploadID)
|
||||
_, err = bucketInit.AbortMultipartUpload(mu.cfg.key, &req)
|
||||
}
|
||||
|
||||
@@ -392,6 +392,14 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) error {
|
||||
var nextChunkLen int
|
||||
reader, nextChunkLen, err = mu.nextReader()
|
||||
if err != nil && err != io.EOF {
|
||||
// empty ch
|
||||
go func() {
|
||||
for range ch {
|
||||
}
|
||||
}()
|
||||
// Wait for all goroutines to finish
|
||||
close(ch)
|
||||
mu.wg.Wait()
|
||||
return err
|
||||
}
|
||||
if nextChunkLen == 0 && partNumber > 0 {
|
||||
|
||||
backend/s3/s3.go
@@ -53,7 +53,7 @@ import (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "s3",
|
||||
Description: "Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)",
|
||||
Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: fs.ConfigProvider,
|
||||
@@ -61,6 +61,9 @@ func init() {
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "AWS",
|
||||
Help: "Amazon Web Services (AWS) S3",
|
||||
}, {
|
||||
Value: "Alibaba",
|
||||
Help: "Alibaba Cloud Object Storage System (OSS) formerly Aliyun",
|
||||
}, {
|
||||
Value: "Ceph",
|
||||
Help: "Ceph Object Storage",
|
||||
@@ -76,6 +79,9 @@ func init() {
|
||||
}, {
|
||||
Value: "Minio",
|
||||
Help: "Minio Object Storage",
|
||||
}, {
|
||||
Value: "Netease",
|
||||
Help: "Netease Object Storage (NOS)",
|
||||
}, {
|
||||
Value: "Wasabi",
|
||||
Help: "Wasabi Object Storage",
|
||||
@@ -150,7 +156,7 @@ func init() {
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Provider: "!AWS",
|
||||
Provider: "!AWS,Alibaba",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Use this if unsure. Will use v4 signatures and an empty region.",
|
||||
@@ -269,10 +275,73 @@ func init() {
|
||||
Value: "s3.tor01.objectstorage.service.networklayer.com",
|
||||
Help: "Toronto Single Site Private Endpoint",
|
||||
}},
|
||||
}, {
|
||||
// oss endpoints: https://help.aliyun.com/document_detail/31837.html
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for OSS API.",
|
||||
Provider: "Alibaba",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "oss-cn-hangzhou.aliyuncs.com",
|
||||
Help: "East China 1 (Hangzhou)",
|
||||
}, {
|
||||
Value: "oss-cn-shanghai.aliyuncs.com",
|
||||
Help: "East China 2 (Shanghai)",
|
||||
}, {
|
||||
Value: "oss-cn-qingdao.aliyuncs.com",
|
||||
Help: "North China 1 (Qingdao)",
|
||||
}, {
|
||||
Value: "oss-cn-beijing.aliyuncs.com",
|
||||
Help: "North China 2 (Beijing)",
|
||||
}, {
|
||||
Value: "oss-cn-zhangjiakou.aliyuncs.com",
|
||||
Help: "North China 3 (Zhangjiakou)",
|
||||
}, {
|
||||
Value: "oss-cn-huhehaote.aliyuncs.com",
|
||||
Help: "North China 5 (Huhehaote)",
|
||||
}, {
|
||||
Value: "oss-cn-shenzhen.aliyuncs.com",
|
||||
Help: "South China 1 (Shenzhen)",
|
||||
}, {
|
||||
Value: "oss-cn-hongkong.aliyuncs.com",
|
||||
Help: "Hong Kong (Hong Kong)",
|
||||
}, {
|
||||
Value: "oss-us-west-1.aliyuncs.com",
|
||||
Help: "US West 1 (Silicon Valley)",
|
||||
}, {
|
||||
Value: "oss-us-east-1.aliyuncs.com",
|
||||
Help: "US East 1 (Virginia)",
|
||||
}, {
|
||||
Value: "oss-ap-southeast-1.aliyuncs.com",
|
||||
Help: "Southeast Asia Southeast 1 (Singapore)",
|
||||
}, {
|
||||
Value: "oss-ap-southeast-2.aliyuncs.com",
|
||||
Help: "Asia Pacific Southeast 2 (Sydney)",
|
||||
}, {
|
||||
Value: "oss-ap-southeast-3.aliyuncs.com",
|
||||
Help: "Southeast Asia Southeast 3 (Kuala Lumpur)",
|
||||
}, {
|
||||
Value: "oss-ap-southeast-5.aliyuncs.com",
|
||||
Help: "Asia Pacific Southeast 5 (Jakarta)",
|
||||
}, {
|
||||
Value: "oss-ap-northeast-1.aliyuncs.com",
|
||||
Help: "Asia Pacific Northeast 1 (Japan)",
|
||||
}, {
|
||||
Value: "oss-ap-south-1.aliyuncs.com",
|
||||
Help: "Asia Pacific South 1 (Mumbai)",
|
||||
}, {
|
||||
Value: "oss-eu-central-1.aliyuncs.com",
|
||||
Help: "Central Europe 1 (Frankfurt)",
|
||||
}, {
|
||||
Value: "oss-eu-west-1.aliyuncs.com",
|
||||
Help: "West Europe (London)",
|
||||
}, {
|
||||
Value: "oss-me-east-1.aliyuncs.com",
|
||||
Help: "Middle East 1 (Dubai)",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,IBMCOS",
|
||||
Provider: "!AWS,IBMCOS,Alibaba",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-west-1.dream.io",
|
||||
Help: "Dream Objects endpoint",
|
||||
@@ -449,11 +518,13 @@ func init() {
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
|
||||
Provider: "!AWS,IBMCOS",
|
||||
Provider: "!AWS,IBMCOS,Alibaba",
|
||||
}, {
|
||||
Name: "acl",
|
||||
Help: `Canned ACL used when creating buckets and storing or copying objects.
|
||||
|
||||
This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
|
||||
|
||||
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
|
||||
|
||||
Note that this ACL is applied when server side copying objects as S3
|
||||
@@ -499,6 +570,28 @@ doesn't copy the ACL from the source but rather writes a fresh one.`,
|
||||
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS",
|
||||
Provider: "IBMCOS",
|
||||
}},
|
||||
}, {
|
||||
Name: "bucket_acl",
|
||||
Help: `Canned ACL used when creating buckets.
|
||||
|
||||
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
|
||||
|
||||
Note that this ACL is applied only when creating buckets. If it
|
||||
isn't set then "acl" is used instead.`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "private",
|
||||
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
|
||||
}, {
|
||||
Value: "public-read",
|
||||
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
|
||||
}, {
|
||||
Value: "public-read-write",
|
||||
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
|
||||
}, {
|
||||
Value: "authenticated-read",
|
||||
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
|
||||
}},
|
||||
}, {
|
||||
Name: "server_side_encryption",
|
||||
Help: "The server-side encryption algorithm used when storing this object in S3.",
|
||||
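As an illustration of how acl and bucket_acl interact (the remote name and values here are invented for the example), a config where objects are written private but newly created buckets get public-read:

    [s3demo]
    type = s3
    provider = AWS
    acl = private
    bucket_acl = public-read

If bucket_acl is left unset, bucket creation falls back to the acl value, as the help text above describes.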
@@ -543,13 +636,42 @@ doesn't copy the ACL from the source but rather writes a fresh one.`,
|
||||
}, {
|
||||
Value: "ONEZONE_IA",
|
||||
Help: "One Zone Infrequent Access storage class",
|
||||
}, {
|
||||
Value: "GLACIER",
|
||||
Help: "Glacier storage class",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
|
||||
Name: "storage_class",
|
||||
Help: "The storage class to use when storing new objects in OSS.",
|
||||
Provider: "Alibaba",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Default",
|
||||
}, {
|
||||
Value: "STANDARD",
|
||||
Help: "Standard storage class",
|
||||
}, {
|
||||
Value: "GLACIER",
|
||||
Help: "Archive storage mode.",
|
||||
}, {
|
||||
Value: "STANDARD_IA",
|
||||
Help: "Infrequent access storage mode.",
|
||||
}},
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: `Cutoff for switching to chunked upload
|
||||
|
||||
Any files larger than this will be uploaded in chunks of chunk_size.
|
||||
The minimum is 0 and the maximum is 5GB.`,
|
||||
Default: defaultUploadCutoff,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: `Chunk size to use for uploading.
|
||||
|
||||
Any files larger than this will be uploaded in chunks of this
|
||||
size. The default is 5MB. The minimum is 5MB.
|
||||
When uploading files larger than upload_cutoff they will be uploaded
|
||||
as multipart uploads using this chunk size.
|
||||
|
||||
Note that "--s3-upload-concurrency" chunks of this size are buffered
|
||||
in memory per transfer.
|
||||
@@ -577,7 +699,7 @@ concurrently.
|
||||
If you are uploading small numbers of large files over a high speed link
|
||||
and these uploads do not fully utilize your bandwidth, then increasing
|
||||
this may help to speed up the transfers.`,
|
||||
Default: 2,
|
||||
Default: 4,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "force_path_style",
|
||||
@@ -607,14 +729,16 @@ Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
|
||||
|
||||
// Constants
|
||||
const (
|
||||
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
|
||||
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
|
||||
listChunkSize = 1000 // number of items to read at once
|
||||
maxRetries = 10 // number of retries to make of operations
|
||||
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
|
||||
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
|
||||
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
|
||||
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
|
||||
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
|
||||
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
|
||||
listChunkSize = 1000 // number of items to read at once
|
||||
maxRetries = 10 // number of retries to make of operations
|
||||
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
|
||||
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
|
||||
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
|
||||
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
|
||||
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
|
||||
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
|
||||
)
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
@@ -627,9 +751,11 @@ type Options struct {
|
||||
Endpoint string `config:"endpoint"`
|
||||
LocationConstraint string `config:"location_constraint"`
|
||||
ACL string `config:"acl"`
|
||||
BucketACL string `config:"bucket_acl"`
|
||||
ServerSideEncryption string `config:"server_side_encryption"`
|
||||
SSEKMSKeyID string `config:"sse_kms_key_id"`
|
||||
StorageClass string `config:"storage_class"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DisableChecksum bool `config:"disable_checksum"`
|
||||
SessionToken string `config:"session_token"`
|
||||
@@ -651,6 +777,7 @@ type Fs struct {
|
||||
bucketOK bool // true if we have created the bucket
|
||||
bucketDeleted bool // true if we have deleted the bucket
|
||||
pacer *pacer.Pacer // To pace the API calls
|
||||
srv *http.Client // a plain http client
|
||||
}
|
||||
|
||||
// Object describes a s3 object
|
||||
@@ -699,7 +826,7 @@ func (f *Fs) Features() *fs.Features {
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
// See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
|
||||
var retryErrorCodes = []int{
|
||||
409, // Conflict - various states that could be resolved on a retry
|
||||
// 409, // Conflict - various states that could be resolved on a retry
|
||||
503, // Service Unavailable/Slow Down - "Reduce your request rate"
|
||||
}
|
||||
|
||||
@@ -707,14 +834,13 @@ var retryErrorCodes = []int{
|
||||
// as it should notice closed connections and timeouts which are the most likely
|
||||
// sort of failure modes
|
||||
func shouldRetry(err error) (bool, error) {
|
||||
|
||||
// If this is an awserr object, try and extract more useful information to determine if we should retry
|
||||
if awsError, ok := err.(awserr.Error); ok {
|
||||
// Simple case, check the original embedded error in case it's generically retriable
|
||||
if fserrors.ShouldRetry(awsError.OrigErr()) {
|
||||
return true, err
|
||||
}
|
||||
//Failing that, if it's a RequestFailure it's probably got an http status code we can check
|
||||
// Failing that, if it's a RequestFailure it's probably got an http status code we can check
|
||||
if reqErr, ok := err.(awserr.RequestFailure); ok {
|
||||
for _, e := range retryErrorCodes {
|
||||
if reqErr.StatusCode() == e {
|
||||
@@ -723,7 +849,7 @@ func shouldRetry(err error) (bool, error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
//Ok, not an awserr, check for generic failure conditions
|
||||
// Ok, not an awserr, check for generic failure conditions
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
|
||||
@@ -800,6 +926,9 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
|
||||
if opt.Region == "" {
|
||||
opt.Region = "us-east-1"
|
||||
}
|
||||
if opt.Provider == "Alibaba" || opt.Provider == "Netease" {
|
||||
opt.ForcePathStyle = false
|
||||
}
|
||||
awsConfig := aws.NewConfig().
|
||||
WithRegion(opt.Region).
|
||||
WithMaxRetries(maxRetries).
|
||||
@@ -854,6 +983,21 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
|
||||
return
|
||||
}
|
||||
|
||||
func checkUploadCutoff(cs fs.SizeSuffix) error {
|
||||
if cs > maxUploadCutoff {
|
||||
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadCutoff(cs)
|
||||
if err == nil {
|
||||
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
@@ -866,10 +1010,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "s3: chunk size")
|
||||
}
|
||||
err = checkUploadCutoff(opt.UploadCutoff)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "s3: upload cutoff")
|
||||
}
|
||||
bucket, directory, err := s3ParsePath(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ACL == "" {
|
||||
opt.ACL = "private"
|
||||
}
|
||||
if opt.BucketACL == "" {
|
||||
opt.BucketACL = opt.ACL
|
||||
}
|
||||
c, ses, err := s3Connection(opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -882,6 +1036,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
bucket: bucket,
|
||||
ses: ses,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
|
||||
srv: fshttp.NewClient(fs.Config),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
@@ -1227,7 +1382,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
}
|
||||
req := s3.CreateBucketInput{
|
||||
Bucket: &f.bucket,
|
||||
ACL: &f.opt.ACL,
|
||||
ACL: &f.opt.BucketACL,
|
||||
}
|
||||
if f.opt.LocationConstraint != "" {
|
||||
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
|
||||
@@ -1246,6 +1401,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
if err == nil {
|
||||
f.bucketOK = true
|
||||
f.bucketDeleted = false
|
||||
fs.Infof(f, "Bucket created with ACL %q", *req.ACL)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -1269,6 +1425,7 @@ func (f *Fs) Rmdir(dir string) error {
|
||||
if err == nil {
|
||||
f.bucketOK = false
|
||||
f.bucketDeleted = true
|
||||
fs.Infof(f, "Bucket deleted")
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -1556,38 +1713,46 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
modTime := src.ModTime()
|
||||
size := src.Size()
|
||||
|
||||
uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
|
||||
u.Concurrency = o.fs.opt.UploadConcurrency
|
||||
u.LeavePartsOnError = false
|
||||
u.S3 = o.fs.c
|
||||
u.PartSize = int64(o.fs.opt.ChunkSize)
|
||||
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
||||
var uploader *s3manager.Uploader
|
||||
if multipart {
|
||||
uploader = s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
|
||||
u.Concurrency = o.fs.opt.UploadConcurrency
|
||||
u.LeavePartsOnError = false
|
||||
u.S3 = o.fs.c
|
||||
u.PartSize = int64(o.fs.opt.ChunkSize)
|
||||
|
||||
if size == -1 {
|
||||
// Make parts as small as possible while still being able to upload to the
|
||||
// S3 file size limit. Rounded up to nearest MB.
|
||||
u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
|
||||
return
|
||||
}
|
||||
// Adjust PartSize until the number of parts is small enough.
|
||||
if size/u.PartSize >= s3manager.MaxUploadParts {
|
||||
// Calculate partition size rounded up to the nearest MB
|
||||
u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
|
||||
}
|
||||
})
|
||||
if size == -1 {
|
||||
// Make parts as small as possible while still being able to upload to the
|
||||
// S3 file size limit. Rounded up to nearest MB.
|
||||
u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
|
||||
return
|
||||
}
|
||||
// Adjust PartSize until the number of parts is small enough.
|
||||
if size/u.PartSize >= s3manager.MaxUploadParts {
|
||||
// Calculate partition size rounded up to the nearest MB
|
||||
u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Set the mtime in the meta data
|
||||
metadata := map[string]*string{
|
||||
metaMtime: aws.String(swift.TimeToFloatString(modTime)),
|
||||
}
|
||||
|
||||
if !o.fs.opt.DisableChecksum && size > uploader.PartSize {
|
||||
// read the md5sum if available for non multipart and if
|
||||
// disable checksum isn't present.
|
||||
var md5sum string
|
||||
if !multipart || !o.fs.opt.DisableChecksum {
|
||||
hash, err := src.Hash(hash.MD5)
|
||||
|
||||
if err == nil && matchMd5.MatchString(hash) {
|
||||
hashBytes, err := hex.DecodeString(hash)
|
||||
|
||||
if err == nil {
|
||||
metadata[metaMD5Hash] = aws.String(base64.StdEncoding.EncodeToString(hashBytes))
|
||||
md5sum = base64.StdEncoding.EncodeToString(hashBytes)
|
||||
if multipart {
|
||||
metadata[metaMD5Hash] = &md5sum
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
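The part-size arithmetic above rounds up to the next whole MiB so that even very large files fit within s3manager.MaxUploadParts parts. As a worked example, assuming the SDK's limit of 10000 parts and a 100 GiB file:

    size := int64(100) << 30                        // 107374182400 bytes
    partSize := (((size / 10000) >> 20) + 1) << 20  // 11 MiB = 11534336 bytes
    // 107374182400 / 11534336 is about 9310 parts, comfortably under the 10000 part limit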
@@ -1596,30 +1761,98 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
mimeType := fs.MimeType(src)
|
||||
|
||||
key := o.fs.root + o.remote
|
||||
req := s3manager.UploadInput{
|
||||
Bucket: &o.fs.bucket,
|
||||
ACL: &o.fs.opt.ACL,
|
||||
Key: &key,
|
||||
Body: in,
|
||||
ContentType: &mimeType,
|
||||
Metadata: metadata,
|
||||
//ContentLength: &size,
|
||||
}
|
||||
if o.fs.opt.ServerSideEncryption != "" {
|
||||
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
|
||||
}
|
||||
if o.fs.opt.SSEKMSKeyID != "" {
|
||||
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
|
||||
}
|
||||
if o.fs.opt.StorageClass != "" {
|
||||
req.StorageClass = &o.fs.opt.StorageClass
|
||||
}
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
_, err = uploader.Upload(&req)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
if multipart {
|
||||
req := s3manager.UploadInput{
|
||||
Bucket: &o.fs.bucket,
|
||||
ACL: &o.fs.opt.ACL,
|
||||
Key: &key,
|
||||
Body: in,
|
||||
ContentType: &mimeType,
|
||||
Metadata: metadata,
|
||||
//ContentLength: &size,
|
||||
}
|
||||
if o.fs.opt.ServerSideEncryption != "" {
|
||||
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
|
||||
}
|
||||
if o.fs.opt.SSEKMSKeyID != "" {
|
||||
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
|
||||
}
|
||||
if o.fs.opt.StorageClass != "" {
|
||||
req.StorageClass = &o.fs.opt.StorageClass
|
||||
}
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
_, err = uploader.Upload(&req)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
req := s3.PutObjectInput{
|
||||
Bucket: &o.fs.bucket,
|
||||
ACL: &o.fs.opt.ACL,
|
||||
Key: &key,
|
||||
ContentType: &mimeType,
|
||||
Metadata: metadata,
|
||||
}
|
||||
if md5sum != "" {
|
||||
req.ContentMD5 = &md5sum
|
||||
}
|
||||
if o.fs.opt.ServerSideEncryption != "" {
|
||||
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
|
||||
}
|
||||
if o.fs.opt.SSEKMSKeyID != "" {
|
||||
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
|
||||
}
|
||||
if o.fs.opt.StorageClass != "" {
|
||||
req.StorageClass = &o.fs.opt.StorageClass
|
||||
}
|
||||
|
||||
// Create the request
|
||||
putObj, _ := o.fs.c.PutObjectRequest(&req)
|
||||
|
||||
// Sign it so we can upload using a presigned request.
|
||||
//
|
||||
// Note the SDK doesn't currently support streaming to
|
||||
// PutObject so we'll use this work-around.
|
||||
url, headers, err := putObj.PresignRequest(15 * time.Minute)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "s3 upload: sign request")
|
||||
}
|
||||
|
||||
// Set request to nil if empty so as not to make chunked encoding
|
||||
if size == 0 {
|
||||
in = nil
|
||||
}
|
||||
|
||||
// create the vanilla http request
|
||||
httpReq, err := http.NewRequest("PUT", url, in)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "s3 upload: new request")
|
||||
}
|
||||
|
||||
// set the headers we signed and the length
|
||||
httpReq.Header = headers
|
||||
httpReq.ContentLength = size
|
||||
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := o.fs.srv.Do(httpReq)
|
||||
if err != nil {
|
||||
return shouldRetry(err)
|
||||
}
|
||||
body, err := rest.ReadBody(resp)
|
||||
if err != nil {
|
||||
return shouldRetry(err)
|
||||
}
|
||||
if resp.StatusCode >= 200 && resp.StatusCode < 299 {
|
||||
return false, nil
|
||||
}
|
||||
err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
|
||||
return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Read the metadata from the newly created object
|
||||
|
||||
@@ -23,4 +23,8 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
|
||||
@@ -28,7 +28,7 @@ import (
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pkg/sftp"
|
||||
"github.com/xanzy/ssh-agent"
|
||||
sshagent "github.com/xanzy/ssh-agent"
|
||||
"golang.org/x/crypto/ssh"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
@@ -66,7 +66,22 @@ func init() {
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "key_file",
|
||||
Help: "Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.",
|
||||
Help: "Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.",
|
||||
}, {
|
||||
Name: "key_file_pass",
|
||||
Help: `The passphrase to decrypt the PEM-encoded private key file.
|
||||
|
||||
Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
|
||||
in the new OpenSSH format can't be used.`,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "key_use_agent",
|
||||
Help: `When set forces the usage of the ssh-agent.
|
||||
|
||||
When key-file is also set, the ".pub" file of the specified key-file is read and only the associated key is
|
||||
requested from the ssh-agent. This helps to avoid ` + "`Too many authentication failures for *username*`" + ` errors
|
||||
when the ssh-agent contains many keys.`,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "use_insecure_cipher",
|
||||
Help: "Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.",
|
||||
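For illustration (the host, user and key path are invented), a config that uses the new options to ask the agent only for the key matching ~/.ssh/id_rsa.pub:

    [mysftp]
    type = sftp
    host = example.com
    user = alice
    key_file = ~/.ssh/id_rsa
    key_use_agent = true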
@@ -122,6 +137,8 @@ type Options struct {
|
||||
Port string `config:"port"`
|
||||
Pass string `config:"pass"`
|
||||
KeyFile string `config:"key_file"`
|
||||
KeyFilePass string `config:"key_file_pass"`
|
||||
KeyUseAgent bool `config:"key_use_agent"`
|
||||
UseInsecureCipher bool `config:"use_insecure_cipher"`
|
||||
DisableHashCheck bool `config:"disable_hashcheck"`
|
||||
AskPassword bool `config:"ask_password"`
|
||||
@@ -298,6 +315,18 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
|
||||
f.poolMu.Unlock()
|
||||
}
|
||||
|
||||
// shellExpand replaces a leading "~" with "${HOME}" and expands all environment
|
||||
// variables afterwards.
|
||||
func shellExpand(s string) string {
|
||||
if s != "" {
|
||||
if s[0] == '~' {
|
||||
s = "${HOME}" + s[1:]
|
||||
}
|
||||
s = os.ExpandEnv(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// NewFs creates a new Fs object from the name and root. It connects to
|
||||
// the host specified in the config file.
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
@@ -325,8 +354,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc")
|
||||
}
|
||||
|
||||
keyFile := shellExpand(opt.KeyFile)
|
||||
// Add ssh agent-auth if no password or file specified
|
||||
if opt.Pass == "" && opt.KeyFile == "" {
|
||||
if (opt.Pass == "" && keyFile == "") || opt.KeyUseAgent {
|
||||
sshAgentClient, _, err := sshagent.New()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't connect to ssh-agent")
|
||||
@@ -335,16 +365,46 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't read ssh agent signers")
|
||||
}
|
||||
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signers...))
|
||||
if keyFile != "" {
|
||||
pubBytes, err := ioutil.ReadFile(keyFile + ".pub")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read public key file")
|
||||
}
|
||||
pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse public key file")
|
||||
}
|
||||
pubM := pub.Marshal()
|
||||
found := false
|
||||
for _, s := range signers {
|
||||
if bytes.Equal(pubM, s.PublicKey().Marshal()) {
|
||||
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(s))
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return nil, errors.New("private key not found in the ssh-agent")
|
||||
}
|
||||
} else {
|
||||
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signers...))
|
||||
}
|
||||
}
|
||||
|
||||
// Load key file if specified
|
||||
if opt.KeyFile != "" {
|
||||
key, err := ioutil.ReadFile(opt.KeyFile)
|
||||
if keyFile != "" {
|
||||
key, err := ioutil.ReadFile(keyFile)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read private key file")
|
||||
}
|
||||
signer, err := ssh.ParsePrivateKey(key)
|
||||
clearpass := ""
|
||||
if opt.KeyFilePass != "" {
|
||||
clearpass, err = obscure.Reveal(opt.KeyFilePass)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
signer, err := ssh.ParsePrivateKeyWithPassphrase(key, []byte(clearpass))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse private key file")
|
||||
}
|
||||
@@ -594,12 +654,22 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
|
||||
// Rmdir removes the root directory of the Fs object
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
// Check to see if directory is empty as some servers will
|
||||
// delete recursively with RemoveDirectory
|
||||
entries, err := f.List(dir)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Rmdir")
|
||||
}
|
||||
if len(entries) != 0 {
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
}
|
||||
// Remove the directory
|
||||
root := path.Join(f.root, dir)
|
||||
c, err := f.getSftpConnection()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Rmdir")
|
||||
}
|
||||
err = c.sftpClient.Remove(root)
|
||||
err = c.sftpClient.RemoveDirectory(root)
|
||||
f.putSftpConnection(&c, err)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -43,6 +43,20 @@ Above this size files will be chunked into a _segments container. The
|
||||
default for this is 5GB which is its maximum value.`,
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_chunk",
|
||||
Help: `Don't chunk files during streaming upload.
|
||||
|
||||
When doing streaming uploads (eg using rcat or mount) setting this
|
||||
flag will cause the swift backend to not upload chunked files.
|
||||
|
||||
This will limit the maximum upload size to 5GB. However non chunked
|
||||
files are easier to deal with and have an MD5SUM.
|
||||
|
||||
Rclone will still chunk files bigger than chunk_size when doing normal
|
||||
copy operations.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}}
|
||||
|
||||
// Register with Fs
|
||||
@@ -175,6 +189,7 @@ type Options struct {
|
||||
StoragePolicy string `config:"storage_policy"`
|
||||
EndpointType string `config:"endpoint_type"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
NoChunk bool `config:"no_chunk"`
|
||||
}
|
||||
|
||||
// Fs represents a remote swift server
|
||||
@@ -196,10 +211,13 @@ type Fs struct {
|
||||
//
|
||||
// Will definitely have info but maybe not meta
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
info swift.Object // Info from the swift object if known
|
||||
headers swift.Headers // The object headers if known
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
size int64
|
||||
lastModified time.Time
|
||||
contentType string
|
||||
md5 string
|
||||
headers swift.Headers // The object headers if known
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -432,7 +450,10 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
|
||||
}
|
||||
if info != nil {
|
||||
// Set info but not headers
|
||||
o.info = *info
|
||||
err := o.decodeMetaData(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
err := o.readMetaData() // reads info and headers, returning an error
|
||||
if err != nil {
|
||||
@@ -829,7 +850,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {
|
||||
fs.Debugf(o, "Returning empty Md5sum for swift large object")
|
||||
return "", nil
|
||||
}
|
||||
return strings.ToLower(o.info.Hash), nil
|
||||
return strings.ToLower(o.md5), nil
|
||||
}
|
||||
|
||||
// hasHeader checks for the header passed in returning false if the
|
||||
@@ -858,7 +879,22 @@ func (o *Object) isStaticLargeObject() (bool, error) {
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
return o.info.Bytes
|
||||
return o.size
|
||||
}
|
||||
|
||||
// decodeMetaData sets the metadata in the object from a swift.Object
|
||||
//
|
||||
// Sets
|
||||
// o.lastModified
|
||||
// o.size
|
||||
// o.md5
|
||||
// o.contentType
|
||||
func (o *Object) decodeMetaData(info *swift.Object) (err error) {
|
||||
o.lastModified = info.LastModified
|
||||
o.size = info.Bytes
|
||||
o.md5 = info.Hash
|
||||
o.contentType = info.ContentType
|
||||
return nil
|
||||
}
|
||||
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
@@ -882,8 +918,11 @@ func (o *Object) readMetaData() (err error) {
|
||||
}
|
||||
return err
|
||||
}
|
||||
o.info = info
|
||||
o.headers = h
|
||||
err = o.decodeMetaData(&info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -894,17 +933,17 @@ func (o *Object) readMetaData() (err error) {
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime() time.Time {
|
||||
if fs.Config.UseServerModTime {
|
||||
return o.info.LastModified
|
||||
return o.lastModified
|
||||
}
|
||||
err := o.readMetaData()
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Failed to read metadata: %s", err)
|
||||
return o.info.LastModified
|
||||
return o.lastModified
|
||||
}
|
||||
modTime, err := o.headers.ObjectMetadata().GetModTime()
|
||||
if err != nil {
|
||||
// fs.Logf(o, "Failed to read mtime from object: %v", err)
|
||||
return o.info.LastModified
|
||||
return o.lastModified
|
||||
}
|
||||
return modTime
|
||||
}
|
||||
@@ -938,7 +977,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
|
||||
// It compares the Content-Type to directoryMarkerContentType - that
|
||||
// makes it a directory marker which is not storable.
|
||||
func (o *Object) Storable() bool {
|
||||
return o.info.ContentType != directoryMarkerContentType
|
||||
return o.contentType != directoryMarkerContentType
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
@@ -1105,20 +1144,31 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
contentType := fs.MimeType(src)
|
||||
headers := m.ObjectHeaders()
|
||||
uniquePrefix := ""
|
||||
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
|
||||
if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
|
||||
uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.headers = nil // wipe old metadata
|
||||
} else {
|
||||
headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length as we know it
|
||||
if size >= 0 {
|
||||
headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
|
||||
}
|
||||
var rxHeaders swift.Headers
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
_, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
|
||||
rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// set Metadata since ObjectPut checked the hash and length so we know the
|
||||
// object has been safely uploaded
|
||||
o.lastModified = modTime
|
||||
o.size = size
|
||||
o.md5 = rxHeaders["ETag"]
|
||||
o.contentType = contentType
|
||||
o.headers = headers
|
||||
}
|
||||
|
||||
// If file was a dynamic large object then remove old/all segments
|
||||
@@ -1129,8 +1179,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
}
|
||||
}
|
||||
|
||||
// Read the metadata from the newly created object
|
||||
o.headers = nil // wipe old metadata
|
||||
// Read the metadata from the newly created object if necessary
|
||||
return o.readMetaData()
|
||||
}
|
||||
|
||||
@@ -1160,7 +1209,7 @@ func (o *Object) Remove() error {
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
func (o *Object) MimeType() string {
|
||||
return o.info.ContentType
|
||||
return o.contentType
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
|
||||
@@ -6,7 +6,10 @@ import (
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -148,6 +151,8 @@ var timeFormats = []string{
|
||||
time.RFC3339, // Wed, 31 Oct 2018 13:57:11 CET (as used by komfortcloud.de)
|
||||
}
|
||||
|
||||
var oneTimeError sync.Once
|
||||
|
||||
// UnmarshalXML turns XML into a Time
|
||||
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
var v string
|
||||
@@ -171,5 +176,14 @@ func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
oneTimeError.Do(func() {
|
||||
fs.Errorf(nil, "Failed to parse time %q - using the epoch", v)
|
||||
})
|
||||
// Return the epoch instead
|
||||
*t = Time(time.Unix(0, 0))
|
||||
// ignore error
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -601,10 +601,9 @@ func (f *Fs) mkParentDir(dirPath string) error {
|
||||
return f.mkdir(parent)
|
||||
}
|
||||
|
||||
// mkdir makes the directory and parents using native paths
|
||||
func (f *Fs) mkdir(dirPath string) error {
|
||||
// defer log.Trace(dirPath, "")("")
|
||||
// We assume the root is already ceated
|
||||
// low level mkdir, only makes the directory, doesn't attempt to create parents
|
||||
func (f *Fs) _mkdir(dirPath string) error {
|
||||
// We assume the root is already created
|
||||
if dirPath == "" {
|
||||
return nil
|
||||
}
|
||||
@@ -617,20 +616,26 @@ func (f *Fs) mkdir(dirPath string) error {
|
||||
Path: dirPath,
|
||||
NoResponse: true,
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
return f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.Call(&opts)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// mkdir makes the directory and parents using native paths
|
||||
func (f *Fs) mkdir(dirPath string) error {
|
||||
// defer log.Trace(dirPath, "")("")
|
||||
err := f._mkdir(dirPath)
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// already exists
|
||||
if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable {
|
||||
return nil
|
||||
}
|
||||
// parent does not exists
|
||||
// parent does not exist
|
||||
if apiErr.StatusCode == http.StatusConflict {
|
||||
err = f.mkParentDir(dirPath)
|
||||
if err == nil {
|
||||
err = f.mkdir(dirPath)
|
||||
err = f._mkdir(dirPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bin/build-xgo-cgofuse.sh (new executable file)
@@ -0,0 +1,5 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
docker build -t rclone/xgo-cgofuse https://github.com/billziss-gh/cgofuse.git
|
||||
docker images
|
||||
docker push rclone/xgo-cgofuse
|
||||
cmd/cmd.go
@@ -51,7 +51,7 @@ var (
|
||||
errorCommandNotFound = errors.New("command not found")
|
||||
errorUncategorized = errors.New("uncategorized error")
|
||||
errorNotEnoughArguments = errors.New("not enough arguments")
|
||||
errorTooManyArguents = errors.New("too many arguments")
|
||||
errorTooManyArguments = errors.New("too many arguments")
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -294,14 +294,12 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
|
||||
func CheckArgs(MinArgs, MaxArgs int, cmd *cobra.Command, args []string) {
|
||||
if len(args) < MinArgs {
|
||||
_ = cmd.Usage()
|
||||
_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments minimum\n", cmd.Name(), MinArgs)
|
||||
// os.Exit(1)
|
||||
_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments minimum: you provided %d non flag arguments: %q\n", cmd.Name(), MinArgs, len(args), args)
|
||||
resolveExitCode(errorNotEnoughArguments)
|
||||
} else if len(args) > MaxArgs {
|
||||
_ = cmd.Usage()
|
||||
_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum\n", cmd.Name(), MaxArgs)
|
||||
// os.Exit(1)
|
||||
resolveExitCode(errorTooManyArguents)
|
||||
_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum: you provided %d non flag arguments: %q\n", cmd.Name(), MaxArgs, len(args), args)
|
||||
resolveExitCode(errorTooManyArguments)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -93,6 +93,15 @@ For example to make a swift remote of name myremote using auto config
|
||||
you would do:
|
||||
|
||||
rclone config create myremote swift env_auth true
|
||||
|
||||
Note that if the config process would normally ask a question the
|
||||
default is taken. Each time that happens rclone will print a message
|
||||
saying how to affect the value taken.
|
||||
|
||||
So for example if you wanted to configure a Google Drive remote but
|
||||
using remote authorization you would do this:
|
||||
|
||||
rclone config create mydrive drive config_is_local false
|
||||
`,
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(2, 256, command, args)
|
||||
@@ -119,6 +128,11 @@ in pairs of <key> <value>.
|
||||
For example to update the env_auth field of a remote of name myremote you would do:
|
||||
|
||||
rclone config update myremote swift env_auth true
|
||||
|
||||
If the remote uses oauth the token will be updated, if you don't
|
||||
require this add an extra parameter thus:
|
||||
|
||||
rclone config update myremote swift env_auth true config_refresh_token false
|
||||
`,
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(3, 256, command, args)
|
||||
|
||||
@@ -51,6 +51,17 @@ written a trailing / - meaning "copy the contents of this directory".
|
||||
This applies to all commands and whether you are talking about the
|
||||
source or destination.
|
||||
|
||||
See the [--no-traverse](/docs/#no-traverse) option for controlling
|
||||
whether rclone lists the destination directory or not. Supplying this
|
||||
option when copying a small number of files into a large destination
|
||||
can speed transfers up greatly.
|
||||
|
||||
For example, if you have many files in /path/to/src but only a few of
|
||||
them change every day, you can copy all the files which have
|
||||
changed recently very efficiently like this:
|
||||
|
||||
rclone copy --max-age 24h --no-traverse /path/to/src remote:
|
||||
|
||||
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
|
||||
@@ -7,7 +7,7 @@ package mountlib
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/sevlyar/go-daemon"
|
||||
daemon "github.com/sevlyar/go-daemon"
|
||||
)
|
||||
|
||||
func startBackgroundMode() bool {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -62,6 +63,28 @@ func checkMountEmpty(mountpoint string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check the root doesn't overlap the mountpoint
|
||||
func checkMountpointOverlap(root, mountpoint string) error {
|
||||
abs := func(x string) string {
|
||||
if absX, err := filepath.EvalSymlinks(x); err == nil {
|
||||
x = absX
|
||||
}
|
||||
if absX, err := filepath.Abs(x); err == nil {
|
||||
x = absX
|
||||
}
|
||||
x = filepath.ToSlash(x)
|
||||
if !strings.HasSuffix(x, "/") {
|
||||
x += "/"
|
||||
}
|
||||
return x
|
||||
}
|
||||
rootAbs, mountpointAbs := abs(root), abs(mountpoint)
|
||||
if strings.HasPrefix(rootAbs, mountpointAbs) || strings.HasPrefix(mountpointAbs, rootAbs) {
|
||||
return errors.Errorf("mount point %q and directory to be mounted %q mustn't overlap", mountpoint, root)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
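checkMountpointOverlap guards against mounting a local directory onto itself or onto one of its own children, which would otherwise make the mount recurse into itself. For example (paths invented):

    checkMountpointOverlap("/home/alice/data", "/home/alice/data/mnt") // error: the two overlap
    checkMountpointOverlap("/home/alice/data", "/mnt/rclone")          // nil: no overlap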
|
||||
// NewMountCommand makes a mount command with the given name and Mount function
|
||||
func NewMountCommand(commandName string, Mount func(f fs.Fs, mountpoint string) error) *cobra.Command {
|
||||
var commandDefintion = &cobra.Command{
|
||||
@@ -220,7 +243,14 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
|
||||
config.PassConfigKeyForDaemonization = true
|
||||
}
|
||||
|
||||
mountpoint := args[1]
|
||||
fdst := cmd.NewFsDir(args)
|
||||
if fdst.Name() == "" || fdst.Name() == "local" {
|
||||
err := checkMountpointOverlap(fdst.Root(), mountpoint)
|
||||
if err != nil {
|
||||
log.Fatalf("Fatal error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Show stats if the user has specifically requested them
|
||||
if cmd.ShowStats() {
|
||||
@@ -230,7 +260,7 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
|
||||
// Skip checkMountEmpty if --allow-non-empty flag is used or if
|
||||
// the Operating System is Windows
|
||||
if !AllowNonEmpty && runtime.GOOS != "windows" {
|
||||
err := checkMountEmpty(args[1])
|
||||
err := checkMountEmpty(mountpoint)
|
||||
if err != nil {
|
||||
log.Fatalf("Fatal error: %v", err)
|
||||
}
|
||||
@@ -253,7 +283,7 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
|
||||
}
|
||||
}
|
||||
|
||||
err := Mount(fdst, args[1])
|
||||
err := Mount(fdst, mountpoint)
|
||||
if err != nil {
|
||||
log.Fatalf("Fatal error: %v", err)
|
||||
}
|
||||
|
||||
@@ -37,6 +37,11 @@ into ` + "`dest:path`" + ` then delete the original (if no errors on copy) in
|
||||
|
||||
If you want to delete empty source directories after move, use the --delete-empty-src-dirs flag.
|
||||
|
||||
See the [--no-traverse](/docs/#no-traverse) option for controlling
|
||||
whether rclone lists the destination directory or not. Supplying this
|
||||
option when moving a small number of files into a large destination
|
||||
can speed transfers up greatly.
|
||||
|
||||
**Important**: Since this can cause data loss, test first with the
|
||||
--dry-run flag.
|
||||
|
||||
|
||||
@@ -27,6 +27,11 @@ const (
|
||||
//
|
||||
// It returns a func which should be called to stop the stats.
|
||||
func startProgress() func() {
|
||||
err := initTerminal()
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Failed to start progress: %v", err)
|
||||
return func() {}
|
||||
}
|
||||
stopStats := make(chan struct{})
|
||||
oldLogPrint := fs.LogPrint
|
||||
if !log.Redirected() {
|
||||
|
||||
@@ -4,6 +4,10 @@ package cmd
|
||||
|
||||
import "os"
|
||||
|
||||
func initTerminal() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeToTerminal(b []byte) {
|
||||
_, _ = os.Stdout.Write(b)
|
||||
}
|
||||
|
||||
@@ -5,22 +5,31 @@ package cmd
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
ansiterm "github.com/Azure/go-ansiterm"
|
||||
"github.com/Azure/go-ansiterm/winterm"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
initAnsiParser sync.Once
|
||||
ansiParser *ansiterm.AnsiParser
|
||||
ansiParser *ansiterm.AnsiParser
|
||||
)
|
||||
|
||||
func initTerminal() error {
|
||||
winEventHandler := winterm.CreateWinEventHandler(os.Stdout.Fd(), os.Stdout)
|
||||
if winEventHandler == nil {
|
||||
err := syscall.GetLastError()
|
||||
if err == nil {
|
||||
err = errors.New("initialization failed")
|
||||
}
|
||||
return errors.Wrap(err, "windows terminal")
|
||||
}
|
||||
ansiParser = ansiterm.CreateParser("Ground", winEventHandler)
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeToTerminal(b []byte) {
|
||||
initAnsiParser.Do(func() {
|
||||
winEventHandler := winterm.CreateWinEventHandler(os.Stdout.Fd(), os.Stdout)
|
||||
ansiParser = ansiterm.CreateParser("Ground", winEventHandler)
|
||||
})
|
||||
// Remove all non-ASCII characters until this is fixed
|
||||
// https://github.com/Azure/go-ansiterm/issues/26
|
||||
r := []rune(string(b))
|
||||
|
||||
cmd/serve/dlna/cd-service-desc.go (new file)
@@ -0,0 +1,451 @@
|
||||
package dlna
|
||||
|
||||
const contentDirectoryServiceDescription = `<?xml version="1.0"?>
|
||||
<scpd xmlns="urn:schemas-upnp-org:service-1-0">
|
||||
<specVersion>
|
||||
<major>1</major>
|
||||
<minor>0</minor>
|
||||
</specVersion>
|
||||
<actionList>
|
||||
<action>
|
||||
<name>GetSearchCapabilities</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>SearchCaps</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>SearchCapabilities</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>GetSortCapabilities</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>SortCaps</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>SortCapabilities</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>GetSortExtensionCapabilities</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>SortExtensionCaps</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>SortExtensionCapabilities</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>GetFeatureList</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>FeatureList</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>FeatureList</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>GetSystemUpdateID</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>Id</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>SystemUpdateID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>Browse</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>ObjectID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>BrowseFlag</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_BrowseFlag</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>Filter</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Filter</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>StartingIndex</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Index</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>RequestedCount</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>SortCriteria</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_SortCriteria</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>Result</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Result</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>NumberReturned</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>TotalMatches</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>UpdateID</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_UpdateID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>Search</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>ContainerID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>SearchCriteria</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_SearchCriteria</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>Filter</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Filter</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>StartingIndex</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Index</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>RequestedCount</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>SortCriteria</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_SortCriteria</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>Result</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Result</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>NumberReturned</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>TotalMatches</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>UpdateID</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_UpdateID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>CreateObject</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>ContainerID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>Elements</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Result</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>ObjectID</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>Result</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Result</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>DestroyObject</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>ObjectID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>UpdateObject</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>ObjectID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>CurrentTagValue</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TagValueList</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>NewTagValue</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TagValueList</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>MoveObject</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>ObjectID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>NewParentID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>NewObjectID</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>ImportResource</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>SourceURI</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_URI</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>DestinationURI</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_URI</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>TransferID</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TransferID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>ExportResource</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>SourceURI</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_URI</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>DestinationURI</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_URI</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>TransferID</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TransferID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>StopTransferResource</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>TransferID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TransferID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>DeleteResource</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>ResourceURI</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_URI</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>GetTransferProgress</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>TransferID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TransferID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>TransferStatus</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TransferStatus</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>TransferLength</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TransferLength</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>TransferTotal</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TransferTotal</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>CreateReference</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>ContainerID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>ObjectID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>NewID</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
</actionList>
|
||||
<serviceStateTable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>SearchCapabilities</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>SortCapabilities</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>SortExtensionCapabilities</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="yes">
|
||||
<name>SystemUpdateID</name>
|
||||
<dataType>ui4</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="yes">
|
||||
<name>ContainerUpdateIDs</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="yes">
|
||||
<name>TransferIDs</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>FeatureList</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_ObjectID</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_Result</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_SearchCriteria</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_BrowseFlag</name>
|
||||
<dataType>string</dataType>
|
||||
<allowedValueList>
|
||||
<allowedValue>BrowseMetadata</allowedValue>
|
||||
<allowedValue>BrowseDirectChildren</allowedValue>
|
||||
</allowedValueList>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_Filter</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_SortCriteria</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_Index</name>
|
||||
<dataType>ui4</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_Count</name>
|
||||
<dataType>ui4</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_UpdateID</name>
|
||||
<dataType>ui4</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_TransferID</name>
|
||||
<dataType>ui4</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_TransferStatus</name>
|
||||
<dataType>string</dataType>
|
||||
<allowedValueList>
|
||||
<allowedValue>COMPLETED</allowedValue>
|
||||
<allowedValue>ERROR</allowedValue>
|
||||
<allowedValue>IN_PROGRESS</allowedValue>
|
||||
<allowedValue>STOPPED</allowedValue>
|
||||
</allowedValueList>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_TransferLength</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_TransferTotal</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_TagValueList</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_URI</name>
|
||||
<dataType>uri</dataType>
|
||||
</stateVariable>
|
||||
</serviceStateTable>
|
||||
</scpd>`
|
||||
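As an aside (an illustrative sketch, not part of the change itself): the Browse action declared in the service description above is the one that cds.go below decodes into its browse struct. A minimal, self-contained Go sketch of that decode, using a made-up argument payload:

package main

import (
	"encoding/xml"
	"fmt"
)

// browse mirrors the struct defined in cds.go below.
type browse struct {
	ObjectID       string
	BrowseFlag     string
	Filter         string
	StartingIndex  int
	RequestedCount int
}

func main() {
	// Made-up example arguments for a Browse request on the root container.
	argsXML := []byte(`<Browse>
		<ObjectID>0</ObjectID>
		<BrowseFlag>BrowseDirectChildren</BrowseFlag>
		<Filter>*</Filter>
		<StartingIndex>0</StartingIndex>
		<RequestedCount>10</RequestedCount>
	</Browse>`)
	var b browse
	if err := xml.Unmarshal(argsXML, &b); err != nil {
		fmt.Println("unmarshal:", err)
		return
	}
	// Prints: {ObjectID:0 BrowseFlag:BrowseDirectChildren Filter:* StartingIndex:0 RequestedCount:10}
	fmt.Printf("%+v\n", b)
}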
240 cmd/serve/dlna/cds.go Normal file
@@ -0,0 +1,240 @@
|
||||
package dlna
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
|
||||
"github.com/anacrolix/dms/dlna"
|
||||
"github.com/anacrolix/dms/upnp"
|
||||
"github.com/anacrolix/dms/upnpav"
|
||||
"github.com/ncw/rclone/vfs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type contentDirectoryService struct {
|
||||
*server
|
||||
upnp.Eventing
|
||||
}
|
||||
|
||||
func (cds *contentDirectoryService) updateIDString() string {
|
||||
return fmt.Sprintf("%d", uint32(os.Getpid()))
|
||||
}
|
||||
|
||||
// Turns the given entry and DMS host into a UPnP object. A nil object is
|
||||
// returned if the entry is not of interest.
|
||||
func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fileInfo os.FileInfo, host string) (ret interface{}, err error) {
|
||||
obj := upnpav.Object{
|
||||
ID: cdsObject.ID(),
|
||||
Restricted: 1,
|
||||
ParentID: cdsObject.ParentID(),
|
||||
}
|
||||
|
||||
if fileInfo.IsDir() {
|
||||
obj.Class = "object.container.storageFolder"
|
||||
obj.Title = fileInfo.Name()
|
||||
ret = upnpav.Container{Object: obj}
|
||||
return
|
||||
}
|
||||
|
||||
if !fileInfo.Mode().IsRegular() {
|
||||
return
|
||||
}
|
||||
|
||||
// Hardcode "videoItem" so that files show up in VLC.
|
||||
obj.Class = "object.item.videoItem"
|
||||
obj.Title = fileInfo.Name()
|
||||
|
||||
item := upnpav.Item{
|
||||
Object: obj,
|
||||
Res: make([]upnpav.Resource, 0, 1),
|
||||
}
|
||||
|
||||
item.Res = append(item.Res, upnpav.Resource{
|
||||
URL: (&url.URL{
|
||||
Scheme: "http",
|
||||
Host: host,
|
||||
Path: resPath,
|
||||
RawQuery: url.Values{
|
||||
"path": {cdsObject.Path},
|
||||
}.Encode(),
|
||||
}).String(),
|
||||
// Hardcode "video/x-matroska" so that files show up in VLC.
|
||||
ProtocolInfo: fmt.Sprintf("http-get:*:video/x-matroska:%s", dlna.ContentFeatures{
|
||||
SupportRange: true,
|
||||
}.String()),
|
||||
Bitrate: 0,
|
||||
Duration: "",
|
||||
Size: uint64(fileInfo.Size()),
|
||||
Resolution: "",
|
||||
})
|
||||
|
||||
ret = item
|
||||
return
|
||||
}
|
||||
|
||||
// Returns all the upnpav objects in a directory.
|
||||
func (cds *contentDirectoryService) readContainer(o object, host string) (ret []interface{}, err error) {
|
||||
node, err := cds.vfs.Stat(o.Path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !node.IsDir() {
|
||||
err = errors.New("not a directory")
|
||||
return
|
||||
}
|
||||
|
||||
dir := node.(*vfs.Dir)
|
||||
dirEntries, err := dir.ReadDirAll()
|
||||
if err != nil {
|
||||
err = errors.New("failed to list directory")
|
||||
return
|
||||
}
|
||||
|
||||
sort.Sort(dirEntries)
|
||||
|
||||
for _, de := range dirEntries {
|
||||
child := object{
|
||||
path.Join(o.Path, de.Name()),
|
||||
}
|
||||
obj, err := cds.cdsObjectToUpnpavObject(child, de, host)
|
||||
if err != nil {
|
||||
log.Printf("error with %s: %s", child.FilePath(), err)
|
||||
continue
|
||||
}
|
||||
if obj != nil {
|
||||
ret = append(ret, obj)
|
||||
} else {
|
||||
log.Printf("bad %s", de)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type browse struct {
|
||||
ObjectID string
|
||||
BrowseFlag string
|
||||
Filter string
|
||||
StartingIndex int
|
||||
RequestedCount int
|
||||
}
|
||||
|
||||
// ContentDirectory object from ObjectID.
|
||||
func (cds *contentDirectoryService) objectFromID(id string) (o object, err error) {
|
||||
o.Path, err = url.QueryUnescape(id)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if o.Path == "0" {
|
||||
o.Path = "/"
|
||||
}
|
||||
o.Path = path.Clean(o.Path)
|
||||
if !path.IsAbs(o.Path) {
|
||||
err = fmt.Errorf("bad ObjectID %v", o.Path)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (cds *contentDirectoryService) Handle(action string, argsXML []byte, r *http.Request) (map[string]string, error) {
|
||||
host := r.Host
|
||||
|
||||
switch action {
|
||||
case "GetSystemUpdateID":
|
||||
return map[string]string{
|
||||
"Id": cds.updateIDString(),
|
||||
}, nil
|
||||
case "GetSortCapabilities":
|
||||
return map[string]string{
|
||||
"SortCaps": "dc:title",
|
||||
}, nil
|
||||
case "Browse":
|
||||
var browse browse
|
||||
if err := xml.Unmarshal([]byte(argsXML), &browse); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
obj, err := cds.objectFromID(browse.ObjectID)
|
||||
if err != nil {
|
||||
return nil, upnp.Errorf(upnpav.NoSuchObjectErrorCode, err.Error())
|
||||
}
|
||||
switch browse.BrowseFlag {
|
||||
case "BrowseDirectChildren":
|
||||
objs, err := cds.readContainer(obj, host)
|
||||
if err != nil {
|
||||
return nil, upnp.Errorf(upnpav.NoSuchObjectErrorCode, err.Error())
|
||||
}
|
||||
totalMatches := len(objs)
|
||||
objs = objs[func() (low int) {
|
||||
low = browse.StartingIndex
|
||||
if low > len(objs) {
|
||||
low = len(objs)
|
||||
}
|
||||
return
|
||||
}():]
|
||||
if browse.RequestedCount != 0 && int(browse.RequestedCount) < len(objs) {
|
||||
objs = objs[:browse.RequestedCount]
|
||||
}
|
||||
result, err := xml.Marshal(objs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return map[string]string{
|
||||
"TotalMatches": fmt.Sprint(totalMatches),
|
||||
"NumberReturned": fmt.Sprint(len(objs)),
|
||||
"Result": didlLite(string(result)),
|
||||
"UpdateID": cds.updateIDString(),
|
||||
}, nil
|
||||
default:
|
||||
return nil, upnp.Errorf(upnp.ArgumentValueInvalidErrorCode, "unhandled browse flag: %v", browse.BrowseFlag)
|
||||
}
|
||||
case "GetSearchCapabilities":
|
||||
return map[string]string{
|
||||
"SearchCaps": "",
|
||||
}, nil
|
||||
default:
|
||||
return nil, upnp.InvalidActionError
|
||||
}
|
||||
}
|
||||
|
||||
// Represents a ContentDirectory object.
|
||||
type object struct {
|
||||
Path string // The cleaned, absolute path for the object relative to the server.
|
||||
}
|
||||
|
||||
// Returns the actual local filesystem path for the object.
|
||||
func (o *object) FilePath() string {
|
||||
return filepath.FromSlash(o.Path)
|
||||
}
|
||||
|
||||
// Returns the ObjectID for the object. This is used in various ContentDirectory actions.
|
||||
func (o object) ID() string {
|
||||
if !path.IsAbs(o.Path) {
|
||||
log.Panicf("Relative object path: %s", o.Path)
|
||||
}
|
||||
if len(o.Path) == 1 {
|
||||
return "0"
|
||||
}
|
||||
return url.QueryEscape(o.Path)
|
||||
}
|
||||
|
||||
func (o *object) IsRoot() bool {
|
||||
return o.Path == "/"
|
||||
}
|
||||
|
||||
// Returns the object's parent ObjectID. Fortunately it can be deduced from the
|
||||
// ObjectID (for now).
|
||||
func (o object) ParentID() string {
|
||||
if o.IsRoot() {
|
||||
return "-1"
|
||||
}
|
||||
o.Path = path.Dir(o.Path)
|
||||
return o.ID()
|
||||
}
|
||||
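For orientation (an illustrative sketch, not part of the diff): the ObjectID scheme used by cds.go above is simply a URL-escaped absolute VFS path, with "0" reserved for the root, so the round trip can be reproduced with the standard library alone. The example path is hypothetical:

package main

import (
	"fmt"
	"net/url"
	"path"
)

// toObjectID mirrors object.ID() above: "/" becomes "0", anything else is query-escaped.
func toObjectID(p string) string {
	if p == "/" {
		return "0"
	}
	return url.QueryEscape(p)
}

// fromObjectID mirrors objectFromID() above: unescape, map "0" back to "/", then clean.
func fromObjectID(id string) (string, error) {
	p, err := url.QueryUnescape(id)
	if err != nil {
		return "", err
	}
	if p == "0" {
		p = "/"
	}
	return path.Clean(p), nil
}

func main() {
	id := toObjectID("/movies/holiday.mkv") // hypothetical path; prints %2Fmovies%2Fholiday.mkv
	p, _ := fromObjectID(id)
	fmt.Println(id, p)
}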
440 cmd/serve/dlna/dlna.go Normal file
@@ -0,0 +1,440 @@
|
||||
package dlna
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/anacrolix/dms/soap"
|
||||
"github.com/anacrolix/dms/ssdp"
|
||||
"github.com/anacrolix/dms/upnp"
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/cmd/serve/dlna/dlnaflags"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/vfs"
|
||||
"github.com/ncw/rclone/vfs/vfsflags"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
dlnaflags.AddFlags(Command.Flags())
|
||||
vfsflags.AddFlags(Command.Flags())
|
||||
}
|
||||
|
||||
// Command definition for cobra.
|
||||
var Command = &cobra.Command{
|
||||
Use: "dlna remote:path",
|
||||
Short: `Serve remote:path over DLNA`,
|
||||
Long: `rclone serve dlna is a DLNA media server for media stored in a rclone remote. Many
|
||||
devices, such as the Xbox and PlayStation, can automatically discover this server in the LAN
|
||||
and play audio/video from it. VLC is also supported. Service discovery uses UDP multicast
|
||||
packets (SSDP) and will thus only work on LANs.
|
||||
|
||||
Rclone will list all files present in the remote, without filtering based on media formats or
|
||||
file extensions. Additionally, there is no media transcoding support. This means that some
|
||||
players might show files that they are not able to play back correctly.
|
||||
|
||||
` + dlnaflags.Help + vfs.Help,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
f := cmd.NewFsSrc(args)
|
||||
|
||||
cmd.Run(false, false, command, func() error {
|
||||
s := newServer(f, &dlnaflags.Opt)
|
||||
if err := s.Serve(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
s.Wait()
|
||||
return nil
|
||||
})
|
||||
},
|
||||
}
|
||||
|
||||
const (
|
||||
serverField = "Linux/3.4 DLNADOC/1.50 UPnP/1.0 DMS/1.0"
|
||||
rootDeviceType = "urn:schemas-upnp-org:device:MediaServer:1"
|
||||
rootDeviceModelName = "rclone"
|
||||
resPath = "/res"
|
||||
rootDescPath = "/rootDesc.xml"
|
||||
serviceControlURL = "/ctl"
|
||||
)
|
||||
|
||||
// Groups the service definition with its XML description.
|
||||
type service struct {
|
||||
upnp.Service
|
||||
SCPD string
|
||||
}
|
||||
|
||||
// Exposed UPnP AV services.
|
||||
var services = []*service{
|
||||
{
|
||||
Service: upnp.Service{
|
||||
ServiceType: "urn:schemas-upnp-org:service:ContentDirectory:1",
|
||||
ServiceId: "urn:upnp-org:serviceId:ContentDirectory",
|
||||
ControlURL: serviceControlURL,
|
||||
},
|
||||
SCPD: contentDirectoryServiceDescription,
|
||||
},
|
||||
}
|
||||
|
||||
func devices() []string {
|
||||
return []string{
|
||||
"urn:schemas-upnp-org:device:MediaServer:1",
|
||||
}
|
||||
}
|
||||
|
||||
func serviceTypes() (ret []string) {
|
||||
for _, s := range services {
|
||||
ret = append(ret, s.ServiceType)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type server struct {
|
||||
// The service SOAP handler keyed by service URN.
|
||||
services map[string]UPnPService
|
||||
|
||||
Interfaces []net.Interface
|
||||
|
||||
HTTPConn net.Listener
|
||||
httpListenAddr string
|
||||
httpServeMux *http.ServeMux
|
||||
|
||||
rootDeviceUUID string
|
||||
rootDescXML []byte
|
||||
|
||||
FriendlyName string
|
||||
|
||||
// For waiting on the listener to close
|
||||
waitChan chan struct{}
|
||||
|
||||
// Time interval between SSPD announces
|
||||
AnnounceInterval time.Duration
|
||||
|
||||
f fs.Fs
|
||||
vfs *vfs.VFS
|
||||
}
|
||||
|
||||
func newServer(f fs.Fs, opt *dlnaflags.Options) *server {
|
||||
hostName, err := os.Hostname()
|
||||
if err != nil {
|
||||
hostName = ""
|
||||
} else {
|
||||
hostName = " (" + hostName + ")"
|
||||
}
|
||||
|
||||
s := &server{
|
||||
AnnounceInterval: 10 * time.Second,
|
||||
FriendlyName: "rclone" + hostName,
|
||||
|
||||
httpListenAddr: opt.ListenAddr,
|
||||
|
||||
f: f,
|
||||
vfs: vfs.New(f, &vfsflags.Opt),
|
||||
}
|
||||
|
||||
s.initServicesMap()
|
||||
s.listInterfaces()
|
||||
|
||||
s.httpServeMux = http.NewServeMux()
|
||||
s.rootDeviceUUID = makeDeviceUUID(s.FriendlyName)
|
||||
s.rootDescXML, err = xml.MarshalIndent(
|
||||
upnp.DeviceDesc{
|
||||
SpecVersion: upnp.SpecVersion{Major: 1, Minor: 0},
|
||||
Device: upnp.Device{
|
||||
DeviceType: rootDeviceType,
|
||||
FriendlyName: s.FriendlyName,
|
||||
Manufacturer: "rclone (rclone.org)",
|
||||
ModelName: rootDeviceModelName,
|
||||
UDN: s.rootDeviceUUID,
|
||||
ServiceList: func() (ss []upnp.Service) {
|
||||
for _, s := range services {
|
||||
ss = append(ss, s.Service)
|
||||
}
|
||||
return
|
||||
}(),
|
||||
},
|
||||
},
|
||||
" ", " ")
|
||||
if err != nil {
|
||||
// Contents are hardcoded, so this will never happen in production.
|
||||
log.Panicf("Marshal root descriptor XML: %v", err)
|
||||
}
|
||||
s.rootDescXML = append([]byte(`<?xml version="1.0"?>`), s.rootDescXML...)
|
||||
s.initMux(s.httpServeMux)
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// UPnPService is the interface for the SOAP service.
|
||||
type UPnPService interface {
|
||||
Handle(action string, argsXML []byte, r *http.Request) (respArgs map[string]string, err error)
|
||||
Subscribe(callback []*url.URL, timeoutSeconds int) (sid string, actualTimeout int, err error)
|
||||
Unsubscribe(sid string) error
|
||||
}
|
||||
|
||||
// initServicesMap is called during initialization of the server to prepare some internal datastructures.
|
||||
func (s *server) initServicesMap() {
|
||||
urn, err := upnp.ParseServiceType(services[0].ServiceType)
|
||||
if err != nil {
|
||||
// The service type is hardcoded, so this error should never happen.
|
||||
log.Panicf("ParseServiceType: %v", err)
|
||||
}
|
||||
s.services = map[string]UPnPService{
|
||||
urn.Type: &contentDirectoryService{
|
||||
server: s,
|
||||
},
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// listInterfaces is called during initialization of the server to list the network interfaces
|
||||
// on the machine.
|
||||
func (s *server) listInterfaces() {
|
||||
ifs, err := net.Interfaces()
|
||||
if err != nil {
|
||||
fs.Errorf(s.f, "list network interfaces: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
var tmp []net.Interface
|
||||
for _, intf := range ifs {
|
||||
if intf.Flags&net.FlagUp == 0 || intf.MTU <= 0 {
|
||||
continue
|
||||
}
|
||||
s.Interfaces = append(s.Interfaces, intf)
|
||||
tmp = append(tmp, intf)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *server) initMux(mux *http.ServeMux) {
|
||||
mux.HandleFunc(resPath, func(w http.ResponseWriter, r *http.Request) {
|
||||
remotePath := r.URL.Query().Get("path")
|
||||
node, err := s.vfs.Stat(remotePath)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Length", strconv.FormatInt(node.Size(), 10))
|
||||
|
||||
file := node.(*vfs.File)
|
||||
in, err := file.Open(os.O_RDONLY)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
defer fs.CheckClose(in, &err)
|
||||
|
||||
http.ServeContent(w, r, remotePath, node.ModTime(), in)
|
||||
return
|
||||
})
|
||||
|
||||
mux.HandleFunc(rootDescPath, func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("content-type", `text/xml; charset="utf-8"`)
|
||||
w.Header().Set("content-length", fmt.Sprint(len(s.rootDescXML)))
|
||||
w.Header().Set("server", serverField)
|
||||
_, err := w.Write(s.rootDescXML)
|
||||
if err != nil {
|
||||
fs.Errorf(s, "Failed to serve root descriptor XML: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Install handlers to serve SCPD for each UPnP service.
|
||||
for _, s := range services {
|
||||
p := path.Join("/scpd", s.ServiceId)
|
||||
s.SCPDURL = p
|
||||
|
||||
mux.HandleFunc(s.SCPDURL, func(serviceDesc string) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("content-type", `text/xml; charset="utf-8"`)
|
||||
http.ServeContent(w, r, ".xml", time.Time{}, bytes.NewReader([]byte(serviceDesc)))
|
||||
}
|
||||
}(s.SCPD))
|
||||
}
|
||||
|
||||
mux.HandleFunc(serviceControlURL, s.serviceControlHandler)
|
||||
}
|
||||
|
||||
// Handle a service control HTTP request.
|
||||
func (s *server) serviceControlHandler(w http.ResponseWriter, r *http.Request) {
|
||||
soapActionString := r.Header.Get("SOAPACTION")
|
||||
soapAction, err := upnp.ParseActionHTTPHeader(soapActionString)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
var env soap.Envelope
|
||||
if err := xml.NewDecoder(r.Body).Decode(&env); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", `text/xml; charset="utf-8"`)
|
||||
w.Header().Set("Ext", "")
|
||||
w.Header().Set("server", serverField)
|
||||
soapRespXML, code := func() ([]byte, int) {
|
||||
respArgs, err := s.soapActionResponse(soapAction, env.Body.Action, r)
|
||||
if err != nil {
|
||||
upnpErr := upnp.ConvertError(err)
|
||||
return mustMarshalXML(soap.NewFault("UPnPError", upnpErr)), 500
|
||||
}
|
||||
return marshalSOAPResponse(soapAction, respArgs), 200
|
||||
}()
|
||||
bodyStr := fmt.Sprintf(`<?xml version="1.0" encoding="utf-8" standalone="yes"?><s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"><s:Body>%s</s:Body></s:Envelope>`, soapRespXML)
|
||||
w.WriteHeader(code)
|
||||
if _, err := w.Write([]byte(bodyStr)); err != nil {
|
||||
log.Print(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Handle a SOAP request and return the response arguments or UPnP error.
|
||||
func (s *server) soapActionResponse(sa upnp.SoapAction, actionRequestXML []byte, r *http.Request) (map[string]string, error) {
|
||||
service, ok := s.services[sa.Type]
|
||||
if !ok {
|
||||
// TODO: What's the invalid service error?
|
||||
return nil, upnp.Errorf(upnp.InvalidActionErrorCode, "Invalid service: %s", sa.Type)
|
||||
}
|
||||
return service.Handle(sa.Action, actionRequestXML, r)
|
||||
}
|
||||
|
||||
// Serve runs the server - returns the error only if
|
||||
// the listener was not started; does not block, so
|
||||
// use s.Wait() to block on the listener indefinitely.
|
||||
func (s *server) Serve() (err error) {
|
||||
if s.HTTPConn == nil {
|
||||
s.HTTPConn, err = net.Listen("tcp", s.httpListenAddr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
s.startSSDP()
|
||||
}()
|
||||
|
||||
go func() {
|
||||
fs.Logf(s.f, "Serving HTTP on %s", s.HTTPConn.Addr().String())
|
||||
|
||||
err = s.serveHTTP()
|
||||
if err != nil {
|
||||
fs.Logf(s.f, "Error on serving HTTP server: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Wait blocks while the listener is open.
|
||||
func (s *server) Wait() {
|
||||
<-s.waitChan
|
||||
}
|
||||
|
||||
func (s *server) Close() {
|
||||
err := s.HTTPConn.Close()
|
||||
if err != nil {
|
||||
fs.Errorf(s.f, "Error closing HTTP server: %v", err)
|
||||
return
|
||||
}
|
||||
close(s.waitChan)
|
||||
}
|
||||
|
||||
// Run SSDP (multicast for server discovery) on all interfaces.
|
||||
func (s *server) startSSDP() {
|
||||
active := 0
|
||||
stopped := make(chan struct{})
|
||||
for _, intf := range s.Interfaces {
|
||||
active++
|
||||
go func(intf2 net.Interface) {
|
||||
defer func() {
|
||||
stopped <- struct{}{}
|
||||
}()
|
||||
s.ssdpInterface(intf2)
|
||||
}(intf)
|
||||
}
|
||||
for active > 0 {
|
||||
<-stopped
|
||||
active--
|
||||
}
|
||||
}
|
||||
|
||||
// Run SSDP server on an interface.
|
||||
func (s *server) ssdpInterface(intf net.Interface) {
|
||||
// Figure out which HTTP location to advertise based on the interface IP.
|
||||
advertiseLocationFn := func(ip net.IP) string {
|
||||
url := url.URL{
|
||||
Scheme: "http",
|
||||
Host: (&net.TCPAddr{
|
||||
IP: ip,
|
||||
Port: s.HTTPConn.Addr().(*net.TCPAddr).Port,
|
||||
}).String(),
|
||||
Path: rootDescPath,
|
||||
}
|
||||
return url.String()
|
||||
}
|
||||
|
||||
ssdpServer := ssdp.Server{
|
||||
Interface: intf,
|
||||
Devices: devices(),
|
||||
Services: serviceTypes(),
|
||||
Location: advertiseLocationFn,
|
||||
Server: serverField,
|
||||
UUID: s.rootDeviceUUID,
|
||||
NotifyInterval: s.AnnounceInterval,
|
||||
}
|
||||
|
||||
// An interface with these flags should be valid for SSDP.
|
||||
const ssdpInterfaceFlags = net.FlagUp | net.FlagMulticast
|
||||
|
||||
if err := ssdpServer.Init(); err != nil {
|
||||
if intf.Flags&ssdpInterfaceFlags != ssdpInterfaceFlags {
|
||||
// Didn't expect it to work anyway.
|
||||
return
|
||||
}
|
||||
if strings.Contains(err.Error(), "listen") {
|
||||
// OSX has a lot of dud interfaces. Failure to create a socket on
|
||||
// the interface are what we're expecting if the interface is no
|
||||
// good.
|
||||
return
|
||||
}
|
||||
log.Printf("Error creating ssdp server on %s: %s", intf.Name, err)
|
||||
return
|
||||
}
|
||||
defer ssdpServer.Close()
|
||||
log.Println("Started SSDP on", intf.Name)
|
||||
stopped := make(chan struct{})
|
||||
go func() {
|
||||
defer close(stopped)
|
||||
if err := ssdpServer.Serve(); err != nil {
|
||||
log.Printf("%q: %q\n", intf.Name, err)
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-s.waitChan:
|
||||
// Returning will close the server.
|
||||
case <-stopped:
|
||||
}
|
||||
}
|
||||
|
||||
func (s *server) serveHTTP() error {
|
||||
srv := &http.Server{
|
||||
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
s.httpServeMux.ServeHTTP(w, r)
|
||||
}),
|
||||
}
|
||||
err := srv.Serve(s.HTTPConn)
|
||||
select {
|
||||
case <-s.waitChan:
|
||||
return nil
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
88 cmd/serve/dlna/dlna_test.go Normal file
@@ -0,0 +1,88 @@
|
||||
// +build go1.8
|
||||
|
||||
package dlna
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/vfs"
|
||||
|
||||
_ "github.com/ncw/rclone/backend/local"
|
||||
"github.com/ncw/rclone/cmd/serve/dlna/dlnaflags"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
dlnaServer *server
|
||||
)
|
||||
|
||||
const (
|
||||
testBindAddress = "localhost:51777"
|
||||
testURL = "http://" + testBindAddress + "/"
|
||||
)
|
||||
|
||||
func startServer(t *testing.T, f fs.Fs) {
|
||||
opt := dlnaflags.DefaultOpt
|
||||
opt.ListenAddr = testBindAddress
|
||||
dlnaServer = newServer(f, &opt)
|
||||
assert.NoError(t, dlnaServer.Serve())
|
||||
}
|
||||
|
||||
func TestInit(t *testing.T) {
|
||||
config.LoadConfig()
|
||||
|
||||
f, err := fs.NewFs("testdata/files")
|
||||
l, _ := f.List("")
|
||||
fmt.Println(l)
|
||||
require.NoError(t, err)
|
||||
|
||||
startServer(t, f)
|
||||
}
|
||||
|
||||
// Make sure that it serves rootDesc.xml (SCPD in uPnP parlance).
|
||||
func TestRootSCPD(t *testing.T) {
|
||||
req, err := http.NewRequest("GET", testURL+"rootDesc.xml", nil)
|
||||
require.NoError(t, err)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, http.StatusOK, resp.StatusCode)
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
// Make sure that the SCPD contains a CDS service.
|
||||
require.Contains(t, string(body),
|
||||
"<serviceType>urn:schemas-upnp-org:service:ContentDirectory:1</serviceType>")
|
||||
}
|
||||
|
||||
// Make sure that it serves content from the remote.
|
||||
func TestServeContent(t *testing.T) {
|
||||
itemPath := "/small_jpeg.jpg"
|
||||
pathQuery := url.QueryEscape(itemPath)
|
||||
req, err := http.NewRequest("GET", testURL+"res?path="+pathQuery, nil)
|
||||
require.NoError(t, err)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
defer fs.CheckClose(resp.Body, &err)
|
||||
assert.Equal(t, http.StatusOK, resp.StatusCode)
|
||||
actualContents, err := ioutil.ReadAll(resp.Body)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Now compare the contents with the golden file.
|
||||
node, err := dlnaServer.vfs.Stat(itemPath)
|
||||
assert.NoError(t, err)
|
||||
goldenFile := node.(*vfs.File)
|
||||
goldenReader, err := goldenFile.Open(os.O_RDONLY)
|
||||
assert.NoError(t, err)
|
||||
defer fs.CheckClose(goldenReader, &err)
|
||||
goldenContents, err := ioutil.ReadAll(goldenReader)
|
||||
assert.NoError(t, err)
|
||||
|
||||
require.Equal(t, goldenContents, actualContents)
|
||||
}
|
||||
52 cmd/serve/dlna/dlna_util.go Normal file
@@ -0,0 +1,52 @@
|
||||
package dlna
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
|
||||
"github.com/anacrolix/dms/soap"
|
||||
"github.com/anacrolix/dms/upnp"
|
||||
)
|
||||
|
||||
func makeDeviceUUID(unique string) string {
|
||||
h := md5.New()
|
||||
if _, err := io.WriteString(h, unique); err != nil {
|
||||
log.Panicf("makeDeviceUUID write failed: %s", err)
|
||||
}
|
||||
buf := h.Sum(nil)
|
||||
return upnp.FormatUUID(buf)
|
||||
}
|
||||
|
||||
func didlLite(chardata string) string {
|
||||
return `<DIDL-Lite` +
|
||||
` xmlns:dc="http://purl.org/dc/elements/1.1/"` +
|
||||
` xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"` +
|
||||
` xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"` +
|
||||
` xmlns:dlna="urn:schemas-dlna-org:metadata-1-0/">` +
|
||||
chardata +
|
||||
`</DIDL-Lite>`
|
||||
}
|
||||
|
||||
func mustMarshalXML(value interface{}) []byte {
|
||||
ret, err := xml.MarshalIndent(value, "", " ")
|
||||
if err != nil {
|
||||
log.Panicf("mustMarshalXML failed to marshal %v: %s", value, err)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Marshal SOAP response arguments into a response XML snippet.
|
||||
func marshalSOAPResponse(sa upnp.SoapAction, args map[string]string) []byte {
|
||||
soapArgs := make([]soap.Arg, 0, len(args))
|
||||
for argName, value := range args {
|
||||
soapArgs = append(soapArgs, soap.Arg{
|
||||
XMLName: xml.Name{Local: argName},
|
||||
Value: value,
|
||||
})
|
||||
}
|
||||
return []byte(fmt.Sprintf(`<u:%[1]sResponse xmlns:u="%[2]s">%[3]s</u:%[1]sResponse>`,
|
||||
sa.Action, sa.ServiceURN.String(), mustMarshalXML(soapArgs)))
|
||||
}
|
||||
42 cmd/serve/dlna/dlnaflags/dlnaflags.go Normal file
@@ -0,0 +1,42 @@
|
||||
package dlnaflags
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/rc"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// Help contains the text for the command line help and manual.
|
||||
var Help = `
|
||||
### Server options
|
||||
|
||||
Use --addr to specify which IP address and port the server should
|
||||
listen on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all
|
||||
IPs.
|
||||
|
||||
`
|
||||
|
||||
// Options is the type for DLNA serving options.
|
||||
type Options struct {
|
||||
ListenAddr string
|
||||
}
|
||||
|
||||
// DefaultOpt contains the defaults options for DLNA serving.
|
||||
var DefaultOpt = Options{
|
||||
ListenAddr: ":7879",
|
||||
}
|
||||
|
||||
// Opt contains the options for DLNA serving.
|
||||
var (
|
||||
Opt = DefaultOpt
|
||||
)
|
||||
|
||||
func addFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *Options) {
|
||||
rc.AddOption("dlna", &Opt)
|
||||
flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "ip:port or :port to bind the DLNA http server to.")
|
||||
}
|
||||
|
||||
// AddFlags add the command line flags for DLNA serving.
|
||||
func AddFlags(flagSet *pflag.FlagSet) {
|
||||
addFlagsPrefix(flagSet, "", &Opt)
|
||||
}
|
||||
BIN cmd/serve/dlna/testdata/files/small_jpeg.jpg (vendored) Normal file. Binary file not shown (107 B).
@@ -126,7 +126,7 @@ func (s *server) serveDir(w http.ResponseWriter, r *http.Request, dirRemote stri
|
||||
}
|
||||
|
||||
// Make the entries for display
|
||||
directory := serve.NewDirectory(dirRemote)
|
||||
directory := serve.NewDirectory(dirRemote, s.HTMLTemplate)
|
||||
for _, node := range dirEntries {
|
||||
directory.AddEntry(node.Path(), node.IsDir())
|
||||
}
|
||||
|
||||
@@ -4,14 +4,18 @@ package httplib
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
auth "github.com/abbot/go-http-auth"
|
||||
"github.com/ncw/rclone/cmd/serve/httplib/serve/data"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -105,8 +109,9 @@ type Server struct {
|
||||
waitChan chan struct{} // for waiting on the listener to close
|
||||
httpServer *http.Server
|
||||
basicPassHashed string
|
||||
useSSL bool // if server is configured for SSL/TLS
|
||||
usingAuth bool // set if authentication is configured
|
||||
useSSL bool // if server is configured for SSL/TLS
|
||||
usingAuth bool // set if authentication is configured
|
||||
HTMLTemplate *template.Template // HTML template for web interface
|
||||
}
|
||||
|
||||
// singleUserProvider provides the encrypted password for a single user
|
||||
@@ -143,7 +148,28 @@ func NewServer(handler http.Handler, opt *Options) *Server {
|
||||
secretProvider = s.singleUserProvider
|
||||
}
|
||||
authenticator := auth.NewBasicAuthenticator(s.Opt.Realm, secretProvider)
|
||||
handler = auth.JustCheck(authenticator, handler.ServeHTTP)
|
||||
oldHandler := handler
|
||||
handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if username := authenticator.CheckAuth(r); username == "" {
|
||||
authHeader := r.Header.Get(authenticator.Headers.V().Authorization)
|
||||
if authHeader != "" {
|
||||
s := strings.SplitN(authHeader, " ", 2)
|
||||
var userName = "UNKNOWN"
|
||||
if len(s) == 2 && s[0] == "Basic" {
|
||||
b, err := base64.StdEncoding.DecodeString(s[1])
|
||||
if err == nil {
|
||||
userName = strings.SplitN(string(b), ":", 2)[0]
|
||||
}
|
||||
}
|
||||
fs.Infof(r.URL.Path, "%s: Unauthorized request from %s", r.RemoteAddr, userName)
|
||||
} else {
|
||||
fs.Infof(r.URL.Path, "%s: Basic auth challenge sent", r.RemoteAddr)
|
||||
}
|
||||
authenticator.RequireAuth(w, r)
|
||||
} else {
|
||||
oldHandler.ServeHTTP(w, r)
|
||||
}
|
||||
})
|
||||
s.usingAuth = true
|
||||
}
|
||||
|
||||
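To make the logging branch above concrete, here is a self-contained sketch (illustration only, with a made-up credential pair) of how a Basic Authorization header is decoded to recover the user name:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// "dXNlcjpzZWNyZXQ=" is base64 for the made-up pair "user:secret".
	authHeader := "Basic dXNlcjpzZWNyZXQ="
	userName := "UNKNOWN"
	parts := strings.SplitN(authHeader, " ", 2)
	if len(parts) == 2 && parts[0] == "Basic" {
		if b, err := base64.StdEncoding.DecodeString(parts[1]); err == nil {
			// Take everything before the first colon as the user name.
			userName = strings.SplitN(string(b), ":", 2)[0]
		}
	}
	fmt.Println(userName) // prints "user"
}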
@@ -182,6 +208,12 @@ func NewServer(handler http.Handler, opt *Options) *Server {
|
||||
s.httpServer.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
|
||||
}
|
||||
|
||||
htmlTemplate, templateErr := data.GetTemplate()
|
||||
if templateErr != nil {
|
||||
log.Fatalf(templateErr.Error())
|
||||
}
|
||||
s.HTMLTemplate = htmlTemplate
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
|
||||
22 cmd/serve/httplib/serve/data/assets_generate.go Normal file
@@ -0,0 +1,22 @@
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/shurcooL/vfsgen"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var AssetDir http.FileSystem = http.Dir("./templates")
|
||||
err := vfsgen.Generate(AssetDir, vfsgen.Options{
|
||||
PackageName: "data",
|
||||
BuildTags: "!dev",
|
||||
VariableName: "Assets",
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
186 cmd/serve/httplib/serve/data/assets_vfsdata.go Normal file
@@ -0,0 +1,186 @@
|
||||
// Code generated by vfsgen; DO NOT EDIT.
|
||||
|
||||
// +build !dev
|
||||
|
||||
package data
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
pathpkg "path"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Assets statically implements the virtual filesystem provided to vfsgen.
|
||||
var Assets = func() http.FileSystem {
|
||||
fs := vfsgen۰FS{
|
||||
"/": &vfsgen۰DirInfo{
|
||||
name: "/",
|
||||
modTime: time.Date(2018, 12, 16, 6, 54, 42, 894445775, time.UTC),
|
||||
},
|
||||
"/index.html": &vfsgen۰CompressedFileInfo{
|
||||
name: "index.html",
|
||||
modTime: time.Date(2018, 12, 16, 6, 54, 42, 790442328, time.UTC),
|
||||
uncompressedSize: 226,
|
||||
|
||||
compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x5c\x8f\x31\xcf\x83\x20\x10\x86\x77\x7e\xc5\x7d\xc4\xf5\x93\xb8\x35\x0d\xb0\xb4\x6e\x26\x6d\x1a\x3b\x74\x3c\xeb\x29\x24\x4a\x13\xa4\x43\x43\xf8\xef\x0d\xea\xd4\x09\xee\x79\xef\x9e\xcb\xc9\xbf\xf3\xe5\xd4\x3e\xae\x35\x98\x30\x4f\x9a\xc9\xfc\xc0\x84\x6e\x54\x9c\x1c\xcf\x80\xb0\xd7\x4c\xce\x14\x10\x9e\x06\xfd\x42\x41\xf1\x77\x18\xfe\x0f\x39\x0d\x36\x4c\xa4\x63\x84\xb2\xcd\x3f\x48\x49\x8a\x8d\x31\x29\xf6\xd1\xee\xd5\x7f\xb2\xa8\xfa\xe9\x33\x95\x66\x31\x82\x47\x37\x12\x14\x16\x8e\x0a\xca\xda\x05\x6f\x69\xc9\x39\x82\xf1\x34\x28\x1e\x23\x14\xb6\xbc\xdf\x1a\x48\x89\xeb\xad\x6a\x08\x87\xd5\x81\x5a\x76\x1e\xc4\x2a\x22\xd7\xaf\x6c\xdf\x27\xb6\x8b\xbe\x01\x00\x00\xff\xff\x92\x2e\x35\x75\xe2\x00\x00\x00"),
|
||||
},
|
||||
}
|
||||
fs["/"].(*vfsgen۰DirInfo).entries = []os.FileInfo{
|
||||
fs["/index.html"].(os.FileInfo),
|
||||
}
|
||||
|
||||
return fs
|
||||
}()
|
||||
|
||||
type vfsgen۰FS map[string]interface{}
|
||||
|
||||
func (fs vfsgen۰FS) Open(path string) (http.File, error) {
|
||||
path = pathpkg.Clean("/" + path)
|
||||
f, ok := fs[path]
|
||||
if !ok {
|
||||
return nil, &os.PathError{Op: "open", Path: path, Err: os.ErrNotExist}
|
||||
}
|
||||
|
||||
switch f := f.(type) {
|
||||
case *vfsgen۰CompressedFileInfo:
|
||||
gr, err := gzip.NewReader(bytes.NewReader(f.compressedContent))
|
||||
if err != nil {
|
||||
// This should never happen because we generate the gzip bytes such that they are always valid.
|
||||
panic("unexpected error reading own gzip compressed bytes: " + err.Error())
|
||||
}
|
||||
return &vfsgen۰CompressedFile{
|
||||
vfsgen۰CompressedFileInfo: f,
|
||||
gr: gr,
|
||||
}, nil
|
||||
case *vfsgen۰DirInfo:
|
||||
return &vfsgen۰Dir{
|
||||
vfsgen۰DirInfo: f,
|
||||
}, nil
|
||||
default:
|
||||
// This should never happen because we generate only the above types.
|
||||
panic(fmt.Sprintf("unexpected type %T", f))
|
||||
}
|
||||
}
|
||||
|
||||
// vfsgen۰CompressedFileInfo is a static definition of a gzip compressed file.
|
||||
type vfsgen۰CompressedFileInfo struct {
|
||||
name string
|
||||
modTime time.Time
|
||||
compressedContent []byte
|
||||
uncompressedSize int64
|
||||
}
|
||||
|
||||
func (f *vfsgen۰CompressedFileInfo) Readdir(count int) ([]os.FileInfo, error) {
|
||||
return nil, fmt.Errorf("cannot Readdir from file %s", f.name)
|
||||
}
|
||||
func (f *vfsgen۰CompressedFileInfo) Stat() (os.FileInfo, error) { return f, nil }
|
||||
|
||||
func (f *vfsgen۰CompressedFileInfo) GzipBytes() []byte {
|
||||
return f.compressedContent
|
||||
}
|
||||
|
||||
func (f *vfsgen۰CompressedFileInfo) Name() string { return f.name }
|
||||
func (f *vfsgen۰CompressedFileInfo) Size() int64 { return f.uncompressedSize }
|
||||
func (f *vfsgen۰CompressedFileInfo) Mode() os.FileMode { return 0444 }
|
||||
func (f *vfsgen۰CompressedFileInfo) ModTime() time.Time { return f.modTime }
|
||||
func (f *vfsgen۰CompressedFileInfo) IsDir() bool { return false }
|
||||
func (f *vfsgen۰CompressedFileInfo) Sys() interface{} { return nil }
|
||||
|
||||
// vfsgen۰CompressedFile is an opened compressedFile instance.
|
||||
type vfsgen۰CompressedFile struct {
|
||||
*vfsgen۰CompressedFileInfo
|
||||
gr *gzip.Reader
|
||||
grPos int64 // Actual gr uncompressed position.
|
||||
seekPos int64 // Seek uncompressed position.
|
||||
}
|
||||
|
||||
func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) {
|
||||
if f.grPos > f.seekPos {
|
||||
// Rewind to beginning.
|
||||
err = f.gr.Reset(bytes.NewReader(f.compressedContent))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
f.grPos = 0
|
||||
}
|
||||
if f.grPos < f.seekPos {
|
||||
// Fast-forward.
|
||||
_, err = io.CopyN(ioutil.Discard, f.gr, f.seekPos-f.grPos)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
f.grPos = f.seekPos
|
||||
}
|
||||
n, err = f.gr.Read(p)
|
||||
f.grPos += int64(n)
|
||||
f.seekPos = f.grPos
|
||||
return n, err
|
||||
}
|
||||
func (f *vfsgen۰CompressedFile) Seek(offset int64, whence int) (int64, error) {
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
f.seekPos = 0 + offset
|
||||
case io.SeekCurrent:
|
||||
f.seekPos += offset
|
||||
case io.SeekEnd:
|
||||
f.seekPos = f.uncompressedSize + offset
|
||||
default:
|
||||
panic(fmt.Errorf("invalid whence value: %v", whence))
|
||||
}
|
||||
return f.seekPos, nil
|
||||
}
|
||||
func (f *vfsgen۰CompressedFile) Close() error {
|
||||
return f.gr.Close()
|
||||
}
|
||||
|
||||
// vfsgen۰DirInfo is a static definition of a directory.
|
||||
type vfsgen۰DirInfo struct {
|
||||
name string
|
||||
modTime time.Time
|
||||
entries []os.FileInfo
|
||||
}
|
||||
|
||||
func (d *vfsgen۰DirInfo) Read([]byte) (int, error) {
|
||||
return 0, fmt.Errorf("cannot Read from directory %s", d.name)
|
||||
}
|
||||
func (d *vfsgen۰DirInfo) Close() error { return nil }
|
||||
func (d *vfsgen۰DirInfo) Stat() (os.FileInfo, error) { return d, nil }
|
||||
|
||||
func (d *vfsgen۰DirInfo) Name() string { return d.name }
|
||||
func (d *vfsgen۰DirInfo) Size() int64 { return 0 }
|
||||
func (d *vfsgen۰DirInfo) Mode() os.FileMode { return 0755 | os.ModeDir }
|
||||
func (d *vfsgen۰DirInfo) ModTime() time.Time { return d.modTime }
|
||||
func (d *vfsgen۰DirInfo) IsDir() bool { return true }
|
||||
func (d *vfsgen۰DirInfo) Sys() interface{} { return nil }
|
||||
|
||||
// vfsgen۰Dir is an opened dir instance.
|
||||
type vfsgen۰Dir struct {
|
||||
*vfsgen۰DirInfo
|
||||
pos int // Position within entries for Seek and Readdir.
|
||||
}
|
||||
|
||||
func (d *vfsgen۰Dir) Seek(offset int64, whence int) (int64, error) {
|
||||
if offset == 0 && whence == io.SeekStart {
|
||||
d.pos = 0
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("unsupported Seek in directory %s", d.name)
|
||||
}
|
||||
|
||||
func (d *vfsgen۰Dir) Readdir(count int) ([]os.FileInfo, error) {
|
||||
if d.pos >= len(d.entries) && count > 0 {
|
||||
return nil, io.EOF
|
||||
}
|
||||
if count <= 0 || count > len(d.entries)-d.pos {
|
||||
count = len(d.entries) - d.pos
|
||||
}
|
||||
e := d.entries[d.pos : d.pos+count]
|
||||
d.pos += count
|
||||
return e, nil
|
||||
}
|
||||
36 cmd/serve/httplib/serve/data/data.go Normal file
@@ -0,0 +1,36 @@
|
||||
//go:generate go run assets_generate.go
|
||||
// The "go:generate" directive compiles static assets by running assets_generate.go
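// (Illustrative note, not in the original file: running "go generate ./cmd/serve/httplib/serve/data"
// re-runs assets_generate.go, which regenerates assets_vfsdata.go from the templates directory;
// the "!dev" build tag on the generated file means normal builds use that embedded copy.)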
|
||||
|
||||
package data
|
||||
|
||||
import (
|
||||
"html/template"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// GetTemplate returns the HTML template for serving directories via HTTP
|
||||
func GetTemplate() (tpl *template.Template, err error) {
|
||||
templateFile, err := Assets.Open("index.html")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "get template open")
|
||||
}
|
||||
|
||||
defer fs.CheckClose(templateFile, &err)
|
||||
|
||||
templateBytes, err := ioutil.ReadAll(templateFile)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "get template read")
|
||||
}
|
||||
|
||||
var templateString = string(templateBytes)
|
||||
|
||||
tpl, err = template.New("index").Parse(templateString)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "get template parse")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
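A short usage sketch (illustrative only, with names taken from the files above and below): loading the embedded template and executing it with the fields that index.html below expects:

package main

import (
	"log"
	"os"

	"github.com/ncw/rclone/cmd/serve/httplib/serve/data"
)

func main() {
	tpl, err := data.GetTemplate()
	if err != nil {
		log.Fatal(err)
	}
	// index.html renders .Title and ranges over .Entries, each with URL and Leaf.
	page := struct {
		Title   string
		Entries []struct{ URL, Leaf string }
	}{
		Title:   "Directory listing of /demo", // hypothetical title
		Entries: []struct{ URL, Leaf string }{{URL: "file.txt", Leaf: "file.txt"}},
	}
	if err := tpl.Execute(os.Stdout, page); err != nil {
		log.Fatal(err)
	}
}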
11 cmd/serve/httplib/serve/data/templates/index.html Normal file
@@ -0,0 +1,11 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>{{ .Title }}</title>
|
||||
</head>
|
||||
<body>
|
||||
<h1>{{ .Title }}</h1>
|
||||
{{ range $i := .Entries }}<a href="{{ $i.URL }}">{{ $i.Leaf }}</a><br />
|
||||
{{ end }}</body>
|
||||
</html>
|
||||
@@ -21,17 +21,19 @@ type DirEntry struct {
|
||||
|
||||
// Directory represents a directory
|
||||
type Directory struct {
|
||||
DirRemote string
|
||||
Title string
|
||||
Entries []DirEntry
|
||||
Query string
|
||||
DirRemote string
|
||||
Title string
|
||||
Entries []DirEntry
|
||||
Query string
|
||||
HTMLTemplate *template.Template
|
||||
}
|
||||
|
||||
// NewDirectory makes an empty Directory
|
||||
func NewDirectory(dirRemote string) *Directory {
|
||||
func NewDirectory(dirRemote string, htmlTemplate *template.Template) *Directory {
|
||||
d := &Directory{
|
||||
DirRemote: dirRemote,
|
||||
Title: fmt.Sprintf("Directory listing of /%s", dirRemote),
|
||||
DirRemote: dirRemote,
|
||||
Title: fmt.Sprintf("Directory listing of /%s", dirRemote),
|
||||
HTMLTemplate: htmlTemplate,
|
||||
}
|
||||
return d
|
||||
}
|
||||
@@ -77,26 +79,10 @@ func (d *Directory) Serve(w http.ResponseWriter, r *http.Request) {
|
||||
defer accounting.Stats.DoneTransferring(d.DirRemote, true)
|
||||
|
||||
fs.Infof(d.DirRemote, "%s: Serving directory", r.RemoteAddr)
|
||||
err := indexTemplate.Execute(w, d)
|
||||
|
||||
err := d.HTMLTemplate.Execute(w, d)
|
||||
if err != nil {
|
||||
Error(d.DirRemote, w, "Failed to render template", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// indexPage is a directory listing template
|
||||
var indexPage = `<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>{{ .Title }}</title>
|
||||
</head>
|
||||
<body>
|
||||
<h1>{{ .Title }}</h1>
|
||||
{{ range $i := .Entries }}<a href="{{ $i.URL }}">{{ $i.Leaf }}</a><br />
|
||||
{{ end }}</body>
|
||||
</html>
|
||||
`
|
||||
|
||||
// indexTemplate is the instantiated indexPage
|
||||
var indexTemplate = template.Must(template.New("index").Parse(indexPage))
|
||||
|
||||
@@ -2,23 +2,32 @@ package serve
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"html/template"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/cmd/serve/httplib/serve/data"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func GetTemplate(t *testing.T) *template.Template {
|
||||
htmlTemplate, err := data.GetTemplate()
|
||||
require.NoError(t, err)
|
||||
return htmlTemplate
|
||||
}
|
||||
|
||||
func TestNewDirectory(t *testing.T) {
|
||||
d := NewDirectory("z")
|
||||
d := NewDirectory("z", GetTemplate(t))
|
||||
assert.Equal(t, "z", d.DirRemote)
|
||||
assert.Equal(t, "Directory listing of /z", d.Title)
|
||||
}
|
||||
|
||||
func TestSetQuery(t *testing.T) {
|
||||
d := NewDirectory("z")
|
||||
d := NewDirectory("z", GetTemplate(t))
|
||||
assert.Equal(t, "", d.Query)
|
||||
d.SetQuery(url.Values{"potato": []string{"42"}})
|
||||
assert.Equal(t, "?potato=42", d.Query)
|
||||
@@ -27,7 +36,7 @@ func TestSetQuery(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAddEntry(t *testing.T) {
|
||||
var d = NewDirectory("z")
|
||||
var d = NewDirectory("z", GetTemplate(t))
|
||||
d.AddEntry("", true)
|
||||
d.AddEntry("dir", true)
|
||||
d.AddEntry("a/b/c/d.txt", false)
|
||||
@@ -42,7 +51,7 @@ func TestAddEntry(t *testing.T) {
|
||||
}, d.Entries)
|
||||
|
||||
// Now test with a query parameter
|
||||
d = NewDirectory("z").SetQuery(url.Values{"potato": []string{"42"}})
|
||||
d = NewDirectory("z", GetTemplate(t)).SetQuery(url.Values{"potato": []string{"42"}})
|
||||
d.AddEntry("file", false)
|
||||
d.AddEntry("dir", true)
|
||||
assert.Equal(t, []DirEntry{
|
||||
@@ -62,7 +71,7 @@ func TestError(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestServe(t *testing.T) {
|
||||
d := NewDirectory("aDirectory")
|
||||
d := NewDirectory("aDirectory", GetTemplate(t))
|
||||
d.AddEntry("file", false)
|
||||
d.AddEntry("dir", true)
|
||||
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
// Package restic serves a remote suitable for use with restic
|
||||
|
||||
// +build go1.9
|
||||
|
||||
package restic
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
// +build go1.9
|
||||
|
||||
package restic
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
// Serve restic tests set up a server and run the integration tests
|
||||
// for restic against it.
|
||||
|
||||
// +build go1.9
|
||||
|
||||
package restic
|
||||
|
||||
import (
|
||||
|
||||
11 cmd/serve/restic/restic_unsupported.go Normal file
@@ -0,0 +1,11 @@
|
||||
// Build for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build !go1.9
|
||||
|
||||
package restic
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
// Command definition is nil to show not implemented
|
||||
var Command *cobra.Command = nil
|
||||
@@ -1,3 +1,5 @@
|
||||
// +build go1.9
|
||||
|
||||
package restic
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
//+build !go1.10
|
||||
//+build go1.9,!go1.10
|
||||
|
||||
// Fallback deadline setting for pre go1.10
|
||||
|
||||
|
||||
@@ -3,6 +3,8 @@ package serve
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/ncw/rclone/cmd/serve/dlna"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/cmd/serve/ftp"
|
||||
"github.com/ncw/rclone/cmd/serve/http"
|
||||
@@ -13,8 +15,15 @@ import (
|
||||
|
||||
func init() {
|
||||
Command.AddCommand(http.Command)
|
||||
Command.AddCommand(webdav.Command)
|
||||
Command.AddCommand(restic.Command)
|
||||
if webdav.Command != nil {
|
||||
Command.AddCommand(webdav.Command)
|
||||
}
|
||||
if restic.Command != nil {
|
||||
Command.AddCommand(restic.Command)
|
||||
}
|
||||
if dlna.Command != nil {
|
||||
Command.AddCommand(dlna.Command)
|
||||
}
|
||||
if ftp.Command != nil {
|
||||
Command.AddCommand(ftp.Command)
|
||||
}
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
//+build go1.9
|
||||
|
||||
package webdav
|
||||
|
||||
import (
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
//
|
||||
// We skip tests on platforms with troublesome character mappings
|
||||
|
||||
//+build !windows,!darwin
|
||||
//+build !windows,!darwin,go1.9
|
||||
|
||||
package webdav
|
||||
|
||||
|
||||
11 cmd/serve/webdav/webdav_unsupported.go Normal file
@@ -0,0 +1,11 @@
|
||||
// Build for webdav for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build !go1.9
|
||||
|
||||
package webdav
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
// Command definition is nil to show not implemented
|
||||
var Command *cobra.Command = nil
|
||||
@@ -13,6 +13,7 @@ Rclone
|
||||
|
||||
Rclone is a command line program to sync files and directories to and from:
|
||||
|
||||
* {{< provider name="Alibaba Cloud (Aliyun) Object Storage System (OSS)" home="https://www.alibabacloud.com/product/oss/" config="/s3/#alibaba-oss" >}}
|
||||
* {{< provider name="Amazon Drive" home="https://www.amazon.com/clouddrive" config="/amazonclouddrive/" >}} ([See note](/amazonclouddrive/#status))
|
||||
* {{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
|
||||
* {{< provider name="Backblaze B2" home="https://www.backblaze.com/b2/cloud-storage.html" config="/b2/" >}}
|
||||
|
||||
@@ -154,7 +154,7 @@ Contributors
|
||||
* Michael P. Dubner <pywebmail@list.ru>
|
||||
* Antoine GIRARD <sapk@users.noreply.github.com>
|
||||
* Mateusz Piotrowski <mpp302@gmail.com>
|
||||
* Animosity022 <animosity22@users.noreply.github.com>
|
||||
* Animosity022 <animosity22@users.noreply.github.com> <earl.texter@gmail.com>
|
||||
* Peter Baumgartner <pete@lincolnloop.com>
|
||||
* Craig Rachel <craig@craigrachel.com>
|
||||
* Michael G. Noll <miguno@users.noreply.github.com>
|
||||
@@ -217,3 +217,14 @@ Contributors
|
||||
* Peter Kaminski <kaminski@istori.com>
|
||||
* Henry Ptasinski <henry@logout.com>
|
||||
* Alexander <kharkovalexander@gmail.com>
|
||||
* Garry McNulty <garrmcnu@gmail.com>
|
||||
* Mathieu Carbou <mathieu.carbou@gmail.com>
|
||||
* Mark Otway <mark@otway.com>
|
||||
* William Cocker <37018962+WilliamCocker@users.noreply.github.com>
|
||||
* François Leurent <131.js@cloudyks.org>
|
||||
* Arkadius Stefanski <arkste@gmail.com>
|
||||
* Jay <dev@jaygoel.com>
|
||||
* andrea rota <a@xelera.eu>
|
||||
* nicolov <nicolov@users.noreply.github.com>
|
||||
* Dario Guzik <dario@guzik.com.ar>
|
||||
* qip <qip@users.noreply.github.com>
|
||||
|
||||
@@ -98,7 +98,8 @@ excess files in the bucket.
|
||||
B2 supports multiple [Application Keys for different access permission
|
||||
to B2 Buckets](https://www.backblaze.com/b2/docs/application_keys.html).
|
||||
|
||||
You can use these with rclone too.
|
||||
You can use these with rclone too; you will need to use rclone version 1.43
|
||||
or later.
|
||||
|
||||
Follow Backblaze's docs to create an Application Key with the required
|
||||
permission and add the `Application Key ID` as the `account` and the
|
||||
@@ -181,8 +182,8 @@ versions of files, leaving the current ones intact. You can also
|
||||
supply a path and only old versions under that path will be deleted,
|
||||
eg `rclone cleanup remote:bucket/path/to/stuff`.
|
||||
|
||||
Note that `cleanup` does not remove partially uploaded files
|
||||
from the bucket.
|
||||
Note that `cleanup` will remove partially uploaded files from the bucket
|
||||
if they are more than a day old.
|
||||
|
||||
When you `purge` a bucket, the current and the old versions will be
|
||||
deleted then the bucket will be deleted.
|
||||
|
||||
@@ -267,6 +267,15 @@ Options
|
||||
|
||||
Rclone has a number of options to control its behaviour.
|
||||
|
||||
Options that take parameters can have the values passed in two ways,
|
||||
`--option=value` or `--option value`. However boolean (true/false)
|
||||
options behave slightly differently to the other options in that
|
||||
`--boolean` sets the option to `true` and the absence of the flag sets
|
||||
it to `false`. It is also possible to specify `--boolean=false` or
|
||||
`--boolean=true`. Note that `--boolean false` is not valid - this is
|
||||
parsed as `--boolean` and the `false` is parsed as an extra command
|
||||
line argument for rclone.
|
||||
|
||||
Options which use TIME use the go time parser. A duration string is a
|
||||
possibly signed sequence of decimal numbers, each with optional
|
||||
fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid
|
||||
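As the paragraph above notes, these duration options follow the Go time parser; a minimal sketch (illustration only) of the strings it accepts:

package main

import (
	"fmt"
	"time"
)

func main() {
	// The example strings are the ones quoted in the documentation above.
	for _, s := range []string{"300ms", "-1.5h", "2h45m"} {
		d, err := time.ParseDuration(s)
		if err != nil {
			fmt.Println(s, "invalid:", err)
			continue
		}
		fmt.Println(s, "=", d)
	}
}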
@@ -428,8 +437,8 @@ Normally the config file is in your home directory as a file called
|
||||
older version). If `$XDG_CONFIG_HOME` is set it will be at
|
||||
`$XDG_CONFIG_HOME/rclone/rclone.conf`
|
||||
|
||||
If you run `rclone -h` and look at the help for the `--config` option
|
||||
you will see where the default location is for you.
|
||||
If you run `rclone config file` you will see where the default
|
||||
location is for you.
|
||||
|
||||
Use this flag to override the config location, eg `rclone
|
||||
--config=".myconfig" .config`.
|
||||
@@ -842,8 +851,8 @@ will fall back to the default behaviour and log an error level message
|
||||
to the console. Note: Encrypted destinations are not supported
|
||||
by `--track-renames`.
|
||||
|
||||
Note that `--track-renames` uses extra memory to keep track of all
|
||||
the rename candidates.
|
||||
Note that `--track-renames` is incompatible with `--no-traverse` and
|
||||
that it uses extra memory to keep track of all the rename candidates.
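
For instance, rename tracking on a sync might be invoked like this (the paths are placeholders):

```
rclone sync --track-renames source:path dest:path
```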
|
||||
|
||||
Note also that `--track-renames` is incompatible with
|
||||
`--delete-before` and will select `--delete-after` instead of
|
||||
@@ -1132,6 +1141,24 @@ This option defaults to `false`.
|
||||
|
||||
**This should be used only for testing.**
|
||||
|
||||
### --no-traverse ###
|
||||
|
||||
The `--no-traverse` flag controls whether the destination file system
|
||||
is traversed when using the `copy` or `move` commands.
|
||||
`--no-traverse` is not compatible with `sync` and will be ignored if
|
||||
you supply it with `sync`.
|
||||
|
||||
If you are only copying a small number of files (or are filtering most
|
||||
of the files) and/or have a large number of files on the destination
|
||||
then `--no-traverse` will stop rclone listing the destination and save
|
||||
time.
|
||||
|
||||
However, if you are copying a large number of files, especially if you
|
||||
are doing a copy where lots of the files under consideration haven't
|
||||
changed and won't need copying then you shouldn't use `--no-traverse`.
|
||||
|
||||
See [rclone copy](/commands/rclone_copy/) for an example of how to use it.
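
A minimal sketch of that pattern (the paths and the `--max-age` window are illustrative; see the linked page for the full example):

```
# copy only recently changed files without listing the whole destination
rclone copy --no-traverse --max-age 24h /path/to/source remote:backup
```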
|
||||
|
||||
Filtering
|
||||
---------
|
||||
|
||||
|
||||
@@ -845,9 +845,7 @@ second that each client_id can do set by Google. rclone already has a
|
||||
high quota and I will continue to make sure it is high enough by
|
||||
contacting Google.
|
||||
|
||||
However you might find you get better performance making your own
|
||||
client_id if you are a heavy user. Or you may not depending on exactly
|
||||
how Google have been raising rclone's rate limit.
|
||||
It is strongly recommended to use your own client ID as the default rclone ID is heavily used. If you have multiple services running, it is recommended to use an API key for each service. The default Google quota is 10 transactions per second, so it is best to stay under that number; exceeding it will cause rclone to be rate limited, making transfers slower.
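
Once created (the steps follow below), the client ID and secret end up in the remote's configuration, roughly like this sketch (the remote name and values are placeholders):

```
[gdrive]
type = drive
client_id = 123456789012-xxxxxxxx.apps.googleusercontent.com
client_secret = xxxxxxxxxxxxxxxx
```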
|
||||
|
||||
Here is how to create your own Google Drive client ID for rclone:
|
||||
|
||||
|
||||
@@ -15,8 +15,8 @@ work on all the remote storage systems.
|
||||
### Can I copy the config from one machine to another ###
|
||||
|
||||
Sure! Rclone stores all of its config in a single file. If you want
|
||||
to find this file, the simplest way is to run `rclone -h` and look at
|
||||
the help for the `--config` flag which will tell you where it is.
|
||||
to find this file, run `rclone config file` which will tell you where
|
||||
it is.
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for more info.
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
set -e
|
||||
|
||||
#when adding a tool to the list make sure to also add its corresponding command further in the script
unzip_tools_list=('unzip' '7z', 'busybox')
unzip_tools_list=('unzip' '7z' 'busybox')
|
||||
|
||||
usage() { echo "Usage: curl https://rclone.org/install.sh | sudo bash [-s beta]" 1>&2; exit 1; }
|
||||
|
||||
|
||||
@@ -242,13 +242,17 @@ platforms they are common. Rclone will map these names to and from an
|
||||
identical looking unicode equivalent. For example if a file has a `?`
in it, it will be mapped to `？` instead.
|
||||
|
||||
The largest allowed file size is 10GiB (10,737,418,240 bytes).
|
||||
The largest allowed file sizes are 15GB for OneDrive for Business and 35GB for OneDrive Personal (Updated 4 Jan 2019).
|
||||
|
||||
The entire path, including the file name, must contain fewer than 400 characters for OneDrive, OneDrive for Business and SharePoint Online. If you are encrypting file and folder names with rclone, you may want to pay attention to this limitation because the encrypted names are typically longer than the original ones.
|
||||
|
||||
OneDrive seems to be OK with at least 50,000 files in a folder, but at
|
||||
100,000 rclone will get errors listing the directory like `couldn’t
|
||||
list files: UnknownError:`. See
|
||||
[#2707](https://github.com/ncw/rclone/issues/2707) for more info.
|
||||
|
||||
An official document about the limitations for different types of OneDrive can be found [here](https://support.office.com/en-us/article/invalid-file-names-and-file-types-in-onedrive-onedrive-for-business-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa).
|
||||
|
||||
### Versioning issue ###
|
||||
|
||||
Every change in OneDrive causes the service to create a new version.
|
||||
@@ -260,6 +264,16 @@ The `copy` is the only rclone command affected by this as we copy
|
||||
the file and then afterwards set the modification time to match the
|
||||
source file.
|
||||
|
||||
**Note**: Starting October 2018, users will no longer be able to disable versioning by default. This is because Microsoft has brought an [update](https://techcommunity.microsoft.com/t5/Microsoft-OneDrive-Blog/New-Updates-to-OneDrive-and-SharePoint-Team-Site-Versioning/ba-p/204390) to the mechanism. To change this new default setting, a PowerShell command is required to be run by a SharePoint admin. If you are an admin, you can run these commands in PowerShell to change that setting:
|
||||
|
||||
1. `Install-Module -Name Microsoft.Online.SharePoint.PowerShell` (in case you haven't installed this already)
|
||||
1. `Import-Module Microsoft.Online.SharePoint.PowerShell -DisableNameChecking`
|
||||
1. `Connect-SPOService -Url https://YOURSITE-admin.sharepoint.com -Credential YOU@YOURSITE.COM` (replacing `YOURSITE`, `YOU`, `YOURSITE.COM` with the actual values; this will prompt for your credentials)
|
||||
1. `Set-SPOTenant -EnableMinimumVersionRequirement $False`
|
||||
1. `Disconnect-SPOService` (to disconnect from the server)
|
||||
|
||||
*Below are the steps for normal users to disable versioning. If you don't see the "No Versioning" option, make sure the above requirements are met.*
|
||||
|
||||
User [Weropol](https://github.com/Weropol) has found a method to disable
|
||||
versioning on OneDrive
|
||||
|
||||
|
||||
@@ -234,4 +234,50 @@ Number of connection retries.
|
||||
- Type: int
|
||||
- Default: 3
|
||||
|
||||
#### --qingstor-upload-cutoff
|
||||
|
||||
Cutoff for switching to chunked upload
|
||||
|
||||
Any files larger than this will be uploaded in chunks of chunk_size.
|
||||
The minimum is 0 and the maximum is 5GB.
|
||||
|
||||
- Config: upload_cutoff
|
||||
- Env Var: RCLONE_QINGSTOR_UPLOAD_CUTOFF
|
||||
- Type: SizeSuffix
|
||||
- Default: 200M
|
||||
|
||||
#### --qingstor-chunk-size
|
||||
|
||||
Chunk size to use for uploading.
|
||||
|
||||
When uploading files larger than upload_cutoff they will be uploaded
|
||||
as multipart uploads using this chunk size.
|
||||
|
||||
Note that "--qingstor-upload-concurrency" chunks of this size are buffered
|
||||
in memory per transfer.
|
||||
|
||||
If you are transferring large files over high speed links and you have
|
||||
enough memory, then increasing this will speed up the transfers.
|
||||
|
||||
- Config: chunk_size
|
||||
- Env Var: RCLONE_QINGSTOR_CHUNK_SIZE
|
||||
- Type: SizeSuffix
|
||||
- Default: 4M
|
||||
|
||||
#### --qingstor-upload-concurrency
|
||||
|
||||
Concurrency for multipart uploads.
|
||||
|
||||
This is the number of chunks of the same file that are uploaded
|
||||
concurrently.
|
||||
|
||||
If you are uploading small numbers of large files over high speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.
|
||||
|
||||
- Config: upload_concurrency
|
||||
- Env Var: RCLONE_QINGSTOR_UPLOAD_CONCURRENCY
|
||||
- Type: int
|
||||
- Default: 4
|
||||
|
||||
<!--- autogenerated options stop -->
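
As an illustration of how these options can be supplied, either as flags or via the environment variables listed above (the remote and bucket names are placeholders):

```
rclone copy --qingstor-chunk-size 16M --qingstor-upload-concurrency 8 /path/to/data qs:bucket

# equivalently, using the environment variables
RCLONE_QINGSTOR_CHUNK_SIZE=16M RCLONE_QINGSTOR_UPLOAD_CONCURRENCY=8 rclone copy /path/to/data qs:bucket
```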
|
||||
|
||||
@@ -74,15 +74,14 @@ So first configure rclone on your desktop machine
|
||||
|
||||
to set up the config file.
|
||||
|
||||
Find the config file by running `rclone -h` and looking for the help for the `--config` option
|
||||
Find the config file by running `rclone config file`, for example
|
||||
|
||||
```
|
||||
$ rclone -h
|
||||
[snip]
|
||||
--config="/home/user/.rclone.conf": Config file.
|
||||
[snip]
|
||||
$ rclone config file
|
||||
Configuration file is stored at:
|
||||
/home/user/.rclone.conf
|
||||
```
|
||||
|
||||
Now transfer it to the remote box (scp, cut paste, ftp, sftp etc) and
|
||||
place it in the correct place (use `rclone -h` on the remote box to
|
||||
find out where).
|
||||
place it in the correct place (use `rclone config file` on the remote
|
||||
box to find out where).
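
For example, using scp (the host name is a placeholder; the source path is the one reported by `rclone config file` above):

```
scp /home/user/.rclone.conf remotebox:/home/user/.rclone.conf
```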
|
||||
|
||||
@@ -10,6 +10,7 @@ date: "2016-07-11"
|
||||
The S3 backend can be used with a number of different providers:
|
||||
|
||||
* {{< provider name="AWS S3" home="https://aws.amazon.com/s3/" config="/s3/#amazon-s3" >}}
|
||||
* {{< provider name="Alibaba Cloud (Aliyun) Object Storage System (OSS)" home="https://www.alibabacloud.com/product/oss/" config="/s3/#alibaba-oss" >}}
|
||||
* {{< provider name="Ceph" home="http://ceph.com/" config="/s3/#ceph" >}}
|
||||
* {{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
|
||||
* {{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
|
||||
@@ -217,6 +218,8 @@ Choose a number from below, or type in your own value
|
||||
\ "STANDARD_IA"
|
||||
5 / One Zone Infrequent Access storage class
|
||||
\ "ONEZONE_IA"
|
||||
6 / Glacier storage class
|
||||
\ "GLACIER"
|
||||
storage_class> 1
|
||||
Remote config
|
||||
--------------------
|
||||
@@ -266,8 +269,33 @@ The modified time is stored as metadata on the object as
|
||||
### Multipart uploads ###
|
||||
|
||||
rclone supports multipart uploads with S3 which means that it can
|
||||
upload files bigger than 5GB. Note that files uploaded *both* with
|
||||
multipart upload *and* through crypt remotes do not have MD5 sums.
|
||||
upload files bigger than 5GB.
|
||||
|
||||
Note that files uploaded *both* with multipart upload *and* through
|
||||
crypt remotes do not have MD5 sums.
|
||||
|
||||
Rclone switches from single part uploads to multipart uploads at the
|
||||
point specified by `--s3-upload-cutoff`. This can be a maximum of 5GB
and a minimum of 0 (ie always upload multipart files).
|
||||
|
||||
The chunk sizes used in the multipart upload are specified by
|
||||
`--s3-chunk-size` and the number of chunks uploaded concurrently is
|
||||
specified by `--s3-upload-concurrency`.
|
||||
|
||||
Multipart uploads will use `--transfers` * `--s3-upload-concurrency` *
|
||||
`--s3-chunk-size` extra memory. Single part uploads do not use extra
memory.
|
||||
|
||||
Single part transfers can be faster than multipart transfers or slower
|
||||
depending on your latency from S3 - the more latency, the more likely
|
||||
single part transfers will be faster.
|
||||
|
||||
Increasing `--s3-upload-concurrency` will increase throughput (8 would
|
||||
be a sensible value) and increasing `--s3-chunk-size` also increases
throughput (16M would be sensible). Increasing either of these will
|
||||
use more memory. The default values are high enough to gain most of
|
||||
the possible performance without using too much memory.
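
As a rough worked example of that memory bound, raising the concurrency and chunk size for a fast link (the numbers and remote name are illustrative only):

```
# 4 transfers * 8 concurrent chunks * 16M chunks ≈ up to 512M of upload buffers
rclone copy --transfers 4 --s3-upload-concurrency 8 --s3-chunk-size 16M /path/to/data s3remote:bucket
```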
|
||||
|
||||
|
||||
### Buckets and Regions ###
|
||||
|
||||
@@ -361,9 +389,9 @@ A proper fix is being worked on in [issue #1824](https://github.com/ncw/rclone/i
|
||||
|
||||
### Glacier ###
|
||||
|
||||
You can transition objects to glacier storage using a [lifecycle policy](http://docs.aws.amazon.com/AmazonS3/latest/user-guide/create-lifecycle.html).
|
||||
You can upload objects using the glacier storage class or transition them to glacier using a [lifecycle policy](http://docs.aws.amazon.com/AmazonS3/latest/user-guide/create-lifecycle.html).
|
||||
The bucket can still be synced or copied into normally, but if rclone
|
||||
tries to access the data you will see an error like below.
|
||||
tries to access data from the glacier storage class you will see an error like below.
|
||||
|
||||
2017/09/11 19:07:43 Failed to sync: failed to open source object: Object in GLACIER, restore first: path/to/file
|
||||
|
||||
@@ -373,7 +401,7 @@ the object(s) in question before using rclone.
|
||||
<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/s3/s3.go then run make backenddocs -->
|
||||
### Standard Options
|
||||
|
||||
Here are the standard options specific to s3 (Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)).
|
||||
Here are the standard options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
|
||||
|
||||
#### --s3-provider
|
||||
|
||||
@@ -386,6 +414,8 @@ Choose your S3 provider.
|
||||
- Examples:
|
||||
- "AWS"
|
||||
- Amazon Web Services (AWS) S3
|
||||
- "Alibaba"
|
||||
- Alibaba Cloud Object Storage System (OSS) formerly Aliyun
|
||||
- "Ceph"
|
||||
- Ceph Object Storage
|
||||
- "DigitalOcean"
|
||||
@@ -396,6 +426,8 @@ Choose your S3 provider.
|
||||
- IBM COS S3
|
||||
- "Minio"
|
||||
- Minio Object Storage
|
||||
- "Netease"
|
||||
- Netease Object Storage (NOS)
|
||||
- "Wasabi"
|
||||
- Wasabi Object Storage
|
||||
- "Other"
|
||||
@@ -595,6 +627,54 @@ Specify if using an IBM COS On Premise.
|
||||
|
||||
#### --s3-endpoint
|
||||
|
||||
Endpoint for OSS API.
|
||||
|
||||
- Config: endpoint
|
||||
- Env Var: RCLONE_S3_ENDPOINT
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Examples:
|
||||
- "oss-cn-hangzhou.aliyuncs.com"
|
||||
- East China 1 (Hangzhou)
|
||||
- "oss-cn-shanghai.aliyuncs.com"
|
||||
- East China 2 (Shanghai)
|
||||
- "oss-cn-qingdao.aliyuncs.com"
|
||||
- North China 1 (Qingdao)
|
||||
- "oss-cn-beijing.aliyuncs.com"
|
||||
- North China 2 (Beijing)
|
||||
- "oss-cn-zhangjiakou.aliyuncs.com"
|
||||
- North China 3 (Zhangjiakou)
|
||||
- "oss-cn-huhehaote.aliyuncs.com"
|
||||
- North China 5 (Huhehaote)
|
||||
- "oss-cn-shenzhen.aliyuncs.com"
|
||||
- South China 1 (Shenzhen)
|
||||
- "oss-cn-hongkong.aliyuncs.com"
|
||||
- Hong Kong (Hong Kong)
|
||||
- "oss-us-west-1.aliyuncs.com"
|
||||
- US West 1 (Silicon Valley)
|
||||
- "oss-us-east-1.aliyuncs.com"
|
||||
- US East 1 (Virginia)
|
||||
- "oss-ap-southeast-1.aliyuncs.com"
|
||||
- Southeast Asia Southeast 1 (Singapore)
|
||||
- "oss-ap-southeast-2.aliyuncs.com"
|
||||
- Asia Pacific Southeast 2 (Sydney)
|
||||
- "oss-ap-southeast-3.aliyuncs.com"
|
||||
- Southeast Asia Southeast 3 (Kuala Lumpur)
|
||||
- "oss-ap-southeast-5.aliyuncs.com"
|
||||
- Asia Pacific Southeast 5 (Jakarta)
|
||||
- "oss-ap-northeast-1.aliyuncs.com"
|
||||
- Asia Pacific Northeast 1 (Japan)
|
||||
- "oss-ap-south-1.aliyuncs.com"
|
||||
- Asia Pacific South 1 (Mumbai)
|
||||
- "oss-eu-central-1.aliyuncs.com"
|
||||
- Central Europe 1 (Frankfurt)
|
||||
- "oss-eu-west-1.aliyuncs.com"
|
||||
- West Europe (London)
|
||||
- "oss-me-east-1.aliyuncs.com"
|
||||
- Middle East 1 (Dubai)
|
||||
|
||||
#### --s3-endpoint
|
||||
|
||||
Endpoint for S3 API.
|
||||
Required when using an S3 clone.
|
||||
|
||||
@@ -827,17 +907,47 @@ The storage class to use when storing new objects in S3.
|
||||
- Standard Infrequent Access storage class
|
||||
- "ONEZONE_IA"
|
||||
- One Zone Infrequent Access storage class
|
||||
- "GLACIER"
|
||||
- Glacier storage class
|
||||
|
||||
#### --s3-storage-class
|
||||
|
||||
The storage class to use when storing new objects in OSS.
|
||||
|
||||
- Config: storage_class
|
||||
- Env Var: RCLONE_S3_STORAGE_CLASS
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Examples:
|
||||
- "Standard"
|
||||
- Standard storage class
|
||||
- "Archive"
|
||||
- Archive storage mode.
|
||||
- "IA"
|
||||
- Infrequent access storage mode.
|
||||
|
||||
### Advanced Options
|
||||
|
||||
Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)).
|
||||
Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
|
||||
|
||||
#### --s3-upload-cutoff
|
||||
|
||||
Cutoff for switching to chunked upload
|
||||
|
||||
Any files larger than this will be uploaded in chunks of chunk_size.
|
||||
The minimum is 0 and the maximum is 5GB.
|
||||
|
||||
- Config: upload_cutoff
|
||||
- Env Var: RCLONE_S3_UPLOAD_CUTOFF
|
||||
- Type: SizeSuffix
|
||||
- Default: 200M
|
||||
|
||||
#### --s3-chunk-size
|
||||
|
||||
Chunk size to use for uploading.
|
||||
|
||||
Any files larger than this will be uploaded in chunks of this
|
||||
size. The default is 5MB. The minimum is 5MB.
|
||||
When uploading files larger than upload_cutoff they will be uploaded
|
||||
as multipart uploads using this chunk size.
|
||||
|
||||
Note that "--s3-upload-concurrency" chunks of this size are buffered
|
||||
in memory per transfer.
|
||||
@@ -882,7 +992,7 @@ this may help to speed up the transfers.
|
||||
- Config: upload_concurrency
|
||||
- Env Var: RCLONE_S3_UPLOAD_CONCURRENCY
|
||||
- Type: int
|
||||
- Default: 2
|
||||
- Default: 4
|
||||
|
||||
#### --s3-force-path-style
|
||||
|
||||
@@ -1417,30 +1527,41 @@ server_side_encryption =
|
||||
storage_class =
|
||||
```
|
||||
|
||||
### Aliyun OSS / Netease NOS ###
|
||||
### Alibaba OSS {#alibaba-oss}
|
||||
|
||||
This describes how to set up Aliyun OSS - Netease NOS is the same
|
||||
except for different endpoints.
|
||||
Here is an example of making an [Alibaba Cloud (Aliyun) OSS](https://www.alibabacloud.com/product/oss/)
|
||||
configuration. First run:
|
||||
|
||||
Note this is a pretty standard S3 setup, except for the setting of
|
||||
`force_path_style = false` in the advanced config.
|
||||
rclone config
|
||||
|
||||
This will guide you through an interactive setup process.
|
||||
|
||||
```
|
||||
# rclone config
|
||||
e/n/d/r/c/s/q> n
|
||||
No remotes found - make a new one
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
n/s/q> n
|
||||
name> oss
|
||||
Type of storage to configure.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
3 / Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)
|
||||
[snip]
|
||||
4 / Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)
|
||||
\ "s3"
|
||||
[snip]
|
||||
Storage> s3
|
||||
Choose your S3 provider.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
8 / Any other S3 compatible provider
|
||||
\ "Other"
|
||||
provider> other
|
||||
1 / Amazon Web Services (AWS) S3
|
||||
\ "AWS"
|
||||
2 / Alibaba Cloud Object Storage System (OSS) formerly Aliyun
|
||||
\ "Alibaba"
|
||||
3 / Ceph Object Storage
|
||||
\ "Ceph"
|
||||
[snip]
|
||||
provider> Alibaba
|
||||
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
Only applies if access_key_id and secret_access_key is blank.
|
||||
Enter a boolean value (true or false). Press Enter for the default ("false").
|
||||
@@ -1453,70 +1574,71 @@ env_auth> 1
|
||||
AWS Access Key ID.
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
access_key_id> xxxxxxxxxxxx
|
||||
access_key_id> accesskeyid
|
||||
AWS Secret Access Key (password)
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
secret_access_key> xxxxxxxxxxxxxxxxx
|
||||
Region to connect to.
|
||||
Leave blank if you are using an S3 clone and you don't have a region.
|
||||
secret_access_key> secretaccesskey
|
||||
Endpoint for OSS API.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Use this if unsure. Will use v4 signatures and an empty region.
|
||||
\ ""
|
||||
2 / Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.
|
||||
\ "other-v2-signature"
|
||||
region> 1
|
||||
Endpoint for S3 API.
|
||||
Required when using an S3 clone.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
endpoint> oss-cn-shenzhen.aliyuncs.com
|
||||
Location constraint - must be set to match the Region.
|
||||
Leave blank if not sure. Used when creating buckets only.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
location_constraint>
|
||||
Canned ACL used when creating buckets and/or storing objects in S3.
|
||||
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
|
||||
1 / East China 1 (Hangzhou)
|
||||
\ "oss-cn-hangzhou.aliyuncs.com"
|
||||
2 / East China 2 (Shanghai)
|
||||
\ "oss-cn-shanghai.aliyuncs.com"
|
||||
3 / North China 1 (Qingdao)
|
||||
\ "oss-cn-qingdao.aliyuncs.com"
|
||||
[snip]
|
||||
endpoint> 1
|
||||
Canned ACL used when creating buckets and storing or copying objects.
|
||||
|
||||
Note that this ACL is applied when server side copying objects as S3
|
||||
doesn't copy the ACL from the source but rather writes a fresh one.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Owner gets FULL_CONTROL. No one else has access rights (default).
|
||||
\ "private"
|
||||
2 / Owner gets FULL_CONTROL. The AllUsers group gets READ access.
|
||||
\ "public-read"
|
||||
/ Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.
|
||||
[snip]
|
||||
acl> 1
|
||||
The storage class to use when storing new objects in OSS.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Default
|
||||
\ ""
|
||||
2 / Standard storage class
|
||||
\ "STANDARD"
|
||||
3 / Archive storage mode.
|
||||
\ "GLACIER"
|
||||
4 / Infrequent access storage mode.
|
||||
\ "STANDARD_IA"
|
||||
storage_class> 1
|
||||
Edit advanced config? (y/n)
|
||||
y) Yes
|
||||
n) No
|
||||
y/n> y
|
||||
Chunk size to use for uploading
|
||||
Enter a size with suffix k,M,G,T. Press Enter for the default ("5M").
|
||||
chunk_size>
|
||||
Don't store MD5 checksum with object metadata
|
||||
Enter a boolean value (true or false). Press Enter for the default ("false").
|
||||
disable_checksum>
|
||||
An AWS session token
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
session_token>
|
||||
Concurrency for multipart uploads.
|
||||
Enter a signed integer. Press Enter for the default ("2").
|
||||
upload_concurrency>
|
||||
If true use path style access if false use virtual hosted style.
|
||||
Some providers (eg Aliyun OSS or Netease COS) require this.
|
||||
Enter a boolean value (true or false). Press Enter for the default ("true").
|
||||
force_path_style> false
|
||||
y/n> n
|
||||
Remote config
|
||||
--------------------
|
||||
[oss]
|
||||
type = s3
|
||||
provider = Other
|
||||
provider = Alibaba
|
||||
env_auth = false
|
||||
access_key_id = xxxxxxxxx
|
||||
secret_access_key = xxxxxxxxxxxxx
|
||||
endpoint = oss-cn-shenzhen.aliyuncs.com
|
||||
access_key_id = accesskeyid
|
||||
secret_access_key = secretaccesskey
|
||||
endpoint = oss-cn-hangzhou.aliyuncs.com
|
||||
acl = private
|
||||
force_path_style = false
|
||||
storage_class = Standard
|
||||
--------------------
|
||||
y) Yes this is OK
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
### Netease NOS ###
|
||||
|
||||
For Netease NOS configure as per the configurator `rclone config`
|
||||
setting the provider `Netease`. This will automatically set
|
||||
`force_path_style = false` which is necessary for it to run properly.
|
||||
|
||||
@@ -124,11 +124,15 @@ The SFTP remote supports three authentication methods:
|
||||
* Key file
|
||||
* ssh-agent
|
||||
|
||||
Key files should be unencrypted PEM-encoded private key files. For
|
||||
instance `/home/$USER/.ssh/id_rsa`.
|
||||
Key files should be PEM-encoded private key files. For instance `/home/$USER/.ssh/id_rsa`.
|
||||
Only unencrypted OpenSSH or PEM encrypted files are supported.
|
||||
|
||||
If you don't specify `pass` or `key_file` then rclone will attempt to
|
||||
contact an ssh-agent.
|
||||
If you don't specify `pass` or `key_file` then rclone will attempt to contact an ssh-agent.
|
||||
|
||||
You can also specify `key_use_agent` to force the usage of an ssh-agent. In this case
|
||||
`key_file` can also be specified to force the usage of a specific key in the ssh-agent.
|
||||
|
||||
Using an ssh-agent is the only way to load encrypted OpenSSH keys at the moment.
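
A sketch of the relevant parts of an SFTP remote using these options (the host, user and key path are placeholders):

```
[mysftp]
type = sftp
host = example.com
user = backup
key_file = /home/backup/.ssh/id_rsa
# or, to pick this key out of a running ssh-agent instead:
# key_use_agent = true
```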
|
||||
|
||||
If you set the `--sftp-ask-password` option, rclone will prompt for a
|
||||
password when needed and no password has been configured.
|
||||
@@ -204,13 +208,38 @@ SSH password, leave blank to use ssh-agent.
|
||||
|
||||
#### --sftp-key-file
|
||||
|
||||
Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
|
||||
- Config: key_file
|
||||
- Env Var: RCLONE_SFTP_KEY_FILE
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --sftp-key-file-pass
|
||||
|
||||
The passphrase to decrypt the PEM-encoded private key file.
|
||||
|
||||
Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
|
||||
in the new OpenSSH format can't be used.
|
||||
|
||||
- Config: key_file_pass
|
||||
- Env Var: RCLONE_SFTP_KEY_FILE_PASS
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --sftp-key-use-agent
|
||||
|
||||
When set forces the usage of the ssh-agent.
|
||||
|
||||
When key-file is also set, the ".pub" file of the specified key-file is read and only the associated key is
|
||||
requested from the ssh-agent. This avoids `Too many authentication failures for *username*` errors
|
||||
when the ssh-agent contains many keys.
|
||||
|
||||
- Config: key_use_agent
|
||||
- Env Var: RCLONE_SFTP_KEY_USE_AGENT
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --sftp-use-insecure-cipher
|
||||
|
||||
Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
|
||||
@@ -253,7 +253,7 @@ pass = encryptedpassword
|
||||
|
||||
### dCache ###
|
||||
|
||||
dCache is a storage system with WebDAV doors that support, beside basic and x509,
|
||||
[dCache](https://www.dcache.org/) is a storage system with WebDAV doors that support, besides basic and x509,
|
||||
authentication with [Macaroons](https://www.dcache.org/manuals/workshop-2017-05-29-Umea/000-Final/anupam_macaroons_v02.pdf) (bearer tokens).
|
||||
|
||||
Configure as normal using the `other` type. Don't enter a username or
|
||||
@@ -271,5 +271,5 @@ pass =
|
||||
bearer_token = your-macaroon
|
||||
```
|
||||
|
||||
There is a [script](https://github.com/onnozweers/dcache-scripts/blob/master/get-share-link) that
|
||||
There is a [script](https://github.com/sara-nl/GridScripts/blob/master/get-macaroon) that
|
||||
obtains a Macaroon from a dCache WebDAV endpoint, and creates an rclone config file.
|
||||
|
||||
@@ -127,6 +127,19 @@ does not take any path arguments.
|
||||
To view your current quota you can use the `rclone about remote:`
|
||||
command which will display your usage limit (quota) and the current usage.
|
||||
|
||||
### Limitations ###
|
||||
|
||||
When uploading very large files (bigger than about 5GB) you will need
|
||||
to increase the `--timeout` parameter. This is because Yandex pauses
|
||||
(perhaps to calculate the MD5SUM for the entire file) before returning
|
||||
confirmation that the file has been uploaded. The default handling of
|
||||
timeouts in rclone is to assume a 5 minute pause is an error and close
|
||||
the connection - you'll see `net/http: timeout awaiting response
|
||||
headers` errors in the logs if this is happening. Setting the timeout
|
||||
to twice the max size of file in GB should be enough, so if you want
|
||||
to upload a 30GB file set a timeout of `2 * 30 = 60m`, that is
|
||||
`--timeout 60m`.
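
For example, for a file of roughly 30GB (the remote name and path are placeholders):

```
rclone copy --timeout 60m /path/to/30GB-file yandex:backup
```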
|
||||
|
||||
<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/yandex/yandex.go then run make backenddocs -->
|
||||
### Standard Options
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/asyncreader"
|
||||
@@ -243,6 +244,24 @@ func (acc *Account) eta() (etaDuration time.Duration, ok bool) {
|
||||
return eta(acc.bytes, acc.size, acc.avg)
|
||||
}
|
||||
|
||||
// shortenName shortens in to size runes long
|
||||
// If size <= 0 then in is left untouched
|
||||
func shortenName(in string, size int) string {
|
||||
if size <= 0 {
|
||||
return in
|
||||
}
|
||||
if utf8.RuneCountInString(in) <= size {
|
||||
return in
|
||||
}
|
||||
name := []rune(in)
|
||||
size-- // don't count ellipsis rune
|
||||
suffixLength := size / 2
|
||||
prefixLength := size - suffixLength
|
||||
suffixStart := len(name) - suffixLength
|
||||
name = append(append(name[:prefixLength], '…'), name[suffixStart:]...)
|
||||
return string(name)
|
||||
}
|
||||
|
||||
// String produces stats for this file
|
||||
func (acc *Account) String() string {
|
||||
a, b := acc.progress()
|
||||
@@ -257,16 +276,6 @@ func (acc *Account) String() string {
|
||||
}
|
||||
}
|
||||
|
||||
name := []rune(acc.name)
|
||||
if fs.Config.StatsFileNameLength > 0 {
|
||||
if len(name) > fs.Config.StatsFileNameLength {
|
||||
suffixLength := fs.Config.StatsFileNameLength / 2
|
||||
prefixLength := fs.Config.StatsFileNameLength - suffixLength
|
||||
suffixStart := len(name) - suffixLength
|
||||
name = append(append(name[:prefixLength], '…'), name[suffixStart:]...)
|
||||
}
|
||||
}
|
||||
|
||||
if fs.Config.DataRateUnit == "bits" {
|
||||
cur = cur * 8
|
||||
}
|
||||
@@ -276,11 +285,11 @@ func (acc *Account) String() string {
|
||||
percentageDone = int(100 * float64(a) / float64(b))
|
||||
}
|
||||
|
||||
done := fmt.Sprintf("%2d%% /%s", percentageDone, fs.SizeSuffix(b))
|
||||
|
||||
return fmt.Sprintf("%45s: %s, %s/s, %s",
|
||||
string(name),
|
||||
done,
|
||||
return fmt.Sprintf("%*s:%3d%% /%s, %s/s, %s",
|
||||
fs.Config.StatsFileNameLength,
|
||||
shortenName(acc.name, fs.Config.StatsFileNameLength),
|
||||
percentageDone,
|
||||
fs.SizeSuffix(b),
|
||||
fs.SizeSuffix(cur),
|
||||
etas,
|
||||
)
|
||||
|
||||
@@ -2,10 +2,12 @@ package accounting
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/asyncreader"
|
||||
@@ -208,3 +210,46 @@ func TestAccountMaxTransfer(t *testing.T) {
|
||||
assert.Equal(t, ErrorMaxTransferLimitReached, err)
|
||||
assert.True(t, fserrors.IsFatalError(err))
|
||||
}
|
||||
|
||||
func TestShortenName(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
size int
|
||||
want string
|
||||
}{
|
||||
{"", 0, ""},
|
||||
{"abcde", 10, "abcde"},
|
||||
{"abcde", 0, "abcde"},
|
||||
{"abcde", -1, "abcde"},
|
||||
{"abcde", 5, "abcde"},
|
||||
{"abcde", 4, "ab…e"},
|
||||
{"abcde", 3, "a…e"},
|
||||
{"abcde", 2, "a…"},
|
||||
{"abcde", 1, "…"},
|
||||
{"abcdef", 6, "abcdef"},
|
||||
{"abcdef", 5, "ab…ef"},
|
||||
{"abcdef", 4, "ab…f"},
|
||||
{"abcdef", 3, "a…f"},
|
||||
{"abcdef", 2, "a…"},
|
||||
{"áßcdèf", 1, "…"},
|
||||
{"áßcdè", 5, "áßcdè"},
|
||||
{"áßcdè", 4, "áß…è"},
|
||||
{"áßcdè", 3, "á…è"},
|
||||
{"áßcdè", 2, "á…"},
|
||||
{"áßcdè", 1, "…"},
|
||||
{"áßcdèł", 6, "áßcdèł"},
|
||||
{"áßcdèł", 5, "áß…èł"},
|
||||
{"áßcdèł", 4, "áß…ł"},
|
||||
{"áßcdèł", 3, "á…ł"},
|
||||
{"áßcdèł", 2, "á…"},
|
||||
{"áßcdèł", 1, "…"},
|
||||
} {
|
||||
t.Run(fmt.Sprintf("in=%q, size=%d", test.in, test.size), func(t *testing.T) {
|
||||
got := shortenName(test.in, test.size)
|
||||
assert.Equal(t, test.want, got)
|
||||
if test.size > 0 {
|
||||
assert.True(t, utf8.RuneCountInString(got) <= test.size, "too big")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -92,8 +92,8 @@ type StatsInfo struct {
|
||||
// NewStats creates an initialised StatsInfo
|
||||
func NewStats() *StatsInfo {
|
||||
return &StatsInfo{
|
||||
checking: newStringSet(fs.Config.Checkers),
|
||||
transferring: newStringSet(fs.Config.Transfers),
|
||||
checking: newStringSet(fs.Config.Checkers, "checking"),
|
||||
transferring: newStringSet(fs.Config.Transfers, "transferring"),
|
||||
start: time.Now(),
|
||||
inProgress: newInProgress(),
|
||||
}
|
||||
@@ -320,6 +320,13 @@ func (s *StatsInfo) GetLastError() error {
|
||||
return s.lastError
|
||||
}
|
||||
|
||||
// GetChecks returns the number of checks
|
||||
func (s *StatsInfo) GetChecks() int64 {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.checks
|
||||
}
|
||||
|
||||
// FatalError sets the fatalError flag
|
||||
func (s *StatsInfo) FatalError() {
|
||||
s.mu.Lock()
|
||||
|
||||
@@ -1,21 +1,26 @@
|
||||
package accounting
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
// stringSet holds a set of strings
|
||||
type stringSet struct {
|
||||
mu sync.RWMutex
|
||||
items map[string]struct{}
|
||||
name string
|
||||
}
|
||||
|
||||
// newStringSet creates a new empty string set of capacity size
|
||||
func newStringSet(size int) *stringSet {
|
||||
func newStringSet(size int, name string) *stringSet {
|
||||
return &stringSet{
|
||||
items: make(map[string]struct{}, size),
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,7 +62,11 @@ func (ss *stringSet) Strings() []string {
|
||||
if acc := Stats.inProgress.get(name); acc != nil {
|
||||
out = acc.String()
|
||||
} else {
|
||||
out = name
|
||||
out = fmt.Sprintf("%*s: %s",
|
||||
fs.Config.StatsFileNameLength,
|
||||
shortenName(name, fs.Config.StatsFileNameLength),
|
||||
ss.name,
|
||||
)
|
||||
}
|
||||
strings = append(strings, " * "+out)
|
||||
}
|
||||
|
||||
@@ -62,6 +62,7 @@ type ConfigInfo struct {
|
||||
MaxDepth int
|
||||
IgnoreSize bool
|
||||
IgnoreChecksum bool
|
||||
NoTraverse bool
|
||||
NoUpdateModTime bool
|
||||
DataRateUnit string
|
||||
BackupDir string
|
||||
@@ -84,6 +85,7 @@ type ConfigInfo struct {
|
||||
MaxBacklog int
|
||||
StatsOneLine bool
|
||||
Progress bool
|
||||
Cookie bool
|
||||
}
|
||||
|
||||
// NewConfig creates a new config with everything set to the default
|
||||
@@ -108,7 +110,7 @@ func NewConfig() *ConfigInfo {
|
||||
c.BufferSize = SizeSuffix(16 << 20)
|
||||
c.UserAgent = "rclone/" + Version
|
||||
c.StreamingUploadCutoff = SizeSuffix(100 * 1024)
|
||||
c.StatsFileNameLength = 40
|
||||
c.StatsFileNameLength = 45
|
||||
c.AskPassword = true
|
||||
c.TPSLimitBurst = 1
|
||||
c.MaxTransfer = -1
|
||||
|
||||
@@ -27,6 +27,7 @@ import (
|
||||
"github.com/Unknwon/goconfig"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/driveletter"
|
||||
@@ -57,8 +58,8 @@ const (
|
||||
// ConfigTokenURL is the config key used to store the token server endpoint
|
||||
ConfigTokenURL = "token_url"
|
||||
|
||||
// ConfigAutomatic indicates that we want non-interactive configuration
|
||||
ConfigAutomatic = "config_automatic"
|
||||
// ConfigAuthorize indicates that we just want "rclone authorize"
|
||||
ConfigAuthorize = "config_authorize"
|
||||
)
|
||||
|
||||
// Global
|
||||
@@ -575,6 +576,17 @@ func SetValueAndSave(name, key, value string) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// FileGetFresh reads the config key under section return the value or
|
||||
// an error if the config file was not found or that value couldn't be
|
||||
// read.
|
||||
func FileGetFresh(section, key string) (value string, err error) {
|
||||
reloadedConfigFile, err := loadConfigFile()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return reloadedConfigFile.GetValue(section, key)
|
||||
}
|
||||
|
||||
// ShowRemotes shows an overview of the config file
|
||||
func ShowRemotes() {
|
||||
remotes := getConfigData().GetSectionList()
|
||||
@@ -628,21 +640,38 @@ func Command(commands []string) byte {
|
||||
}
|
||||
}
|
||||
|
||||
// ConfirmWithDefault asks the user for Yes or No and returns true or false.
|
||||
//
|
||||
// If AutoConfirm is set, it will return the Default value passed in
|
||||
func ConfirmWithDefault(Default bool) bool {
|
||||
if fs.Config.AutoConfirm {
|
||||
return Default
|
||||
}
|
||||
return Command([]string{"yYes", "nNo"}) == 'y'
|
||||
}
|
||||
|
||||
// Confirm asks the user for Yes or No and returns true or false
|
||||
//
|
||||
// If AutoConfirm is set, it will return true
|
||||
func Confirm() bool {
|
||||
return ConfirmWithDefault(true)
|
||||
return Command([]string{"yYes", "nNo"}) == 'y'
|
||||
}
|
||||
|
||||
// ConfirmWithConfig asks the user for Yes or No and returns true or
|
||||
// false.
|
||||
//
|
||||
// If AutoConfirm is set, it will look up the value in m and return
|
||||
// that, but if it isn't set then it will return the Default value
|
||||
// passed in
|
||||
func ConfirmWithConfig(m configmap.Getter, configName string, Default bool) bool {
|
||||
if fs.Config.AutoConfirm {
|
||||
configString, ok := m.Get(configName)
|
||||
if ok {
|
||||
configValue, err := strconv.ParseBool(configString)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Failed to parse config parameter %s=%q as boolean - using default %v: %v", configName, configString, Default, err)
|
||||
} else {
|
||||
Default = configValue
|
||||
}
|
||||
}
|
||||
answer := "No"
|
||||
if Default {
|
||||
answer = "Yes"
|
||||
}
|
||||
fmt.Printf("Auto confirm is set: answering %s, override by setting config parameter %s=%v\n", answer, configName, !Default)
|
||||
return Default
|
||||
}
|
||||
return Confirm()
|
||||
}
|
||||
|
||||
// Choose one of the defaults or type a new string if newOk is set
|
||||
@@ -932,8 +961,6 @@ func CreateRemote(name string, provider string, keyValues rc.Params) error {
|
||||
getConfigData().DeleteSection(name)
|
||||
// Set the type
|
||||
getConfigData().SetValue(name, "type", provider)
|
||||
// Show this is automatically configured
|
||||
getConfigData().SetValue(name, ConfigAutomatic, "yes")
|
||||
// Set the remaining values
|
||||
return UpdateRemote(name, keyValues)
|
||||
}
|
||||
@@ -1216,6 +1243,7 @@ func SetPassword() {
|
||||
// rclone authorize "fs name"
|
||||
// rclone authorize "fs name" "client id" "client secret"
|
||||
func Authorize(args []string) {
|
||||
defer suppressConfirm()()
|
||||
switch len(args) {
|
||||
case 1, 3:
|
||||
default:
|
||||
@@ -1232,8 +1260,8 @@ func Authorize(args []string) {
|
||||
// Make sure we delete it
|
||||
defer DeleteRemote(name)
|
||||
|
||||
// Indicate that we want fully automatic configuration.
|
||||
getConfigData().SetValue(name, ConfigAutomatic, "yes")
|
||||
// Indicate that we are running rclone authorize
|
||||
getConfigData().SetValue(name, ConfigAuthorize, "true")
|
||||
if len(args) == 3 {
|
||||
getConfigData().SetValue(name, ConfigClientID, args[1])
|
||||
getConfigData().SetValue(name, ConfigClientSecret, args[2])
|
||||
|
||||
@@ -27,7 +27,6 @@ var (
|
||||
deleteAfter bool
|
||||
bindAddr string
|
||||
disableFeatures string
|
||||
noTraverse bool
|
||||
)
|
||||
|
||||
// AddFlags adds the non filing system specific flags to the command
|
||||
@@ -41,7 +40,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
|
||||
flags.IntVarP(flagSet, &fs.Config.Transfers, "transfers", "", fs.Config.Transfers, "Number of file transfers to run in parallel.")
|
||||
flags.StringVarP(flagSet, &config.ConfigPath, "config", "", config.ConfigPath, "Config file.")
|
||||
flags.StringVarP(flagSet, &config.CacheDir, "cache-dir", "", config.CacheDir, "Directory rclone will use for caching.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.CheckSum, "checksum", "c", fs.Config.CheckSum, "Skip based on checksum & size, not mod-time & size")
|
||||
flags.BoolVarP(flagSet, &fs.Config.CheckSum, "checksum", "c", fs.Config.CheckSum, "Skip based on checksum (if available) & size, not mod-time & size")
|
||||
flags.BoolVarP(flagSet, &fs.Config.SizeOnly, "size-only", "", fs.Config.SizeOnly, "Skip based on size only, not mod-time or checksum")
|
||||
flags.BoolVarP(flagSet, &fs.Config.IgnoreTimes, "ignore-times", "I", fs.Config.IgnoreTimes, "Don't skip files that match size and time - transfer all files")
|
||||
flags.BoolVarP(flagSet, &fs.Config.IgnoreExisting, "ignore-existing", "", fs.Config.IgnoreExisting, "Skip all files that exist on destination")
|
||||
@@ -65,7 +64,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
|
||||
flags.IntVarP(flagSet, &fs.Config.MaxDepth, "max-depth", "", fs.Config.MaxDepth, "If set limits the recursion depth to this.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.IgnoreSize, "ignore-size", "", false, "Ignore size when skipping use mod-time or checksum.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.IgnoreChecksum, "ignore-checksum", "", fs.Config.IgnoreChecksum, "Skip post copy check of checksums.")
|
||||
flags.BoolVarP(flagSet, &noTraverse, "no-traverse", "", noTraverse, "Obsolete - does nothing.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.NoTraverse, "no-traverse", "", fs.Config.NoTraverse, "Don't traverse destination file system on copy.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.NoUpdateModTime, "no-update-modtime", "", fs.Config.NoUpdateModTime, "Don't update destination mod-time if files identical.")
|
||||
flags.StringVarP(flagSet, &fs.Config.BackupDir, "backup-dir", "", fs.Config.BackupDir, "Make backups into hierarchy based in DIR.")
|
||||
flags.StringVarP(flagSet, &fs.Config.Suffix, "suffix", "", fs.Config.Suffix, "Suffix for use with --backup-dir.")
|
||||
@@ -88,6 +87,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
|
||||
flags.IntVarP(flagSet, &fs.Config.MaxBacklog, "max-backlog", "", fs.Config.MaxBacklog, "Maximum number of objects in sync or check backlog.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.StatsOneLine, "stats-one-line", "", fs.Config.StatsOneLine, "Make the stats fit on one line.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.Progress, "progress", "P", fs.Config.Progress, "Show progress during transfer.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.Cookie, "use-cookies", "", fs.Config.Cookie, "Enable session cookiejar.")
|
||||
}
|
||||
|
||||
// SetFlags converts any flags into config which weren't straightforward
|
||||
@@ -113,10 +113,6 @@ func SetFlags() {
|
||||
}
|
||||
}
|
||||
|
||||
if noTraverse {
|
||||
fs.Logf(nil, "--no-traverse is obsolete and no longer needed - please remove")
|
||||
}
|
||||
|
||||
if dumpHeaders {
|
||||
fs.Config.Dump |= fs.DumpHeaders
|
||||
fs.Logf(nil, "--dump-headers is obsolete - please use --dump headers instead")
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// Active is the globally active filter
|
||||
@@ -511,17 +512,33 @@ func (f *Filter) MakeListR(NewObject func(remote string) (fs.Object, error)) fs.
|
||||
if !f.HaveFilesFrom() {
|
||||
return errFilesFromNotSet
|
||||
}
|
||||
var entries fs.DirEntries
|
||||
for remote := range f.files {
|
||||
entry, err := NewObject(remote)
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// Skip files that are not found
|
||||
} else if err != nil {
|
||||
return err
|
||||
} else {
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
var (
|
||||
remotes = make(chan string, fs.Config.Checkers)
|
||||
g errgroup.Group
|
||||
)
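// Start fs.Config.Checkers workers; each turns remotes read from the channel into objects and passes them to the callback one at a time.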
|
||||
for i := 0; i < fs.Config.Checkers; i++ {
|
||||
g.Go(func() (err error) {
|
||||
var entries = make(fs.DirEntries, 1)
|
||||
for remote := range remotes {
|
||||
entries[0], err = NewObject(remote)
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// Skip files that are not found
|
||||
} else if err != nil {
|
||||
return err
|
||||
} else {
|
||||
err = callback(entries)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
return callback(entries)
|
||||
for remote := range f.files {
|
||||
remotes <- remote
|
||||
}
|
||||
close(remotes)
|
||||
return g.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -220,7 +221,10 @@ func TestNewFilterMakeListR(t *testing.T) {
|
||||
|
||||
// NewObject function for MakeListR
|
||||
newObjects := FilesMap{}
|
||||
var newObjectMu sync.Mutex
|
||||
NewObject := func(remote string) (fs.Object, error) {
|
||||
newObjectMu.Lock()
|
||||
defer newObjectMu.Unlock()
|
||||
if remote == "notfound" {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
} else if remote == "error" {
|
||||
@@ -233,7 +237,10 @@ func TestNewFilterMakeListR(t *testing.T) {
|
||||
|
||||
// Callback for ListRFn
|
||||
listRObjects := FilesMap{}
|
||||
var callbackMu sync.Mutex
|
||||
listRcallback := func(entries fs.DirEntries) error {
|
||||
callbackMu.Lock()
|
||||
defer callbackMu.Unlock()
|
||||
for _, entry := range entries {
|
||||
listRObjects[entry.Remote()] = struct{}{}
|
||||
}
|
||||
|
||||
@@ -7,12 +7,14 @@ import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/cookiejar"
|
||||
"net/http/httputil"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"golang.org/x/net/publicsuffix"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
@@ -22,9 +24,10 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
transport http.RoundTripper
|
||||
noTransport sync.Once
|
||||
tpsBucket *rate.Limiter // for limiting number of http transactions per second
|
||||
transport http.RoundTripper
|
||||
noTransport sync.Once
|
||||
tpsBucket *rate.Limiter // for limiting number of http transactions per second
|
||||
cookieJar, _ = cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
|
||||
)
|
||||
|
||||
// StartHTTPTokenBucket starts the token bucket if necessary
|
||||
@@ -142,9 +145,13 @@ func NewTransport(ci *fs.ConfigInfo) http.RoundTripper {
|
||||
|
||||
// NewClient returns an http.Client with the correct timeouts
|
||||
func NewClient(ci *fs.ConfigInfo) *http.Client {
|
||||
return &http.Client{
|
||||
transport := &http.Client{
|
||||
Transport: NewTransport(ci),
|
||||
}
|
||||
if ci.Cookie {
|
||||
transport.Jar = cookieJar
|
||||
}
|
||||
return transport
|
||||
}
|
||||
|
||||
// Transport is a our http Transport which wraps an http.Transport
|
||||
Some files were not shown because too many files have changed in this diff.