Mirror of https://github.com/rclone/rclone.git (synced 2025-12-28 22:23:41 +00:00)

Compare commits: experiment...crypt-pass (78 commits)
Commits (SHA1 only; the author and date columns did not survive extraction):

2769321555, c496efe9a4, cf583e0237, f09d0f5fef, 1e6cbaa355, be643ecfbc, 0c4ed35b9b,
4e4feebf0a, 291f270904, f799be1d6a, 74297a0c55, 7e13103ba2, 34baf05d9d, 38c0018906,
6f25e48cbb, 7e99abb5da, 629019c3e4, 1402fcb234, b26276b416, e317f04098, 65ff330602,
52763e1918, 23e06cedbd, b369fcde28, c294068780, 8a774a3dd4, 53a8b5a275, bbd03f49a4,
e31578e03c, 0855608bc1, f8dbf8292a, 144daec800, 6a832b7173, 184a9c8da6, 88592a1779,
92fa30a787, e4dfe78ef0, ba84eecd94, ea12d76c03, 5f0a8a4e28, 2fc095cd3e, a2341cc412,
9685be64cd, 39f5059d48, a30e80564d, 8e107b9657, 21a0693b79, 4846d9393d, fc4f20d52f,
60558b5d37, 5990573ccd, bd11d3cb62, 5e5578d2c3, 1318c6aec8, f29757de3b, f397c35935,
f365230aea, ff0b8e10af, 8d16a5693c, 781142a73f, f471a7e3f5, d7a1fd2a6b, 7782eda88e,
d08453d402, 71e98ea584, 42d997f639, 571b4c060b, ff72059a94, 2e6ef4f6ec, 0ec6dd9f4b,
0b7fdf16a2, 5edfd31a6d, 7ee7bc87ae, 1433558c01, 0458b961c5, c1998c4efe, 49da220b65,
554ee0d963
.circleci/config.yml

@@ -1,3 +1,4 @@
+---
 version: 2

 jobs:
@@ -13,10 +14,10 @@ jobs:
       - run:
           name: Cross-compile rclone
           command: |
-            docker pull billziss/xgo-cgofuse
+            docker pull rclone/xgo-cgofuse
             go get -v github.com/karalabe/xgo
             xgo \
-                --image=billziss/xgo-cgofuse \
+                --image=rclone/xgo-cgofuse \
                 --targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
                 -tags cmount \
                 .
@@ -29,6 +30,21 @@ jobs:
           command: |
-            mkdir -p /tmp/rclone.dist
-            cp -R rclone-* /tmp/rclone.dist
+            mkdir build
+            cp -R rclone-* build/
+
+      - run:
+          name: Build rclone
+          command: |
+            go version
+            go build
+
+      - run:
+          name: Upload artifacts
+          command: |
+            if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
+              make circleci_upload
+            fi

       - store_artifacts:
           path: /tmp/rclone.dist
CONTRIBUTING.md

@@ -351,6 +351,12 @@ Unit tests
 Integration tests

+  * Add your backend to `fstest/test_all/config.yaml`
+  * Once you've done that then you can use the integration test framework from the project root:
+    * go install ./...
+    * test_all -backend remote
+
+Or if you want to run the integration tests manually:

   * Make sure integration tests pass with
     * `cd fs/operations`
     * `go test -v -remote TestRemote:`

@@ -372,4 +378,3 @@ Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](
   * `docs/content/about.md` - front page of rclone.org
   * `docs/layouts/chrome/navbar.html` - add it to the website navigation
   * `bin/make_manual.py` - add the page to the `docs` constant
-  * `cmd/cmd.go` - the main help for rclone
Makefile (9 lines changed)

@@ -67,7 +67,7 @@ ifdef FULL_TESTS
 	go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
 	errcheck $(BUILDTAGS) ./...
 	find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
-	go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
+	go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl|ApplicationCredentialId)' ; test $$? -eq 1
 else
 	@echo Skipping source quality tests as version of go too old
 endif

@@ -185,6 +185,13 @@ ifndef BRANCH_PATH
 endif
 	@echo Beta release ready at $(BETA_URL)

+circleci_upload:
+	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
+ifndef BRANCH_PATH
+	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
+endif
+	@echo Beta release ready at $(BETA_URL)/testbuilds
+
 BUILD_FLAGS := -exclude "^(windows|darwin)/"
 ifeq ($(TRAVIS_OS_NAME),osx)
 	BUILD_FLAGS := -include "^darwin/" -cgo
README.md

@@ -20,6 +20,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and

 ## Storage providers

+* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
 * Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
 * Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
 * Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
@@ -50,6 +51,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 * put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
 * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
 * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
+* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
 * SFTP [:page_facing_up:](https://rclone.org/sftp/)
 * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
 * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
backend/azureblob/azureblob.go

@@ -754,6 +754,35 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 	return fs, fs.Update(in, src, options...)
 }

+// Check if the container exists
+//
+// NB this can return incorrect results if called immediately after container deletion
+func (f *Fs) dirExists() (bool, error) {
+	options := azblob.ListBlobsSegmentOptions{
+		Details: azblob.BlobListingDetails{
+			Copy:             false,
+			Metadata:         false,
+			Snapshots:        false,
+			UncommittedBlobs: false,
+			Deleted:          false,
+		},
+		MaxResults: 1,
+	}
+	err := f.pacer.Call(func() (bool, error) {
+		ctx := context.Background()
+		_, err := f.cntURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "", options)
+		return f.shouldRetry(err)
+	})
+	if err == nil {
+		return true, nil
+	}
+	// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
+	if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
+		return false, nil
+	}
+	return false, err
+}
+
 // Mkdir creates the container if it doesn't exist
 func (f *Fs) Mkdir(dir string) error {
 	f.containerOKMu.Lock()
@@ -761,6 +790,15 @@ func (f *Fs) Mkdir(dir string) error {
 	if f.containerOK {
 		return nil
 	}
+	if !f.containerDeleted {
+		exists, err := f.dirExists()
+		if err == nil {
+			f.containerOK = exists
+		}
+		if err != nil || exists {
+			return err
+		}
+	}

 	// now try to create the container
 	err := f.pacer.Call(func() (bool, error) {
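The `dirExists` probe above checks for container existence by listing at most one blob and mapping a not-found response to `(false, nil)` while propagating every other error. A minimal, SDK-free sketch of that three-way classification (the `lister` interface, `fakeClient`, and `errNotFound` sentinel are illustrative stand-ins, not Azure SDK or rclone types):

```go
package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for the SDK's "container not found" error class.
var errNotFound = errors.New("container not found")

// lister is a hypothetical stand-in for a client that can list one blob.
type lister interface {
	ListOne() error
}

// exists maps the three possible outcomes of a minimal listing call:
// success -> (true, nil); not-found -> (false, nil); anything else -> (false, err).
func exists(l lister) (bool, error) {
	err := l.ListOne()
	if err == nil {
		return true, nil
	}
	if errors.Is(err, errNotFound) {
		return false, nil
	}
	return false, err
}

type fakeClient struct{ err error }

func (c fakeClient) ListOne() error { return c.err }

func main() {
	for _, c := range []fakeClient{{nil}, {errNotFound}, {errors.New("throttled")}} {
		ok, err := exists(c)
		fmt.Println(ok, err)
	}
}
```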
backend/b2/b2.go

@@ -120,20 +120,26 @@ these chunks are buffered in memory and there might be a maximum of
 minimum size.`,
 			Default:  fs.SizeSuffix(defaultChunkSize),
 			Advanced: true,
+		}, {
+			Name:     "disable_checksum",
+			Help:     `Disable checksums for large (> upload cutoff) files`,
+			Default:  false,
+			Advanced: true,
 		}},
 	})
 }

 // Options defines the configuration for this backend
 type Options struct {
 	Account         string        `config:"account"`
 	Key             string        `config:"key"`
 	Endpoint        string        `config:"endpoint"`
 	TestMode        string        `config:"test_mode"`
 	Versions        bool          `config:"versions"`
 	HardDelete      bool          `config:"hard_delete"`
 	UploadCutoff    fs.SizeSuffix `config:"upload_cutoff"`
 	ChunkSize       fs.SizeSuffix `config:"chunk_size"`
+	DisableCheckSum bool          `config:"disable_checksum"`
 }

 // Fs represents a remote b2 server
@@ -1500,11 +1506,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 		},
 		ContentLength: &size,
 	}
-	// for go1.8 (see release notes) we must nil the Body if we want a
-	// "Content-Length: 0" header which b2 requires for all files.
-	if size == 0 {
-		opts.Body = nil
-	}
 	var response api.FileInfo
 	// Don't retry, return a retry error instead
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
backend/b2/upload.go

@@ -116,8 +116,10 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
 		},
 	}
 	// Set the SHA1 if known
-	if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
-		request.Info[sha1Key] = calculatedSha1
+	if !o.fs.opt.DisableCheckSum {
+		if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
+			request.Info[sha1Key] = calculatedSha1
+		}
 	}
 	var response api.StartLargeFileResponse
 	err = f.pacer.Call(func() (bool, error) {
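This hunk gates the SHA-1 upload metadata behind the new `disable_checksum` option. A rough stand-alone sketch of the same gating logic (the `large_file_sha1` key name and the `buildInfo` helper are assumptions for illustration, not rclone's API):

```go
package main

import (
	"crypto/sha1"
	"fmt"
	"io"
	"strings"
)

// buildInfo attaches a SHA-1 to the upload metadata unless checksums are disabled.
func buildInfo(r io.Reader, disableChecksum bool) (map[string]string, error) {
	info := map[string]string{}
	if disableChecksum {
		return info, nil // skip hashing large files entirely
	}
	h := sha1.New()
	if _, err := io.Copy(h, r); err != nil {
		return nil, err
	}
	info["large_file_sha1"] = fmt.Sprintf("%x", h.Sum(nil))
	return info, nil
}

func main() {
	info, _ := buildInfo(strings.NewReader("hello"), false)
	fmt.Println(info) // checksum present
	info, _ = buildInfo(strings.NewReader("hello"), true)
	fmt.Println(info) // empty metadata
}
```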
backend/crypt/cipher.go

@@ -144,6 +144,7 @@ type cipher struct {
 	buffers        sync.Pool // encrypt/decrypt buffers
 	cryptoRand     io.Reader // read crypto random numbers from here
 	dirNameEncrypt bool
+	passCorrupted  bool
 }

 // newCipher initialises the cipher. If salt is "" then it uses a built in salt val
@@ -163,6 +164,11 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
 	return c, nil
 }

+// Set to pass corrupted blocks
+func (c *cipher) setPassCorrupted(passCorrupted bool) {
+	c.passCorrupted = passCorrupted
+}
+
 // Key creates all the internal keys from the password passed in using
 // scrypt.
 //
@@ -822,7 +828,10 @@ func (fh *decrypter) fillBuffer() (err error) {
 		if err != nil {
 			return err // return pending error as it is likely more accurate
 		}
-		return ErrorEncryptedBadBlock
+		if !fh.c.passCorrupted {
+			return ErrorEncryptedBadBlock
+		}
+		fs.Errorf(nil, "passing corrupted block")
 	}
 	fh.bufIndex = 0
 	fh.bufSize = n - blockHeaderSize
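The decrypter now optionally passes corrupted blocks through instead of failing the whole read. A toy model of that control flow, not rclone's actual decrypter:

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

var errBadBlock = errors.New("bad block")

// fillBuffer validates one block; if passCorrupted is set, a corrupt
// block is logged and passed through instead of aborting the read.
func fillBuffer(block []byte, ok bool, passCorrupted bool) ([]byte, error) {
	if !ok {
		if !passCorrupted {
			return nil, errBadBlock
		}
		log.Println("passing corrupted block")
	}
	return block, nil
}

func main() {
	if _, err := fillBuffer([]byte("x"), false, false); err != nil {
		fmt.Println("strict mode:", err)
	}
	b, _ := fillBuffer([]byte("x"), false, true)
	fmt.Println("debug mode passes", len(b), "byte(s) through")
}
```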
backend/crypt/crypt.go

@@ -17,7 +17,6 @@ import (
 	"github.com/pkg/errors"
 )

-// Globals
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -80,6 +79,15 @@ names, or for debugging purposes.`,
 			Default:  false,
 			Hide:     fs.OptionHideConfigurator,
 			Advanced: true,
+		}, {
+			Name: "pass_corrupted_blocks",
+			Help: `Pass through corrupted blocks to the output.
+
+This is for debugging corruption problems in crypt - it shouldn't be needed normally.
+`,
+			Default:  false,
+			Hide:     fs.OptionHideConfigurator,
+			Advanced: true,
 		}},
 	})
 }
@@ -108,6 +116,7 @@ func newCipherForConfig(opt *Options) (Cipher, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to make cipher")
 	}
+	cipher.setPassCorrupted(opt.PassCorruptedBlocks)
 	return cipher, nil
 }

@@ -197,6 +206,7 @@ type Options struct {
 	Password            string `config:"password"`
 	Password2           string `config:"password2"`
 	ShowMapping         bool   `config:"show_mapping"`
+	PassCorruptedBlocks bool   `config:"pass_corrupted_blocks"`
 }

 // Fs represents a wrapped fs.Fs
backend/drive/drive.go

@@ -39,6 +39,7 @@ import (
 	"github.com/ncw/rclone/lib/dircache"
 	"github.com/ncw/rclone/lib/oauthutil"
 	"github.com/ncw/rclone/lib/pacer"
+	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
 	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/google"
@@ -54,7 +55,8 @@ const (
 	driveFolderType = "application/vnd.google-apps.folder"
 	timeFormatIn    = time.RFC3339
 	timeFormatOut   = "2006-01-02T15:04:05.000000000Z07:00"
-	minSleep        = 10 * time.Millisecond
+	defaultMinSleep = fs.Duration(100 * time.Millisecond)
+	defaultBurst    = 100
 	defaultExportExtensions = "docx,xlsx,pptx,svg"
 	scopePrefix             = "https://www.googleapis.com/auth/"
 	defaultScope            = "drive"
@@ -356,6 +358,16 @@ will download it anyway.`,
 		Default:  fs.SizeSuffix(-1),
 		Help:     "If Object's are greater, use drive v2 API to download.",
 		Advanced: true,
+	}, {
+		Name:     "pacer_min_sleep",
+		Default:  defaultMinSleep,
+		Help:     "Minimum time to sleep between API calls.",
+		Advanced: true,
+	}, {
+		Name:     "pacer_burst",
+		Default:  defaultBurst,
+		Help:     "Number of API calls to allow without sleeping.",
+		Advanced: true,
 	}},
 })
@@ -398,6 +410,8 @@ type Options struct {
 	AcknowledgeAbuse    bool          `config:"acknowledge_abuse"`
 	KeepRevisionForever bool          `config:"keep_revision_forever"`
 	V2DownloadMinSize   fs.SizeSuffix `config:"v2_download_min_size"`
+	PacerMinSleep       fs.Duration   `config:"pacer_min_sleep"`
+	PacerBurst          int           `config:"pacer_burst"`
 }

 // Fs represents a remote drive server
@@ -718,12 +732,16 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er

 // Figure out if the user wants to use a team drive
 func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
+	// Stop if we are running non-interactive config
+	if fs.Config.AutoConfirm {
+		return nil
+	}
 	if opt.TeamDriveID == "" {
 		fmt.Printf("Configure this as a team drive?\n")
 	} else {
 		fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
 	}
-	if !config.ConfirmWithDefault(false) {
+	if !config.Confirm() {
 		return nil
 	}
 	client, err := createOAuthClient(opt, name, m)
@@ -740,7 +758,7 @@ func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
 	listFailed := false
 	for {
 		var teamDrives *drive.TeamDriveList
-		err = newPacer().Call(func() (bool, error) {
+		err = newPacer(opt).Call(func() (bool, error) {
 			teamDrives, err = listTeamDrives.Do()
 			return shouldRetry(err)
 		})
@@ -770,8 +788,8 @@ func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
 }

 // newPacer makes a pacer configured for drive
-func newPacer() *pacer.Pacer {
-	return pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer)
+func newPacer(opt *Options) *pacer.Pacer {
+	return pacer.New().SetMinSleep(time.Duration(opt.PacerMinSleep)).SetBurst(opt.PacerBurst).SetPacer(pacer.GoogleDrivePacer)
 }

 func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
@@ -875,7 +893,7 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
 		name:  name,
 		root:  root,
 		opt:   *opt,
-		pacer: newPacer(),
+		pacer: newPacer(opt),
 	}
 	f.isTeamDrive = opt.TeamDriveID != ""
 	f.features = (&fs.Features{
@@ -2477,16 +2495,32 @@ func (o *documentObject) Open(options ...fs.OpenOption) (in io.ReadCloser, err e
 	// Update the size with what we are reading as it can change from
 	// the HEAD in the listing to this GET. This stops rclone marking
 	// the transfer as corrupted.
+	var offset, end int64 = 0, -1
+	var newOptions = options[:0]
 	for _, o := range options {
 		// Note that Range requests don't work on Google docs:
 		// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
-		if _, ok := o.(*fs.RangeOption); ok {
-			return nil, errors.New("partial downloads are not supported while exporting Google Documents")
+		// So do a subset of them manually
+		switch x := o.(type) {
+		case *fs.RangeOption:
+			offset, end = x.Start, x.End
+		case *fs.SeekOption:
+			offset, end = x.Offset, -1
+		default:
+			newOptions = append(newOptions, o)
 		}
 	}
+	options = newOptions
+	if offset != 0 {
+		return nil, errors.New("partial downloads are not supported while exporting Google Documents")
+	}
 	in, err = o.baseObject.open(o.url, options...)
 	if in != nil {
 		in = &openDocumentFile{o: o, in: in}
 	}
+	if end >= 0 {
+		in = readers.NewLimitedReadCloser(in, end-offset+1)
+	}
 	return
 }
 func (o *linkObject) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
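Because Google Docs exports don't support HTTP Range requests, the new `Open` applies `RangeOption`/`SeekOption` by hand: a non-zero start offset is rejected, and an end bound is enforced by wrapping the stream in a limited reader. The same idea using only the standard library (function name and error text are illustrative):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// openWithRange simulates serving a range from a stream that only
// supports full downloads: offset != 0 is unsupported, and end
// (inclusive, -1 meaning "to EOF") is applied with io.LimitReader.
func openWithRange(body io.Reader, offset, end int64) (io.Reader, error) {
	if offset != 0 {
		return nil, fmt.Errorf("partial downloads from a non-zero offset are not supported")
	}
	if end >= 0 {
		body = io.LimitReader(body, end-offset+1)
	}
	return body, nil
}

func main() {
	r, err := openWithRange(strings.NewReader("exported document"), 0, 7)
	if err != nil {
		panic(err)
	}
	b, _ := ioutil.ReadAll(r)
	fmt.Printf("%q\n", b) // "exported" (bytes 0..7 inclusive)
}
```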
backend/ftp/ftp.go

@@ -646,7 +646,21 @@ func (f *ftpReadCloser) Read(p []byte) (n int, err error) {

 // Close the FTP reader and return the connection to the pool
 func (f *ftpReadCloser) Close() error {
-	err := f.rc.Close()
+	var err error
+	errchan := make(chan error, 1)
+	go func() {
+		errchan <- f.rc.Close()
+	}()
+	// Wait for Close for up to 60 seconds
+	timer := time.NewTimer(60 * time.Second)
+	select {
+	case err = <-errchan:
+		timer.Stop()
+	case <-timer.C:
+		// if timer fired assume no error but connection dead
+		fs.Errorf(f.f, "Timeout when waiting for connection Close")
+		return nil
+	}
 	// if errors while reading or closing, dump the connection
 	if err != nil || f.err != nil {
 		_ = f.c.Quit()
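The rewritten `Close` avoids hanging forever on a dead FTP data connection by racing the close call against a timer. The pattern in isolation, with a deliberately slow closer standing in for a stuck server:

```go
package main

import (
	"fmt"
	"io"
	"time"
)

// closeWithTimeout waits up to d for c.Close to return; if the timer
// fires first, the connection is presumed dead and no error is reported.
func closeWithTimeout(c io.Closer, d time.Duration) error {
	errchan := make(chan error, 1)
	go func() { errchan <- c.Close() }()
	timer := time.NewTimer(d)
	select {
	case err := <-errchan:
		timer.Stop()
		return err
	case <-timer.C:
		fmt.Println("timeout waiting for Close; assuming connection is dead")
		return nil
	}
}

type slowCloser struct{}

func (slowCloser) Close() error { time.Sleep(time.Hour); return nil }

func main() {
	fmt.Println(closeWithTimeout(slowCloser{}, 100*time.Millisecond))
}
```

The buffered channel matters: if the timeout fires first, the late `Close` result still has somewhere to go, so the goroutine doesn't leak blocked on a send.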
backend/jottacloud/api/types.go

@@ -9,7 +9,10 @@ import (
 )

 const (
+	// default time format for almost all request and responses
 	timeFormat = "2006-01-02-T15:04:05Z0700"
+	// the API server seems to use a different format
+	apiTimeFormat = "2006-01-02T15:04:05Z07:00"
 )

 // Time represents time values in the Jottacloud API. It uses a custom RFC3339 like format.
@@ -40,6 +43,9 @@ func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
 // Return Time string in Jottacloud format
 func (t Time) String() string { return time.Time(t).Format(timeFormat) }

+// APIString returns Time string in Jottacloud API format
+func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
+
 // Flag is a hacky type for checking if an attribute is present
 type Flag bool

@@ -58,6 +64,15 @@ func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
 	return attr, errors.New("unimplemented")
 }

+// TokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
+type TokenJSON struct {
+	AccessToken  string `json:"access_token"`
+	TokenType    string `json:"token_type"`
+	RefreshToken string `json:"refresh_token"`
+	ExpiresIn    int32  `json:"expires_in"` // at least PayPal returns string, while most return number
+}
+
 /*
 GET http://www.jottacloud.com/JFS/<account>

@@ -265,3 +280,37 @@ func (e *Error) Error() string {
 	}
 	return out
 }
+
+// AllocateFileRequest to prepare an upload to Jottacloud
+type AllocateFileRequest struct {
+	Bytes    int64  `json:"bytes"`
+	Created  string `json:"created"`
+	Md5      string `json:"md5"`
+	Modified string `json:"modified"`
+	Path     string `json:"path"`
+}
+
+// AllocateFileResponse for upload requests
+type AllocateFileResponse struct {
+	Name      string `json:"name"`
+	Path      string `json:"path"`
+	State     string `json:"state"`
+	UploadID  string `json:"upload_id"`
+	UploadURL string `json:"upload_url"`
+	Bytes     int64  `json:"bytes"`
+	ResumePos int64  `json:"resume_pos"`
+}
+
+// UploadResponse after an upload
+type UploadResponse struct {
+	Name      string      `json:"name"`
+	Path      string      `json:"path"`
+	Kind      string      `json:"kind"`
+	ContentID string      `json:"content_id"`
+	Bytes     int64       `json:"bytes"`
+	Md5       string      `json:"md5"`
+	Created   int64       `json:"created"`
+	Modified  int64       `json:"modified"`
+	Deleted   interface{} `json:"deleted"`
+	Mime      string      `json:"mime"`
+}
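These structs mirror the JSON bodies of the new files API. A quick demonstration of how an allocate response decodes (the sample JSON is invented for illustration; field names follow the struct tags above):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// AllocateFileResponse matches the fields added in the diff above.
type AllocateFileResponse struct {
	Name      string `json:"name"`
	Path      string `json:"path"`
	State     string `json:"state"`
	UploadID  string `json:"upload_id"`
	UploadURL string `json:"upload_url"`
	Bytes     int64  `json:"bytes"`
	ResumePos int64  `json:"resume_pos"`
}

func main() {
	// Hypothetical server reply for a half-finished upload.
	data := []byte(`{"name":"file.bin","path":"/u/Jotta/Sync/file.bin",
		"state":"INCOMPLETE","upload_id":"abc","upload_url":"https://example.com/up",
		"bytes":100,"resume_pos":40}`)
	var r AllocateFileResponse
	if err := json.Unmarshal(data, &r); err != nil {
		panic(err)
	}
	fmt.Printf("state=%s resume at byte %d of %d\n", r.State, r.ResumePos, r.Bytes)
}
```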
backend/jottacloud/jottacloud.go

@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"log"
 	"net/http"
 	"net/url"
 	"os"
@@ -26,22 +27,41 @@ import (
 	"github.com/ncw/rclone/fs/fshttp"
 	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/fs/walk"
+	"github.com/ncw/rclone/lib/oauthutil"
 	"github.com/ncw/rclone/lib/pacer"
 	"github.com/ncw/rclone/lib/rest"
 	"github.com/pkg/errors"
+	"golang.org/x/oauth2"
 )

-// Globals
 const (
 	minSleep          = 10 * time.Millisecond
 	maxSleep          = 2 * time.Second
 	decayConstant     = 2 // bigger for slower decay, exponential
 	defaultDevice     = "Jotta"
 	defaultMountpoint = "Sync"
 	rootURL           = "https://www.jottacloud.com/jfs/"
-	apiURL            = "https://api.jottacloud.com"
-	shareURL          = "https://www.jottacloud.com/"
+	apiURL            = "https://api.jottacloud.com/files/v1/"
+	baseURL           = "https://www.jottacloud.com/"
+	tokenURL          = "https://api.jottacloud.com/auth/v1/token"
 	cachePrefix       = "rclone-jcmd5-"
+
+	rcloneClientID              = "nibfk8biu12ju7hpqomr8b1e40"
+	rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
+	configUsername              = "user"
 )
+
+var (
+	// Description of how to auth for this app for a personal account
+	oauthConfig = &oauth2.Config{
+		Endpoint: oauth2.Endpoint{
+			AuthURL:  tokenURL,
+			TokenURL: tokenURL,
+		},
+		ClientID:     rcloneClientID,
+		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
+		RedirectURL:  oauthutil.RedirectLocalhostURL,
+	}
+)

 // Register with Fs
@@ -50,13 +70,71 @@ func init() {
 		Name:        "jottacloud",
 		Description: "JottaCloud",
 		NewFs:       NewFs,
+		Config: func(name string, m configmap.Mapper) {
+			tokenString, ok := m.Get("token")
+			if ok && tokenString != "" {
+				fmt.Printf("Already have a token - refresh?\n")
+				if !config.Confirm() {
+					return
+				}
+			}
+
+			username, ok := m.Get(configUsername)
+			if !ok {
+				log.Fatalf("No username defined")
+			}
+			password := config.GetPassword("Your Jottacloud password is only required during config and will not be stored.")
+
+			// prepare our token request with username and password
+			srv := rest.NewClient(fshttp.NewClient(fs.Config))
+			values := url.Values{}
+			values.Set("grant_type", "PASSWORD")
+			values.Set("password", password)
+			values.Set("username", username)
+			values.Set("client_id", oauthConfig.ClientID)
+			values.Set("client_secret", oauthConfig.ClientSecret)
+			opts := rest.Opts{
+				Method:      "POST",
+				RootURL:     oauthConfig.Endpoint.AuthURL,
+				ContentType: "application/x-www-form-urlencoded",
+				Parameters:  values,
+			}
+
+			var jsonToken api.TokenJSON
+			resp, err := srv.CallJSON(&opts, nil, &jsonToken)
+			if err != nil {
+				// if 2fa is enabled the first request is expected to fail. We'll do another request with the 2fa code as an additional http header
+				if resp != nil {
+					if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
+						fmt.Printf("This account has 2 factor authentication enabled; you will receive a verification code via SMS.\n")
+						fmt.Printf("Enter verification code> ")
+						authCode := config.ReadLine()
+						authCode = strings.Replace(authCode, "-", "", -1) // the SMS received contains a pair of 3 digit numbers separated by '-' but the server wants a single 6 digit number
+						opts.ExtraHeaders = make(map[string]string)
+						opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
+						resp, err = srv.CallJSON(&opts, nil, &jsonToken)
+					}
+				}
+				if err != nil {
+					log.Fatalf("Failed to get resource token: %v", err)
+				}
+			}
+
+			var token oauth2.Token
+			token.AccessToken = jsonToken.AccessToken
+			token.RefreshToken = jsonToken.RefreshToken
+			token.TokenType = jsonToken.TokenType
+			token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
+
+			// finally save them in the config
+			err = oauthutil.PutToken(name, m, &token, true)
+			if err != nil {
+				log.Fatalf("Error while setting token: %s", err)
+			}
+		},
 		Options: []fs.Option{{
-			Name: "user",
-			Help: "User Name",
-		}, {
-			Name:       "pass",
-			Help:       "Password.",
-			IsPassword: true,
+			Name: configUsername,
+			Help: "User Name:",
 		}, {
 			Name: "mountpoint",
 			Help: "The mountpoint to use.",
@@ -83,6 +161,11 @@ func init() {
 			Help:     "Remove existing public link to file/folder with link command rather than creating.\nDefault is false, meaning link command will create or retrieve public link.",
 			Default:  false,
 			Advanced: true,
+		}, {
+			Name:     "upload_resume_limit",
+			Help:     "Files bigger than this can be resumed if the upload fails.",
+			Default:  fs.SizeSuffix(10 * 1024 * 1024),
+			Advanced: true,
 		}},
 	})
 }
@@ -90,23 +173,25 @@ func init() {
 // Options defines the configuration for this backend
 type Options struct {
 	User               string        `config:"user"`
-	Pass               string        `config:"pass"`
 	Mountpoint         string        `config:"mountpoint"`
 	MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
 	HardDelete         bool          `config:"hard_delete"`
 	Unlink             bool          `config:"unlink"`
+	UploadThreshold    fs.SizeSuffix `config:"upload_resume_limit"`
 }

 // Fs represents a remote jottacloud
 type Fs struct {
 	name         string
 	root         string
 	user         string
 	opt          Options
 	features     *fs.Features
 	endpointURL  string
 	srv          *rest.Client
+	apiSrv       *rest.Client
 	pacer        *pacer.Pacer
+	tokenRenewer *oauthutil.Renew // renew the token on expiry
 }

 // Object describes a jottacloud object
@@ -261,6 +346,29 @@ func (o *Object) filePath() string {
 	return o.fs.filePath(o.remote)
 }

+// Jottacloud requires the grant_type 'refresh_token' string
+// to be uppercase and throws a 400 Bad Request if we use the
+// lower case used by the oauth2 module
+//
+// This filter catches all refresh requests, reads the body,
+// changes the case and then sends it on
+func grantTypeFilter(req *http.Request) {
+	if tokenURL == req.URL.String() {
+		// read the entire body
+		refreshBody, err := ioutil.ReadAll(req.Body)
+		if err != nil {
+			return
+		}
+		_ = req.Body.Close()
+
+		// make the refresh token upper case
+		refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
+
+		// set the new ReadCloser (with a dummy Close())
+		req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
+	}
+}
+
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
@@ -273,25 +381,29 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	rootIsDir := strings.HasSuffix(root, "/")
 	root = parsePath(root)

-	user := config.FileGet(name, "user")
-	pass := config.FileGet(name, "pass")
-
-	if opt.Pass != "" {
-		var err error
-		opt.Pass, err = obscure.Reveal(opt.Pass)
-		if err != nil {
-			return nil, errors.Wrap(err, "couldn't decrypt password")
-		}
-	}
+	// the oauth client for the api servers needs
+	// a filter to fix the grant_type issues (see above)
+	baseClient := fshttp.NewClient(fs.Config)
+	if do, ok := baseClient.Transport.(interface {
+		SetRequestFilter(f func(req *http.Request))
+	}); ok {
+		do.SetRequestFilter(grantTypeFilter)
+	} else {
+		fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
+	}
+	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
+	if err != nil {
+		return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
+	}

 	f := &Fs{
-		name: name,
-		root: root,
-		user: opt.User,
-		opt:  *opt,
-		//endpointURL: rest.URLPathEscape(path.Join(user, defaultDevice, opt.Mountpoint)),
-		srv:   rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(rootURL),
-		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+		name:   name,
+		root:   root,
+		user:   opt.User,
+		opt:    *opt,
+		srv:    rest.NewClient(oAuthClient).SetRoot(rootURL),
+		apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
+		pacer:  pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,
@@ -299,14 +411,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		ReadMimeType:  true,
 		WriteMimeType: true,
 	}).Fill(f)

-	if user == "" || pass == "" {
-		return nil, errors.New("jottacloud needs user and password")
-	}
-
-	f.srv.SetUserPass(opt.User, opt.Pass)
 	f.srv.SetErrorHandler(errorHandler)

+	// Renew the token in the background
+	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
+		_, err := f.readMetaDataForPath("")
+		return err
+	})
+
 	err = f.setEndpointURL(opt.Mountpoint)
 	if err != nil {
 		return nil, errors.Wrap(err, "couldn't get account info")
@@ -331,7 +443,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		// return an error with an fs which points to the parent
 		return f, fs.ErrorIsFile
 	}
-
 	return f, nil
 }
@@ -348,7 +459,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, e
 		// Set info
 		err = o.setMetaData(info)
 	} else {
-		err = o.readMetaData() // reads info and meta, returning an error
+		err = o.readMetaData(false) // reads info and meta, returning an error
 	}
 	if err != nil {
 		return nil, err
@@ -396,7 +507,7 @@ func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
-	//fmt.Printf("List: %s\n", dir)
+	//fmt.Printf("List: %s\n", f.filePath(dir))
 	opts := rest.Opts{
 		Method: "GET",
 		Path:   f.filePath(dir),
@@ -676,7 +787,6 @@ func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err erro
 	if err != nil {
 		return nil, err
 	}
-
 	return info, nil
 }
@@ -824,7 +934,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
 	if result.PublicSharePath == "" {
 		return "", errors.New("couldn't create public link - no link path received")
 	}
-	link = path.Join(shareURL, result.PublicSharePath)
+	link = path.Join(baseURL, result.PublicSharePath)
 	return link, nil
 }
@@ -880,7 +990,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {

 // Size returns the size of an object in bytes
 func (o *Object) Size() int64 {
-	err := o.readMetaData()
+	err := o.readMetaData(false)
 	if err != nil {
 		fs.Logf(o, "Failed to read metadata: %v", err)
 		return 0
@@ -903,14 +1013,17 @@ func (o *Object) setMetaData(info *api.JottaFile) (err error) {
 	return nil
 }

-func (o *Object) readMetaData() (err error) {
-	if o.hasMetaData {
+func (o *Object) readMetaData(force bool) (err error) {
+	if o.hasMetaData && !force {
 		return nil
 	}
 	info, err := o.fs.readMetaDataForPath(o.remote)
 	if err != nil {
 		return err
 	}
+	if info.Deleted {
+		return fs.ErrorObjectNotFound
+	}
 	return o.setMetaData(info)
 }

@@ -919,7 +1032,7 @@ func (o *Object) readMetaData() (err error) {
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime() time.Time {
-	err := o.readMetaData()
+	err := o.readMetaData(false)
 	if err != nil {
 		fs.Logf(o, "Failed to read metadata: %v", err)
 		return time.Now()
@@ -1040,43 +1153,74 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 		in = wrap(in)
 	}

+	// use the api to allocate the file first and get resume / deduplication info
 	var resp *http.Response
-	var result api.JottaFile
 	opts := rest.Opts{
-		Method:        "POST",
-		Path:          o.filePath(),
-		Body:          in,
-		ContentType:   fs.MimeType(src),
-		ContentLength: &size,
-		ExtraHeaders:  make(map[string]string),
-		Parameters:    url.Values{},
+		Method:       "POST",
+		Path:         "allocate",
+		ExtraHeaders: make(map[string]string),
 	}
+	fileDate := api.Time(src.ModTime()).APIString()
+
+	// the allocate request
+	var request = api.AllocateFileRequest{
+		Bytes:    size,
+		Created:  fileDate,
+		Modified: fileDate,
+		Md5:      md5String,
+		Path:     path.Join(o.fs.opt.Mountpoint, replaceReservedChars(path.Join(o.fs.root, o.remote))),
+	}

-	opts.ExtraHeaders["JMd5"] = md5String
-	opts.Parameters.Set("cphash", md5String)
-	opts.ExtraHeaders["JSize"] = strconv.FormatInt(size, 10)
-	// opts.ExtraHeaders["JCreated"] = api.Time(src.ModTime()).String()
-	opts.ExtraHeaders["JModified"] = api.Time(src.ModTime()).String()
-
-	// Parameters observed in other implementations
-	//opts.ExtraHeaders["X-Jfs-DeviceName"] = "Jotta"
-	//opts.ExtraHeaders["X-Jfs-Devicename-Base64"] = ""
-	//opts.ExtraHeaders["X-Jftp-Version"] = "2.4" this appears to be the current version
-	//opts.ExtraHeaders["jx_csid"] = ""
-	//opts.ExtraHeaders["jx_lisence"] = ""
-
-	opts.Parameters.Set("umode", "nomultipart")
-
+	// send it
+	var response api.AllocateFileResponse
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-		resp, err = o.fs.srv.CallXML(&opts, nil, &result)
+		resp, err = o.fs.apiSrv.CallJSON(&opts, &request, &response)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
 		return err
 	}

-	// TODO: Check returned Metadata? Timeout on big uploads?
-	return o.setMetaData(&result)
+	// If the file state is INCOMPLETE or CORRUPT, try the upload again
+	if response.State != "COMPLETED" {
+		// how much do we still have to upload?
+		remainingBytes := size - response.ResumePos
+		opts = rest.Opts{
+			Method:        "POST",
+			RootURL:       response.UploadURL,
+			ContentLength: &remainingBytes,
+			ContentType:   "application/octet-stream",
+			Body:          in,
+			ExtraHeaders:  make(map[string]string),
+		}
+		if response.ResumePos != 0 {
+			opts.ExtraHeaders["Range"] = "bytes=" + strconv.FormatInt(response.ResumePos, 10) + "-" + strconv.FormatInt(size-1, 10)
+		}
+
+		// copy the already uploaded bytes into the trash :)
+		var result api.UploadResponse
+		_, err = io.CopyN(ioutil.Discard, in, response.ResumePos)
+		if err != nil {
+			return err
+		}
+
+		// send the remaining bytes
+		resp, err = o.fs.apiSrv.CallJSON(&opts, nil, &result)
+		if err != nil {
+			return err
+		}
+
+		// finally update the meta data
+		o.hasMetaData = true
+		o.size = int64(result.Bytes)
+		o.md5 = result.Md5
+		o.modTime = time.Unix(result.Modified/1000, 0)
+	} else {
+		// If the file state is COMPLETED we don't need to upload it because the file was already found, but we still need to update our metadata
+		return o.readMetaData(true)
+	}
+
+	return nil
 }

 // Remove an object
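The resume path above discards the bytes the server already holds (`resume_pos`) and sends the remainder with a `Range: bytes=<pos>-<size-1>` header. A minimal version of that bookkeeping, standard library only:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strconv"
	"strings"
)

// resumeBody skips resumePos bytes of in and returns the remaining
// reader plus the Range header value for bytes resumePos..size-1.
func resumeBody(in io.Reader, size, resumePos int64) (io.Reader, string, error) {
	if _, err := io.CopyN(ioutil.Discard, in, resumePos); err != nil {
		return nil, "", err
	}
	rng := "bytes=" + strconv.FormatInt(resumePos, 10) + "-" + strconv.FormatInt(size-1, 10)
	return in, rng, nil
}

func main() {
	in := strings.NewReader("0123456789")
	body, rng, err := resumeBody(in, 10, 4)
	if err != nil {
		panic(err)
	}
	rest, _ := ioutil.ReadAll(body)
	fmt.Println(rng, string(rest)) // bytes=4-9 456789
}
```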
backend/local/lchtimes.go (new file, 20 lines)

@@ -0,0 +1,20 @@
+// +build windows plan9
+
+package local
+
+import (
+	"time"
+)
+
+const haveLChtimes = false
+
+// lChtimes changes the access and modification times of the named
+// link, similar to the Unix utime() or utimes() functions.
+//
+// The underlying filesystem may truncate or round the values to a
+// less precise time unit.
+// If there is an error, it will be of type *PathError.
+func lChtimes(name string, atime time.Time, mtime time.Time) error {
+	// Does nothing
+	return nil
+}
backend/local/lchtimes_unix.go (new file, 28 lines)

@@ -0,0 +1,28 @@
+// +build !windows,!plan9
+
+package local
+
+import (
+	"os"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+const haveLChtimes = true
+
+// lChtimes changes the access and modification times of the named
+// link, similar to the Unix utime() or utimes() functions.
+//
+// The underlying filesystem may truncate or round the values to a
+// less precise time unit.
+// If there is an error, it will be of type *PathError.
+func lChtimes(name string, atime time.Time, mtime time.Time) error {
+	var utimes [2]unix.Timespec
+	utimes[0] = unix.NsecToTimespec(atime.UnixNano())
+	utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
+	if e := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); e != nil {
+		return &os.PathError{Op: "lchtimes", Path: name, Err: e}
+	}
+	return nil
+}
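`lChtimes` updates the timestamps of the link itself rather than its target by passing `AT_SYMLINK_NOFOLLOW`. A small stand-alone program exercising the same syscall directly (non-Windows; requires golang.org/x/sys/unix; the file names are illustrative):

```go
package main

import (
	"fmt"
	"os"
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	link := "example-symlink"
	if err := os.Symlink("target-that-need-not-exist", link); err != nil {
		panic(err)
	}
	defer os.Remove(link)

	when := time.Date(2002, 2, 3, 4, 5, 10, 0, time.UTC)
	ts := unix.NsecToTimespec(when.UnixNano())
	// AT_SYMLINK_NOFOLLOW applies the times to the link, not its target.
	if err := unix.UtimesNanoAt(unix.AT_FDCWD, link, []unix.Timespec{ts, ts}, unix.AT_SYMLINK_NOFOLLOW); err != nil {
		panic(err)
	}

	// Lstat (not Stat) reads the link's own metadata back.
	fi, err := os.Lstat(link)
	if err != nil {
		panic(err)
	}
	fmt.Println("link mtime:", fi.ModTime().UTC())
}
```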
backend/local/local.go

@@ -2,6 +2,7 @@
 package local

 import (
+	"bytes"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -21,12 +22,14 @@ import (
 	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/fserrors"
 	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/lib/file"
+	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
 )

 // Constants
 const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
+const linkSuffix = ".rclonelink"    // The suffix added to a translated symbolic link

 // Register with Fs
 func init() {
@@ -48,6 +51,13 @@ func init() {
 			NoPrefix: true,
 			ShortOpt: "L",
 			Advanced: true,
+		}, {
+			Name:     "links",
+			Help:     "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension",
+			Default:  false,
+			NoPrefix: true,
+			ShortOpt: "l",
+			Advanced: true,
 		}, {
 			Name: "skip_links",
 			Help: `Don't warn about skipped symlinks.
@@ -92,12 +102,13 @@ check can be disabled with this flag.`,

 // Options defines the configuration for this backend
 type Options struct {
 	FollowSymlinks    bool `config:"copy_links"`
+	TranslateSymlinks bool `config:"links"`
 	SkipSymlinks      bool `config:"skip_links"`
 	NoUTFNorm         bool `config:"no_unicode_normalization"`
 	NoCheckUpdated    bool `config:"no_check_updated"`
 	NoUNC             bool `config:"nounc"`
 	OneFileSystem     bool `config:"one_file_system"`
 }

 // Fs represents a local filesystem rooted at root
@@ -119,17 +130,20 @@ type Fs struct {

 // Object represents a local filesystem object
 type Object struct {
 	fs             *Fs                  // The Fs this object is part of
 	remote         string               // The remote path - properly UTF-8 encoded - for rclone
 	path           string               // The local path - may not be properly UTF-8 encoded - for OS
 	size           int64                // file metadata - always present
 	mode           os.FileMode
 	modTime        time.Time
 	hashes         map[hash.Type]string // Hashes
+	translatedLink bool                 // Is this object a translated link
 }

 // ------------------------------------------------------------

+var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
+
 // NewFs constructs an Fs from the path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
@@ -138,6 +152,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
+	if opt.TranslateSymlinks && opt.FollowSymlinks {
+		return nil, errLinksAndCopyLinks
+	}

 	if opt.NoUTFNorm {
 		fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
@@ -165,7 +182,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err == nil {
 		f.dev = readDevice(fi, f.opt.OneFileSystem)
 	}
-	if err == nil && fi.Mode().IsRegular() {
+	if err == nil && f.isRegular(fi.Mode()) {
 		// It is a file, so use the parent as the root
 		f.root = filepath.Dir(f.root)
 		// return an error with an fs which points to the parent
@@ -174,6 +191,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	return f, nil
 }

+// Determine whether a file is a 'regular' file.
+// Symlinks are regular files only if the TranslateSymlinks
+// option is in effect.
+func (f *Fs) isRegular(mode os.FileMode) bool {
+	if !f.opt.TranslateSymlinks {
+		return mode.IsRegular()
+	}
+
+	// fi.Mode().IsRegular() tests that all mode bits are zero
+	// Since symlinks are accepted, test that all other bits are zero,
+	// except the symlink bit
+	return mode&os.ModeType&^os.ModeSymlink == 0
+}
+
 // Name of the remote (as passed into NewFs)
 func (f *Fs) Name() string {
 	return f.name
@@ -204,18 +235,38 @@ func (f *Fs) caseInsensitive() bool {
 	return runtime.GOOS == "windows" || runtime.GOOS == "darwin"
 }

+// translateLink checks whether the remote is a translated link
+// and returns a new path, removing the suffix as needed.
+// It also returns whether this is a translated link at all.
+//
+// for regular files, dstPath is returned unchanged
+func translateLink(remote, dstPath string) (newDstPath string, isTranslatedLink bool) {
+	isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
+	newDstPath = strings.TrimSuffix(dstPath, linkSuffix)
+	return newDstPath, isTranslatedLink
+}
+
 // newObject makes a half completed Object
 //
 // if dstPath is empty then it is made from remote
 func (f *Fs) newObject(remote, dstPath string) *Object {
+	translatedLink := false
+
 	if dstPath == "" {
 		dstPath = f.cleanPath(filepath.Join(f.root, remote))
 	}
 	remote = f.cleanRemote(remote)
+
+	if f.opt.TranslateSymlinks {
+		// Possibly receive a new name for dstPath
+		dstPath, translatedLink = translateLink(remote, dstPath)
+	}
+
 	return &Object{
-		fs:     f,
-		remote: remote,
-		path:   dstPath,
+		fs:             f,
+		remote:         remote,
+		path:           dstPath,
+		translatedLink: translatedLink,
 	}
 }
@@ -237,6 +288,11 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj
 		}
 		return nil, err
 	}
+	// Handle the odd case, that a symlink was specified by name without the link suffix
+	if o.fs.opt.TranslateSymlinks && o.mode&os.ModeSymlink != 0 && !o.translatedLink {
+		return nil, fs.ErrorObjectNotFound
+	}
+
 	}
 	if o.mode.IsDir() {
 		return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
@@ -260,6 +316,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+
 	dir = f.dirNames.Load(dir)
 	fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
 	remote := f.cleanRemote(dir)
@@ -316,6 +373,10 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 			entries = append(entries, d)
 		}
 	} else {
+		// Check whether this link should be translated
+		if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
+			newRemote += linkSuffix
+		}
 		fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
 		if err != nil {
 			return nil, err
@@ -529,7 +590,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 		// OK
 	} else if err != nil {
 		return nil, err
-	} else if !dstObj.mode.IsRegular() {
+	} else if !dstObj.fs.isRegular(dstObj.mode) {
 		// It isn't a file
 		return nil, errors.New("can't move file onto non-file")
 	}
@@ -651,7 +712,13 @@ func (o *Object) Hash(r hash.Type) (string, error) {
 	o.fs.objectHashesMu.Unlock()

 	if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
-		in, err := os.Open(o.path)
+		var in io.ReadCloser
+
+		if !o.translatedLink {
+			in, err = file.Open(o.path)
+		} else {
+			in, err = o.openTranslatedLink(0, -1)
+		}
 		if err != nil {
 			return "", errors.Wrap(err, "hash: failed to open")
 		}
@@ -682,7 +749,12 @@ func (o *Object) ModTime() time.Time {

 // SetModTime sets the modification time of the local fs object
 func (o *Object) SetModTime(modTime time.Time) error {
-	err := os.Chtimes(o.path, modTime, modTime)
+	var err error
+	if o.translatedLink {
+		err = lChtimes(o.path, modTime, modTime)
+	} else {
+		err = os.Chtimes(o.path, modTime, modTime)
+	}
 	if err != nil {
 		return err
 	}
@@ -700,7 +772,7 @@ func (o *Object) Storable() bool {
 		}
 	}
 	mode := o.mode
-	if mode&os.ModeSymlink != 0 {
+	if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
 		if !o.fs.opt.SkipSymlinks {
 			fs.Logf(o, "Can't follow symlink without -L/--copy-links")
 		}
@@ -761,6 +833,16 @@ func (file *localOpenFile) Close() (err error) {
 	return err
 }

+// Returns a ReadCloser() object that contains the contents of a symbolic link
+func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err error) {
+	// Read the link and return its destination as the contents of the object
+	linkdst, err := os.Readlink(o.path)
+	if err != nil {
+		return nil, err
+	}
+	return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
+}
+
 // Open an object for read
 func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	var offset, limit int64 = 0, -1
@@ -780,7 +862,12 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 		}
 	}

-	fd, err := os.Open(o.path)
+	// Handle a translated link
+	if o.translatedLink {
+		return o.openTranslatedLink(offset, limit)
+	}
+
+	fd, err := file.Open(o.path)
 	if err != nil {
 		return
 	}
@@ -811,8 +898,19 @@ func (o *Object) mkdirAll() error {
 	return os.MkdirAll(dir, 0777)
 }

+type nopWriterCloser struct {
+	*bytes.Buffer
+}
+
+func (nwc nopWriterCloser) Close() error {
+	// noop
+	return nil
+}
+
 // Update the object from in with modTime and size
 func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	var out io.WriteCloser
+
 	hashes := hash.Supported
 	for _, option := range options {
 		switch x := option.(type) {
@@ -826,15 +924,23 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 		return err
 	}

-	out, err := os.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
-	if err != nil {
-		return err
-	}
-
-	// Pre-allocate the file for performance reasons
-	err = preAllocate(src.Size(), out)
-	if err != nil {
-		fs.Debugf(o, "Failed to pre-allocate: %v", err)
+	var symlinkData bytes.Buffer
+	// If the object is a regular file, create it.
+	// If it is a translated link, just read in the contents, and
+	// then create a symlink
+	if !o.translatedLink {
+		f, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+		if err != nil {
+			return err
+		}
+		// Pre-allocate the file for performance reasons
+		err = preAllocate(src.Size(), f)
+		if err != nil {
+			fs.Debugf(o, "Failed to pre-allocate: %v", err)
+		}
+		out = f
+	} else {
+		out = nopWriterCloser{&symlinkData}
 	}

 	// Calculate the hash of the object we are reading as we go along
@@ -849,6 +955,26 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	if err == nil {
 		err = closeErr
 	}

+	if o.translatedLink {
+		if err == nil {
+			// Remove any current symlink or file, if one exists
+			if _, err := os.Lstat(o.path); err == nil {
+				if removeErr := os.Remove(o.path); removeErr != nil {
+					fs.Errorf(o, "Failed to remove previous file: %v", removeErr)
+					return removeErr
+				}
+			}
+			// Use the contents of the copied object to create a symlink
+			err = os.Symlink(symlinkData.String(), o.path)
+		}
+
+		// only continue if symlink creation succeeded
+		if err != nil {
+			return err
+		}
+	}
+
 	if err != nil {
 		fs.Logf(o, "Removing partially written file on error: %v", err)
 		if removeErr := os.Remove(o.path); removeErr != nil {
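Symlink translation is essentially a naming convention: a link `foo` appears as a regular file `foo.rclonelink` whose contents are the link target. A stand-alone sketch of the suffix handling and content read, mirroring `translateLink` above (the file names are illustrative; non-Windows assumed for symlink creation):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

const linkSuffix = ".rclonelink"

// translateLink reports whether remote names a translated link and
// strips the suffix from the on-disk path if so.
func translateLink(remote, dstPath string) (string, bool) {
	isLink := strings.HasSuffix(remote, linkSuffix)
	return strings.TrimSuffix(dstPath, linkSuffix), isLink
}

func main() {
	if err := os.Symlink("file.txt", "symlink"); err != nil {
		panic(err)
	}
	defer os.Remove("symlink")

	path, isLink := translateLink("symlink"+linkSuffix, "symlink"+linkSuffix)
	fmt.Println(path, isLink) // symlink true

	// The "contents" of a translated link are its target.
	target, err := os.Readlink(path)
	if err != nil {
		panic(err)
	}
	fmt.Println(target) // file.txt
}
```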
backend/local/local_internal_test.go

@@ -1,13 +1,19 @@
 package local

 import (
+	"io/ioutil"
 	"os"
 	"path"
+	"path/filepath"
+	"runtime"
 	"testing"
 	"time"

+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config/configmap"
 	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/fstest"
+	"github.com/ncw/rclone/lib/file"
 	"github.com/ncw/rclone/lib/readers"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -38,10 +44,13 @@ func TestUpdatingCheck(t *testing.T) {
 	filePath := "sub dir/local test"
 	r.WriteFile(filePath, "content", time.Now())

-	fd, err := os.Open(path.Join(r.LocalName, filePath))
+	fd, err := file.Open(path.Join(r.LocalName, filePath))
 	if err != nil {
 		t.Fatalf("failed opening file %q: %v", filePath, err)
 	}
+	defer func() {
+		require.NoError(t, fd.Close())
+	}()

 	fi, err := fd.Stat()
 	require.NoError(t, err)
@@ -72,3 +81,108 @@ func TestUpdatingCheck(t *testing.T) {
 	require.NoError(t, err)
 }
+
+func TestSymlink(t *testing.T) {
+	r := fstest.NewRun(t)
+	defer r.Finalise()
+	f := r.Flocal.(*Fs)
+	dir := f.root
+
+	// Write a file
+	modTime1 := fstest.Time("2001-02-03T04:05:10.123123123Z")
+	file1 := r.WriteFile("file.txt", "hello", modTime1)
+
+	// Write a symlink
+	modTime2 := fstest.Time("2002-02-03T04:05:10.123123123Z")
+	symlinkPath := filepath.Join(dir, "symlink.txt")
+	require.NoError(t, os.Symlink("file.txt", symlinkPath))
+	require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))
+
+	// Object viewed as symlink
+	file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
+	if runtime.GOOS == "windows" {
+		file2.Size = 0 // symlinks are 0 length under Windows
+	}
+
+	// Object viewed as destination
+	file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
+
+	// Check with no symlink flags
+	fstest.CheckItems(t, r.Flocal, file1)
+	fstest.CheckItems(t, r.Fremote)
+
+	// Set fs into "-L" mode
+	f.opt.FollowSymlinks = true
+	f.opt.TranslateSymlinks = false
+	f.lstat = os.Stat
+
+	fstest.CheckItems(t, r.Flocal, file1, file2d)
+	fstest.CheckItems(t, r.Fremote)
+
+	// Set fs into "-l" mode
+	f.opt.FollowSymlinks = false
+	f.opt.TranslateSymlinks = true
+	f.lstat = os.Lstat
+
+	fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2}, nil, fs.ModTimeNotSupported)
+	if haveLChtimes {
+		fstest.CheckItems(t, r.Flocal, file1, file2)
+	}
+
+	// Create a symlink
+	modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
+	file3 := r.WriteObjectTo(r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
+	if runtime.GOOS == "windows" {
+		file3.Size = 0 // symlinks are 0 length under Windows
+	}
+	fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
+	if haveLChtimes {
+		fstest.CheckItems(t, r.Flocal, file1, file2, file3)
+	}
+
+	// Check it got the correct contents
+	symlinkPath = filepath.Join(dir, "symlink2.txt")
+	fi, err := os.Lstat(symlinkPath)
+	require.NoError(t, err)
+	assert.False(t, fi.Mode().IsRegular())
+	linkText, err := os.Readlink(symlinkPath)
+	require.NoError(t, err)
+	assert.Equal(t, "file.txt", linkText)
+
+	// Check that NewObject gets the correct object
+	o, err := r.Flocal.NewObject("symlink2.txt" + linkSuffix)
+	require.NoError(t, err)
+	assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
+	if runtime.GOOS != "windows" {
+		assert.Equal(t, int64(8), o.Size())
+	}
+
+	// Check that NewObject doesn't see the non suffixed version
+	_, err = r.Flocal.NewObject("symlink2.txt")
+	require.Equal(t, fs.ErrorObjectNotFound, err)
+
+	// Check reading the object
+	in, err := o.Open()
+	require.NoError(t, err)
+	contents, err := ioutil.ReadAll(in)
+	require.NoError(t, err)
+	require.Equal(t, "file.txt", string(contents))
+	require.NoError(t, in.Close())
+
+	// Check reading the object with range
+	in, err = o.Open(&fs.RangeOption{Start: 2, End: 5})
+	require.NoError(t, err)
+	contents, err = ioutil.ReadAll(in)
+	require.NoError(t, err)
+	require.Equal(t, "file.txt"[2:5+1], string(contents))
+	require.NoError(t, in.Close())
+}
+
+func TestSymlinkError(t *testing.T) {
+	m := configmap.Simple{
+		"links":      "true",
+		"copy_links": "true",
+	}
+	_, err := NewFs("local", "/", m)
+	assert.Equal(t, errLinksAndCopyLinks, err)
+}
@@ -285,6 +285,7 @@ type AsyncOperationStatus struct {

// GetID returns a normalized ID of the item
// If DriveID is known it will be prefixed to the ID with # separator
// Can be parsed using onedrive.parseNormalizedID(normalizedID)
func (i *Item) GetID() string {
if i.IsRemote() && i.RemoteItem.ID != "" {
return i.RemoteItem.ParentReference.DriveID + "#" + i.RemoteItem.ID

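The `driveID#itemID` convention this comment describes is easy to exercise on its own. Below is a minimal, self-contained sketch of splitting such an ID — illustrative only; the real parser is the `parseNormalizedID` function later in this change, and the drive/item values here are made up.

package main

import (
	"fmt"
	"strings"
)

// splitNormalizedID mirrors the documented format "driveID#itemID",
// falling back to a bare itemID when no "#" is present.
func splitNormalizedID(id string) (itemID, driveID string) {
	if i := strings.Index(id, "#"); i >= 0 {
		return id[i+1:], id[:i]
	}
	return id, ""
}

func main() {
	itemID, driveID := splitNormalizedID("b!abc123#01XYZ")
	fmt.Println(itemID, driveID) // 01XYZ b!abc123

	itemID, driveID = splitNormalizedID("01PLAIN")
	fmt.Println(itemID, driveID) // 01PLAIN with empty driveID
}
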
@@ -75,9 +75,8 @@ func init() {
return
}

// Are we running headless?
if automatic, _ := m.Get(config.ConfigAutomatic); automatic != "" {
// Yes, okay we are done
// Stop if we are running non-interactive config
if fs.Config.AutoConfirm {
return
}

@@ -199,7 +198,7 @@ func init() {

fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
// This does not work, YET :)
if !config.Confirm() {
if !config.ConfirmWithConfig(m, "config_drive_ok", true) {
log.Fatalf("Cancelled by user")
}

@@ -334,20 +333,10 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
var opts rest.Opts
if len(path) == 0 {
opts = rest.Opts{
Method: "GET",
Path: "/root",
}
} else {
opts = rest.Opts{
Method: "GET",
Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
}
}
// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
// if `relPath` == "", it reads the metadata for the item with that ID.
func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
opts := newOptsCall(normalizedID, "GET", ":/"+rest.URLPathEscape(replaceReservedChars(relPath)))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
@@ -356,6 +345,72 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Respon
return info, resp, err
}

// readMetaDataForPath reads the metadata from the path (relative to the absolute root)
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
firstSlashIndex := strings.IndexRune(path, '/')

if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
var opts rest.Opts
if len(path) == 0 {
opts = rest.Opts{
Method: "GET",
Path: "/root",
}
} else {
opts = rest.Opts{
Method: "GET",
Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
}
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
})
return info, resp, err
}

// The following branch handles the case when we're using OneDrive Personal and the path is in a folder.
// For OneDrive Personal, we need to consider the "shared with me" folders.
// An item in such a folder can only be addressed by its ID relative to the sharer's driveID or
// by its path relative to the folder's ID relative to the sharer's driveID.
// Note: A "shared with me" folder can only be placed in the sharee's absolute root.
// So we read metadata relative to a suitable folder's normalized ID.
var dirCacheFoundRoot bool
var rootNormalizedID string
if f.dirCache != nil {
var ok bool
if rootNormalizedID, ok = f.dirCache.Get(""); ok {
dirCacheFoundRoot = true
}
}

relPath, insideRoot := getRelativePathInsideBase(f.root, path)
var firstDir, baseNormalizedID string
if !insideRoot || !dirCacheFoundRoot {
// We do not have the normalized ID in dirCache for our query to base on. Query it manually.
firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
info, resp, err := f.readMetaDataForPath(firstDir)
if err != nil {
return info, resp, err
}
baseNormalizedID = info.GetID()
} else {
if f.root != "" {
// Read metadata based on root
baseNormalizedID = rootNormalizedID
} else {
// Read metadata based on firstDir
firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
baseNormalizedID, err = f.dirCache.FindDir(firstDir, false)
if err != nil {
return nil, nil, err
}
}
}

return f.readMetaDataForPathRelativeToID(baseNormalizedID, relPath)
}

// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
// Decode error response
@@ -437,11 +492,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

// Get rootID
rootInfo, _, err := f.readMetaDataForPath("")
if err != nil || rootInfo.ID == "" {
if err != nil || rootInfo.GetID() == "" {
return nil, errors.Wrap(err, "failed to get root")
}

f.dirCache = dircache.New(root, rootInfo.ID, f)
f.dirCache = dircache.New(root, rootInfo.GetID(), f)

// Find the current root
err = f.dirCache.FindRoot(false)
@@ -514,18 +569,11 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
parent, ok := f.dirCache.GetInv(pathID)
_, ok := f.dirCache.GetInv(pathID)
if !ok {
return "", false, errors.New("couldn't find parent ID")
}
path := leaf
if parent != "" {
path = parent + "/" + path
}
if f.dirCache.FoundRoot() {
path = f.rootSlash() + path
}
info, resp, err := f.readMetaDataForPath(path)
info, resp, err := f.readMetaDataForPathRelativeToID(pathID, leaf)
if err != nil {
if resp != nil && resp.StatusCode == http.StatusNotFound {
return "", false, nil
@@ -867,13 +915,13 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
opts.NoResponse = true

id, _, _ := parseDirID(directoryID)
id, dstDriveID, _ := parseNormalizedID(directoryID)

replacedLeaf := replaceReservedChars(leaf)
copyReq := api.CopyItemRequest{
Name: &replacedLeaf,
ParentReference: api.ItemReference{
DriveID: f.driveID,
DriveID: dstDriveID,
ID: id,
},
}
@@ -940,15 +988,23 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
return nil, err
}

id, dstDriveID, _ := parseNormalizedID(directoryID)
_, srcObjDriveID, _ := parseNormalizedID(srcObj.id)

if dstDriveID != srcObjDriveID {
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
// "Items cannot be moved between Drives using this request."
return nil, fs.ErrorCantMove
}

// Move the object
opts := newOptsCall(srcObj.id, "PATCH", "")

id, _, _ := parseDirID(directoryID)

move := api.MoveItemRequest{
Name: replaceReservedChars(leaf),
ParentReference: &api.ItemReference{
ID: id,
DriveID: dstDriveID,
ID: id,
},
// We set the mod time too as it gets reset otherwise
FileSystemInfo: &api.FileSystemInfoFacet{
@@ -1024,7 +1080,20 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
if err != nil {
return err
}
parsedDstDirID, _, _ := parseDirID(dstDirectoryID)
parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)

// Find ID of src
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
if err != nil {
return err
}
_, srcDriveID, _ := parseNormalizedID(srcID)

if dstDriveID != srcDriveID {
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
// "Items cannot be moved between Drives using this request."
return fs.ErrorCantDirMove
}

// Check destination does not exist
if dstRemote != "" {
@@ -1038,14 +1107,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}
}

// Find ID of src
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
if err != nil {
return err
}

// Get timestamps of src so they can be preserved
srcInfo, _, err := srcFs.readMetaDataForPath(srcPath)
srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(srcID, "")
if err != nil {
return err
}
@@ -1055,7 +1118,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
move := api.MoveItemRequest{
Name: replaceReservedChars(leaf),
ParentReference: &api.ItemReference{
ID: parsedDstDirID,
DriveID: dstDriveID,
ID: parsedDstDirID,
},
// We set the mod time too as it gets reset otherwise
FileSystemInfo: &api.FileSystemInfoFacet{
@@ -1122,7 +1186,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
if err != nil {
return "", err
}
opts := newOptsCall(info.ID, "POST", "/createLink")
opts := newOptsCall(info.GetID(), "POST", "/createLink")

share := api.CreateShareLinkRequest{
Type: "view",
@@ -1270,13 +1334,13 @@ func (o *Object) ModTime() time.Time {
// setModTime sets the modification time of the local fs object
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
var opts rest.Opts
_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
_, drive, rootURL := parseDirID(directoryID)
leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
trueDirID, drive, rootURL := parseNormalizedID(directoryID)
if drive != "" {
opts = rest.Opts{
Method: "PATCH",
RootURL: rootURL,
Path: "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()),
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf),
}
} else {
opts = rest.Opts{
@@ -1344,7 +1408,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUploadResponse, err error) {
leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
id, drive, rootURL := parseDirID(directoryID)
id, drive, rootURL := parseNormalizedID(directoryID)
var opts rest.Opts
if drive != "" {
opts = rest.Opts{
@@ -1477,13 +1541,13 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (
fs.Debugf(o, "Starting singlepart upload")
var resp *http.Response
var opts rest.Opts
_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
_, drive, rootURL := parseDirID(directoryID)
leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
trueDirID, drive, rootURL := parseNormalizedID(directoryID)
if drive != "" {
opts = rest.Opts{
Method: "PUT",
RootURL: rootURL,
Path: "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf) + ":/content",
ContentLength: &size,
Body: in,
}
@@ -1496,10 +1560,6 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (
}
}

if size == 0 {
opts.Body = nil
}

err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
if apiErr, ok := err.(*api.Error); ok {
@@ -1566,8 +1626,8 @@ func (o *Object) ID() string {
return o.id
}

func newOptsCall(id string, method string, route string) (opts rest.Opts) {
id, drive, rootURL := parseDirID(id)
func newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
id, drive, rootURL := parseNormalizedID(normalizedID)

if drive != "" {
return rest.Opts{
@@ -1582,7 +1642,10 @@ func newOptsCall(id string, method string, route string) (opts rest.Opts) {
}
}

func parseDirID(ID string) (string, string, string) {
// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
// and returns itemID, driveID, rootURL.
// Such a normalized ID can come from (*Item).GetID()
func parseNormalizedID(ID string) (string, string, string) {
if strings.Index(ID, "#") >= 0 {
s := strings.Split(ID, "#")
return s[1], s[0], graphURL + "/drives"
@@ -1590,6 +1653,21 @@ func parseDirID(ID string) (string, string, string) {
return ID, "", ""
}

// getRelativePathInsideBase checks if `target` is inside `base`. If so, it
// returns a relative path for `target` based on `base` and a boolean `true`.
// Otherwise returns "", false.
func getRelativePathInsideBase(base, target string) (string, bool) {
if base == "" {
return target, true
}

baseSlash := base + "/"
if strings.HasPrefix(target+"/", baseSlash) {
return target[len(baseSlash):], true
}
return "", false
}

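A quick demonstration of `getRelativePathInsideBase` (using a local copy of the helper, since the real one is unexported):

package main

import (
	"fmt"
	"strings"
)

// Local copy of the unexported helper above, for illustration only.
func getRelativePathInsideBase(base, target string) (string, bool) {
	if base == "" {
		return target, true
	}
	baseSlash := base + "/"
	if strings.HasPrefix(target+"/", baseSlash) {
		return target[len(baseSlash):], true
	}
	return "", false
}

func main() {
	fmt.Println(getRelativePathInsideBase("a/b", "a/b/c")) // c true
	fmt.Println(getRelativePathInsideBase("a", "ab"))      // "" false
	fmt.Println(getRelativePathInsideBase("", "x/y"))      // x/y true
}
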
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)

@@ -72,14 +72,54 @@ func init() {
Help: "Number of connection retries.",
Default: 3,
Advanced: true,
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload

Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.

When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.

Note that "--qingstor-upload-concurrency" chunks of this size are buffered
in memory per transfer.

If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

NB if you set this to > 1 then the checksums of multipart uploads
become corrupted (the uploads themselves are not corrupted though).

If you are uploading small numbers of large files over high speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 1,
Advanced: true,
}},
})
}

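The cutoff rule these help strings describe is applied in `Update` further down in this change (`size < 0 || size >= upload_cutoff`). A minimal sketch with assumed values, just to make the decision and the memory bound concrete — the numbers are illustrative, not rclone's configuration API:

package main

import "fmt"

// useMultipart sketches the decision the help text above describes:
// streaming uploads (unknown size) and large files go multipart.
func useMultipart(size, uploadCutoff int64) bool {
	return size < 0 || size >= uploadCutoff
}

func main() {
	const (
		uploadCutoff = 200 * 1024 * 1024 // 200 MiB default cutoff
		chunkSize    = 4 * 1024 * 1024   // assumed chunk_size
		concurrency  = 4                 // assumed upload_concurrency
	)
	fmt.Println(useMultipart(50*1024*1024, uploadCutoff))  // false: single PUT
	fmt.Println(useMultipart(500*1024*1024, uploadCutoff)) // true: chunked
	// Worst-case buffer per transfer, as the chunk_size help warns:
	fmt.Println("buffered bytes:", chunkSize*concurrency)
}
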
// Constants
const (
listLimitSize = 1000 // Number of items to read at once
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
listLimitSize = 1000 // Number of items to read at once
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
minChunkSize = fs.SizeSuffix(minMultiPartSize)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
)

// Globals
@@ -92,12 +132,15 @@ func timestampToTime(tp int64) time.Time {

// Options defines the configuration for this backend
type Options struct {
EnvAuth bool `config:"env_auth"`
AccessKeyID string `config:"access_key_id"`
SecretAccessKey string `config:"secret_access_key"`
Endpoint string `config:"endpoint"`
Zone string `config:"zone"`
ConnectionRetries int `config:"connection_retries"`
EnvAuth bool `config:"env_auth"`
AccessKeyID string `config:"access_key_id"`
SecretAccessKey string `config:"secret_access_key"`
Endpoint string `config:"endpoint"`
Zone string `config:"zone"`
ConnectionRetries int `config:"connection_retries"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
}

// Fs represents a remote qingstor server
@@ -227,6 +270,36 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
return qs.Init(cf)
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}

func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}

func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -235,6 +308,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "qingstor: chunk size")
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "qingstor: upload cutoff")
}
bucket, key, err := qsParsePath(root)
if err != nil {
return nil, err
@@ -913,16 +994,24 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
mimeType := fs.MimeType(src)

req := uploadInput{
body: in,
qsSvc: o.fs.svc,
bucket: o.fs.bucket,
zone: o.fs.zone,
key: key,
mimeType: mimeType,
body: in,
qsSvc: o.fs.svc,
bucket: o.fs.bucket,
zone: o.fs.zone,
key: key,
mimeType: mimeType,
partSize: int64(o.fs.opt.ChunkSize),
concurrency: o.fs.opt.UploadConcurrency,
}
uploader := newUploader(&req)

err = uploader.upload()
size := src.Size()
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
if multipart {
err = uploader.upload()
} else {
err = uploader.singlePartUpload(in, size)
}
if err != nil {
return err
}

@@ -2,12 +2,12 @@

// +build !plan9

package qingstor_test
package qingstor

import (
"testing"

"github.com/ncw/rclone/backend/qingstor"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)

@@ -15,6 +15,19 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestQingStor:",
NilObject: (*qingstor.Object)(nil),
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}

var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

@@ -152,11 +152,11 @@ func (u *uploader) init() {
}

// singlePartUpload uploads a single object whose contentLength is less than "defaultUploadPartSize"
func (u *uploader) singlePartUpload(buf io.ReadSeeker) error {
func (u *uploader) singlePartUpload(buf io.Reader, size int64) error {
bucketInit, _ := u.bucketInit()

req := qs.PutObjectInput{
ContentLength: &u.readerPos,
ContentLength: &size,
ContentType: &u.cfg.mimeType,
Body: buf,
}
@@ -180,7 +180,7 @@ func (u *uploader) upload() error {
reader, _, err := u.nextReader()
if err == io.EOF { // single part
fs.Debugf(u, "Uploading as single part object to QingStor")
return u.singlePartUpload(reader)
return u.singlePartUpload(reader, u.readerPos)
} else if err != nil {
return errors.Errorf("read upload data failed: %s", err)
}
@@ -392,6 +392,14 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) error {
var nextChunkLen int
reader, nextChunkLen, err = mu.nextReader()
if err != nil && err != io.EOF {
// empty ch
go func() {
for range ch {
}
}()
// Wait for all goroutines to finish
close(ch)
mu.wg.Wait()
return err
}
if nextChunkLen == 0 && partNumber > 0 {

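The error branch above uses a standard Go shutdown idiom: drain the jobs channel so nothing stays blocked, close it to stop the workers, then wait on the WaitGroup. A self-contained sketch of the same pattern (illustrative only, not the rclone uploader itself):

package main

import (
	"errors"
	"fmt"
	"sync"
)

func main() {
	ch := make(chan int, 2)
	var wg sync.WaitGroup

	// Workers consume jobs until the channel is closed.
	for w := 0; w < 3; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for job := range ch {
				fmt.Println("processed", job)
			}
		}()
	}

	// The producer hits an error partway through.
	var err error
	for i := 0; i < 10; i++ {
		if i == 4 {
			err = errors.New("read failed")
			break
		}
		ch <- i
	}
	if err != nil {
		// Same shutdown order as multiPartUpload above: drain anything
		// still buffered, close to stop the workers, then wait.
		go func() {
			for range ch {
			}
		}()
		close(ch)
		wg.Wait()
		fmt.Println("aborted:", err)
	}
}
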
backend/s3/s3.go (233 changes)

@@ -53,7 +53,7 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)",
Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
NewFs: NewFs,
Options: []fs.Option{{
Name: fs.ConfigProvider,
@@ -61,6 +61,9 @@ func init() {
Examples: []fs.OptionExample{{
Value: "AWS",
Help: "Amazon Web Services (AWS) S3",
}, {
Value: "Alibaba",
Help: "Alibaba Cloud Object Storage System (OSS) formerly Aliyun",
}, {
Value: "Ceph",
Help: "Ceph Object Storage",
@@ -76,6 +79,9 @@ func init() {
}, {
Value: "Minio",
Help: "Minio Object Storage",
}, {
Value: "Netease",
Help: "Netease Object Storage (NOS)",
}, {
Value: "Wasabi",
Help: "Wasabi Object Storage",
@@ -150,7 +156,7 @@ func init() {
}, {
Name: "region",
Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS",
Provider: "!AWS,Alibaba",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure. Will use v4 signatures and an empty region.",
@@ -269,10 +275,73 @@ func init() {
Value: "s3.tor01.objectstorage.service.networklayer.com",
Help: "Toronto Single Site Private Endpoint",
}},
}, {
// oss endpoints: https://help.aliyun.com/document_detail/31837.html
Name: "endpoint",
Help: "Endpoint for OSS API.",
Provider: "Alibaba",
Examples: []fs.OptionExample{{
Value: "oss-cn-hangzhou.aliyuncs.com",
Help: "East China 1 (Hangzhou)",
}, {
Value: "oss-cn-shanghai.aliyuncs.com",
Help: "East China 2 (Shanghai)",
}, {
Value: "oss-cn-qingdao.aliyuncs.com",
Help: "North China 1 (Qingdao)",
}, {
Value: "oss-cn-beijing.aliyuncs.com",
Help: "North China 2 (Beijing)",
}, {
Value: "oss-cn-zhangjiakou.aliyuncs.com",
Help: "North China 3 (Zhangjiakou)",
}, {
Value: "oss-cn-huhehaote.aliyuncs.com",
Help: "North China 5 (Huhehaote)",
}, {
Value: "oss-cn-shenzhen.aliyuncs.com",
Help: "South China 1 (Shenzhen)",
}, {
Value: "oss-cn-hongkong.aliyuncs.com",
Help: "Hong Kong (Hong Kong)",
}, {
Value: "oss-us-west-1.aliyuncs.com",
Help: "US West 1 (Silicon Valley)",
}, {
Value: "oss-us-east-1.aliyuncs.com",
Help: "US East 1 (Virginia)",
}, {
Value: "oss-ap-southeast-1.aliyuncs.com",
Help: "Southeast Asia Southeast 1 (Singapore)",
}, {
Value: "oss-ap-southeast-2.aliyuncs.com",
Help: "Asia Pacific Southeast 2 (Sydney)",
}, {
Value: "oss-ap-southeast-3.aliyuncs.com",
Help: "Southeast Asia Southeast 3 (Kuala Lumpur)",
}, {
Value: "oss-ap-southeast-5.aliyuncs.com",
Help: "Asia Pacific Southeast 5 (Jakarta)",
}, {
Value: "oss-ap-northeast-1.aliyuncs.com",
Help: "Asia Pacific Northeast 1 (Japan)",
}, {
Value: "oss-ap-south-1.aliyuncs.com",
Help: "Asia Pacific South 1 (Mumbai)",
}, {
Value: "oss-eu-central-1.aliyuncs.com",
Help: "Central Europe 1 (Frankfurt)",
}, {
Value: "oss-eu-west-1.aliyuncs.com",
Help: "West Europe (London)",
}, {
Value: "oss-me-east-1.aliyuncs.com",
Help: "Middle East 1 (Dubai)",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS",
Provider: "!AWS,IBMCOS,Alibaba",
Examples: []fs.OptionExample{{
Value: "objects-us-west-1.dream.io",
Help: "Dream Objects endpoint",
@@ -449,11 +518,13 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,IBMCOS",
Provider: "!AWS,IBMCOS,Alibaba",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.

This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.

For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl

Note that this ACL is applied when server side copying objects as S3
@@ -499,6 +570,28 @@ doesn't copy the ACL from the source but rather writes a fresh one.`,
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS",
Provider: "IBMCOS",
}},
}, {
Name: "bucket_acl",
Help: `Canned ACL used when creating buckets.

For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl

Note that this ACL is applied only when creating buckets. If it
isn't set then "acl" is used instead.`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
}, {
Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
}, {
Value: "public-read-write",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
}, {
Value: "authenticated-read",
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
}},
}, {
Name: "server_side_encryption",
Help: "The server-side encryption algorithm used when storing this object in S3.",
@@ -547,6 +640,24 @@ doesn't copy the ACL from the source but rather writes a fresh one.`,
Value: "GLACIER",
Help: "Glacier storage class",
}},
}, {
// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
Name: "storage_class",
Help: "The storage class to use when storing new objects in OSS.",
Provider: "Alibaba",
Examples: []fs.OptionExample{{
Value: "",
Help: "Default",
}, {
Value: "STANDARD",
Help: "Standard storage class",
}, {
Value: "GLACIER",
Help: "Archive storage mode.",
}, {
Value: "STANDARD_IA",
Help: "Infrequent access storage mode.",
}},
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload
@@ -640,6 +751,7 @@ type Options struct {
Endpoint string `config:"endpoint"`
LocationConstraint string `config:"location_constraint"`
ACL string `config:"acl"`
BucketACL string `config:"bucket_acl"`
ServerSideEncryption string `config:"server_side_encryption"`
SSEKMSKeyID string `config:"sse_kms_key_id"`
StorageClass string `config:"storage_class"`
@@ -714,23 +826,31 @@ func (f *Fs) Features() *fs.Features {
// retryErrorCodes is a slice of error codes that we will retry
// See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var retryErrorCodes = []int{
409, // Conflict - various states that could be resolved on a retry
// 409, // Conflict - various states that could be resolved on a retry
503, // Service Unavailable/Slow Down - "Reduce your request rate"
}

//S3 is pretty resilient, and the built in retry handling is probably sufficient
// as it should notice closed connections and timeouts which are the most likely
// sort of failure modes
func shouldRetry(err error) (bool, error) {

func (f *Fs) shouldRetry(err error) (bool, error) {
// If this is an awserr object, try and extract more useful information to determine if we should retry
if awsError, ok := err.(awserr.Error); ok {
// Simple case, check the original embedded error in case it's generically retriable
if fserrors.ShouldRetry(awsError.OrigErr()) {
return true, err
}
//Failing that, if it's a RequestFailure it's probably got an http status code we can check
// Failing that, if it's a RequestFailure it's probably got an http status code we can check
if reqErr, ok := err.(awserr.RequestFailure); ok {
// 301 if wrong region for bucket
if reqErr.StatusCode() == http.StatusMovedPermanently {
urfbErr := f.updateRegionForBucket()
if urfbErr != nil {
fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
return false, err
}
return true, err
}
for _, e := range retryErrorCodes {
if reqErr.StatusCode() == e {
return true, err
@@ -738,7 +858,7 @@ func shouldRetry(err error) (bool, error) {
}
}
}
//Ok, not an awserr, check for generic failure conditions
// Ok, not an awserr, check for generic failure conditions
return fserrors.ShouldRetry(err), err
}

@@ -815,13 +935,21 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
if opt.Region == "" {
opt.Region = "us-east-1"
}
if opt.Provider == "Alibaba" || opt.Provider == "Netease" {
opt.ForcePathStyle = false
}
awsConfig := aws.NewConfig().
WithRegion(opt.Region).
WithMaxRetries(maxRetries).
WithCredentials(cred).
WithEndpoint(opt.Endpoint).
WithHTTPClient(fshttp.NewClient(fs.Config)).
WithS3ForcePathStyle(opt.ForcePathStyle)
if opt.Region != "" {
awsConfig.WithRegion(opt.Region)
}
if opt.Endpoint != "" {
awsConfig.WithEndpoint(opt.Endpoint)
}

// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
awsSessionOpts := session.Options{
Config: *awsConfig,
@@ -904,6 +1032,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
if opt.ACL == "" {
opt.ACL = "private"
}
if opt.BucketACL == "" {
opt.BucketACL = opt.ACL
}
c, ses, err := s3Connection(opt)
if err != nil {
return nil, err
@@ -932,7 +1066,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.HeadObject(&req)
return shouldRetry(err)
return f.shouldRetry(err)
})
if err == nil {
f.root = path.Dir(directory)
@@ -982,6 +1116,51 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}

// Gets the bucket location
func (f *Fs) getBucketLocation() (string, error) {
req := s3.GetBucketLocationInput{
Bucket: &f.bucket,
}
var resp *s3.GetBucketLocationOutput
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.GetBucketLocation(&req)
return f.shouldRetry(err)
})
if err != nil {
return "", err
}
return s3.NormalizeBucketLocation(aws.StringValue(resp.LocationConstraint)), nil
}

// Updates the region for the bucket by reading the region from the
// bucket then updating the session.
func (f *Fs) updateRegionForBucket() error {
region, err := f.getBucketLocation()
if err != nil {
return errors.Wrap(err, "reading bucket location failed")
}
if aws.StringValue(f.c.Config.Endpoint) != "" {
return errors.Errorf("can't set region to %q as endpoint is set", region)
}
if aws.StringValue(f.c.Config.Region) == region {
return errors.Errorf("region is already %q - not updating", region)
}

// Make a new session with the new region
oldRegion := f.opt.Region
f.opt.Region = region
c, ses, err := s3Connection(&f.opt)
if err != nil {
return errors.Wrap(err, "creating new session failed")
}
f.c = c
f.ses = ses

fs.Logf(f, "Switched region to %q from %q", region, oldRegion)
return nil
}

// listFn is called from list to handle an object.
type listFn func(remote string, object *s3.Object, isDirectory bool) error

@@ -1014,7 +1193,7 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListObjects(&req)
return shouldRetry(err)
return f.shouldRetry(err)
})
if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok {
@@ -1143,7 +1322,7 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
var resp *s3.ListBucketsOutput
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListBuckets(&req)
return shouldRetry(err)
return f.shouldRetry(err)
})
if err != nil {
return nil, err
@@ -1231,7 +1410,7 @@ func (f *Fs) dirExists() (bool, error) {
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.HeadBucket(&req)
return shouldRetry(err)
return f.shouldRetry(err)
})
if err == nil {
return true, nil
@@ -1262,7 +1441,7 @@ func (f *Fs) Mkdir(dir string) error {
}
req := s3.CreateBucketInput{
Bucket: &f.bucket,
ACL: &f.opt.ACL,
ACL: &f.opt.BucketACL,
}
if f.opt.LocationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
@@ -1271,7 +1450,7 @@ func (f *Fs) Mkdir(dir string) error {
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.CreateBucket(&req)
return shouldRetry(err)
return f.shouldRetry(err)
})
if err, ok := err.(awserr.Error); ok {
if err.Code() == "BucketAlreadyOwnedByYou" {
@@ -1281,6 +1460,7 @@ func (f *Fs) Mkdir(dir string) error {
if err == nil {
f.bucketOK = true
f.bucketDeleted = false
fs.Infof(f, "Bucket created with ACL %q", *req.ACL)
}
return err
}
@@ -1299,11 +1479,12 @@ func (f *Fs) Rmdir(dir string) error {
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.DeleteBucket(&req)
return shouldRetry(err)
return f.shouldRetry(err)
})
if err == nil {
f.bucketOK = false
f.bucketDeleted = true
fs.Infof(f, "Bucket deleted")
}
return err
}
@@ -1359,7 +1540,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.CopyObject(&req)
return shouldRetry(err)
return f.shouldRetry(err)
})
if err != nil {
return nil, err
@@ -1441,7 +1622,7 @@ func (o *Object) readMetaData() (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.c.HeadObject(&req)
return shouldRetry(err)
return o.fs.shouldRetry(err)
})
if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok {
@@ -1537,7 +1718,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
}
err = o.fs.pacer.Call(func() (bool, error) {
_, err := o.fs.c.CopyObject(&req)
return shouldRetry(err)
return o.fs.shouldRetry(err)
})
return err
}
@@ -1569,7 +1750,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.c.GetObject(&req)
return shouldRetry(err)
return o.fs.shouldRetry(err)
})
if err, ok := err.(awserr.RequestFailure); ok {
if err.Code() == "InvalidObjectState" {
@@ -1660,7 +1841,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
_, err = uploader.Upload(&req)
return shouldRetry(err)
return o.fs.shouldRetry(err)
})
if err != nil {
return err
@@ -1716,11 +1897,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := o.fs.srv.Do(httpReq)
if err != nil {
return shouldRetry(err)
return o.fs.shouldRetry(err)
}
body, err := rest.ReadBody(resp)
if err != nil {
return shouldRetry(err)
return o.fs.shouldRetry(err)
}
if resp.StatusCode >= 200 && resp.StatusCode < 299 {
return false, nil
@@ -1748,7 +1929,7 @@ func (o *Object) Remove() error {
}
err := o.fs.pacer.Call(func() (bool, error) {
_, err := o.fs.c.DeleteObject(&req)
return shouldRetry(err)
return o.fs.shouldRetry(err)
})
return err
}

@@ -66,7 +66,22 @@ func init() {
IsPassword: true,
}, {
Name: "key_file",
Help: "Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.",
Help: "Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.",
}, {
Name: "key_file_pass",
Help: `The passphrase to decrypt the PEM-encoded private key file.

Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
in the new OpenSSH format can't be used.`,
IsPassword: true,
}, {
Name: "key_use_agent",
Help: `When set forces the usage of the ssh-agent.

When key-file is also set, the ".pub" file of the specified key-file is read and only the associated key is
requested from the ssh-agent. This avoids ` + "`Too many authentication failures for *username*`" + ` errors
when the ssh-agent contains many keys.`,
Default: false,
}, {
Name: "use_insecure_cipher",
Help: "Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.",
@@ -122,6 +137,8 @@ type Options struct {
Port string `config:"port"`
Pass string `config:"pass"`
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
KeyUseAgent bool `config:"key_use_agent"`
UseInsecureCipher bool `config:"use_insecure_cipher"`
DisableHashCheck bool `config:"disable_hashcheck"`
AskPassword bool `config:"ask_password"`
@@ -298,6 +315,18 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
f.poolMu.Unlock()
}

// shellExpand replaces a leading "~" with "${HOME}" and expands all environment
// variables afterwards.
func shellExpand(s string) string {
if s != "" {
if s[0] == '~' {
s = "${HOME}" + s[1:]
}
s = os.ExpandEnv(s)
}
return s
}
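A quick demonstration of `shellExpand` (using a local copy of the helper, since the real one is unexported):

package main

import (
	"fmt"
	"os"
)

// Local copy of the unexported helper above, for illustration only.
func shellExpand(s string) string {
	if s != "" {
		if s[0] == '~' {
			s = "${HOME}" + s[1:]
		}
		s = os.ExpandEnv(s)
	}
	return s
}

func main() {
	os.Setenv("HOME", "/home/alice")
	fmt.Println(shellExpand("~/.ssh/id_rsa"))     // /home/alice/.ssh/id_rsa
	fmt.Println(shellExpand("$HOME/keys/backup")) // /home/alice/keys/backup
	fmt.Println(shellExpand(""))                  // empty stays empty
}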

// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -325,8 +354,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc")
}

keyFile := shellExpand(opt.KeyFile)
// Add ssh agent-auth if no password or file specified
if opt.Pass == "" && opt.KeyFile == "" {
if (opt.Pass == "" && keyFile == "") || opt.KeyUseAgent {
sshAgentClient, _, err := sshagent.New()
if err != nil {
return nil, errors.Wrap(err, "couldn't connect to ssh-agent")
@@ -335,16 +365,46 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "couldn't read ssh agent signers")
}
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signers...))
if keyFile != "" {
pubBytes, err := ioutil.ReadFile(keyFile + ".pub")
if err != nil {
return nil, errors.Wrap(err, "failed to read public key file")
}
pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes)
if err != nil {
return nil, errors.Wrap(err, "failed to parse public key file")
}
pubM := pub.Marshal()
found := false
for _, s := range signers {
if bytes.Equal(pubM, s.PublicKey().Marshal()) {
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(s))
found = true
break
}
}
if !found {
return nil, errors.New("private key not found in the ssh-agent")
}
} else {
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signers...))
}
}

// Load key file if specified
if opt.KeyFile != "" {
key, err := ioutil.ReadFile(opt.KeyFile)
if keyFile != "" {
key, err := ioutil.ReadFile(keyFile)
if err != nil {
return nil, errors.Wrap(err, "failed to read private key file")
}
signer, err := ssh.ParsePrivateKey(key)
clearpass := ""
if opt.KeyFilePass != "" {
clearpass, err = obscure.Reveal(opt.KeyFilePass)
if err != nil {
return nil, err
}
}
signer, err := ssh.ParsePrivateKeyWithPassphrase(key, []byte(clearpass))
if err != nil {
return nil, errors.Wrap(err, "failed to parse private key file")
}
@@ -505,9 +565,13 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// If file is a symlink (not a regular file is the best cross platform test we can do), do a stat to
// pick up the size and type of the destination, instead of the size and type of the symlink.
if !info.Mode().IsRegular() {
oldInfo := info
info, err = f.stat(remote)
if err != nil {
return nil, errors.Wrap(err, "stat of non-regular file/dir failed")
if !os.IsNotExist(err) {
fs.Errorf(remote, "stat of non-regular file/dir failed: %v", err)
}
info = oldInfo
}
}
if info.IsDir() {

@@ -130,6 +130,15 @@ func init() {
}, {
Name: "auth_token",
Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)",
}, {
Name: "application_credential_id",
Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)",
}, {
Name: "application_credential_name",
Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)",
}, {
Name: "application_credential_secret",
Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)",
}, {
Name: "auth_version",
Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)",
@@ -173,23 +182,26 @@ provider.`,

// Options defines the configuration for this backend
type Options struct {
EnvAuth bool `config:"env_auth"`
User string `config:"user"`
Key string `config:"key"`
Auth string `config:"auth"`
UserID string `config:"user_id"`
Domain string `config:"domain"`
Tenant string `config:"tenant"`
TenantID string `config:"tenant_id"`
TenantDomain string `config:"tenant_domain"`
Region string `config:"region"`
StorageURL string `config:"storage_url"`
AuthToken string `config:"auth_token"`
AuthVersion int `config:"auth_version"`
StoragePolicy string `config:"storage_policy"`
EndpointType string `config:"endpoint_type"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
NoChunk bool `config:"no_chunk"`
EnvAuth bool `config:"env_auth"`
User string `config:"user"`
Key string `config:"key"`
Auth string `config:"auth"`
UserID string `config:"user_id"`
Domain string `config:"domain"`
Tenant string `config:"tenant"`
TenantID string `config:"tenant_id"`
TenantDomain string `config:"tenant_domain"`
Region string `config:"region"`
StorageURL string `config:"storage_url"`
AuthToken string `config:"auth_token"`
AuthVersion int `config:"auth_version"`
ApplicationCredentialId string `config:"application_credential_id"`
ApplicationCredentialName string `config:"application_credential_name"`
ApplicationCredentialSecret string `config:"application_credential_secret"`
StoragePolicy string `config:"storage_policy"`
EndpointType string `config:"endpoint_type"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
NoChunk bool `config:"no_chunk"`
}

// Fs represents a remote swift server
@@ -293,22 +305,25 @@ func parsePath(path string) (container, directory string, err error) {
func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
c := &swift.Connection{
// Keep these in the same order as the Config for ease of checking
UserName: opt.User,
ApiKey: opt.Key,
AuthUrl: opt.Auth,
UserId: opt.UserID,
Domain: opt.Domain,
Tenant: opt.Tenant,
TenantId: opt.TenantID,
TenantDomain: opt.TenantDomain,
Region: opt.Region,
StorageUrl: opt.StorageURL,
AuthToken: opt.AuthToken,
AuthVersion: opt.AuthVersion,
EndpointType: swift.EndpointType(opt.EndpointType),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(fs.Config),
UserName: opt.User,
ApiKey: opt.Key,
AuthUrl: opt.Auth,
UserId: opt.UserID,
Domain: opt.Domain,
Tenant: opt.Tenant,
TenantId: opt.TenantID,
TenantDomain: opt.TenantDomain,
Region: opt.Region,
StorageUrl: opt.StorageURL,
AuthToken: opt.AuthToken,
AuthVersion: opt.AuthVersion,
ApplicationCredentialId: opt.ApplicationCredentialId,
ApplicationCredentialName: opt.ApplicationCredentialName,
ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
EndpointType: swift.EndpointType(opt.EndpointType),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(fs.Config),
}
if opt.EnvAuth {
err := c.ApplyEnvironment()
@@ -318,11 +333,13 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
}
StorageUrl, AuthToken := c.StorageUrl, c.AuthToken // nolint
if !c.Authenticated() {
if c.UserName == "" && c.UserId == "" {
return nil, errors.New("user name or user id not found for authentication (and no storage_url+auth_token is provided)")
}
if c.ApiKey == "" {
return nil, errors.New("key not found")
if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret == "" {
if c.UserName == "" && c.UserId == "" {
return nil, errors.New("user name or user id not found for authentication (and no storage_url+auth_token is provided)")
}
if c.ApiKey == "" {
return nil, errors.New("key not found")
}
}
if c.AuthUrl == "" {
return nil, errors.New("auth not found")

@@ -376,6 +376,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}).Fill(f)
features = features.Mask(f.wr) // mask the features just on the writable fs

// Really need the union of all remotes for these, so
// re-instate and calculate separately.
features.ChangeNotify = f.ChangeNotify
features.DirCacheFlush = f.DirCacheFlush

// FIXME maybe should be masking the bools here?

// Clear ChangeNotify and DirCacheFlush if all are nil

@@ -10,6 +10,7 @@ import (
"time"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
)

const (
@@ -65,11 +66,12 @@ type Response struct {
// Note that status collects all the status values for which we just
// check the first is OK.
type Prop struct {
Status []string `xml:"DAV: status"`
Name string `xml:"DAV: prop>displayname,omitempty"`
Type *xml.Name `xml:"DAV: prop>resourcetype>collection,omitempty"`
Size int64 `xml:"DAV: prop>getcontentlength,omitempty"`
Modified Time `xml:"DAV: prop>getlastmodified,omitempty"`
Status []string `xml:"DAV: status"`
Name string `xml:"DAV: prop>displayname,omitempty"`
Type *xml.Name `xml:"DAV: prop>resourcetype>collection,omitempty"`
Size int64 `xml:"DAV: prop>getcontentlength,omitempty"`
Modified Time `xml:"DAV: prop>getlastmodified,omitempty"`
Checksums []string `xml:"prop>checksums>checksum,omitempty"`
}

// Parse a status of the form "HTTP/1.1 200 OK" or "HTTP/1.1 200"
@@ -95,6 +97,26 @@ func (p *Prop) StatusOK() bool {
return false
}

// Hashes returns a map of all checksums - may be nil
func (p *Prop) Hashes() (hashes map[hash.Type]string) {
if len(p.Checksums) == 0 {
return nil
}
hashes = make(map[hash.Type]string)
for _, checksums := range p.Checksums {
checksums = strings.ToLower(checksums)
for _, checksum := range strings.Split(checksums, " ") {
switch {
case strings.HasPrefix(checksum, "sha1:"):
hashes[hash.SHA1] = checksum[5:]
case strings.HasPrefix(checksum, "md5:"):
hashes[hash.MD5] = checksum[4:]
}
}
}
return hashes
}

// PropValue is a tagged name and value
type PropValue struct {
XMLName xml.Name `xml:""`
@@ -187,3 +209,22 @@ func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
}
return err
}

// Quota is used to read the bytes used and available
//
// <d:multistatus xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns" xmlns:oc="http://owncloud.org/ns" xmlns:nc="http://nextcloud.org/ns">
// <d:response>
// <d:href>/remote.php/webdav/</d:href>
// <d:propstat>
// <d:prop>
// <d:quota-available-bytes>-3</d:quota-available-bytes>
// <d:quota-used-bytes>376461895</d:quota-used-bytes>
// </d:prop>
// <d:status>HTTP/1.1 200 OK</d:status>
// </d:propstat>
// </d:response>
// </d:multistatus>
type Quota struct {
Available int64 `xml:"DAV: response>propstat>prop>quota-available-bytes"`
Used int64 `xml:"DAV: response>propstat>prop>quota-used-bytes"`
}
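The `Quota` struct can be exercised directly against the sample response in the comment above. A minimal, standalone sketch assuming only the standard library (rclone itself feeds this struct through `CallXML` in the `About` method below):

package main

import (
	"encoding/xml"
	"fmt"
)

// Quota matches the struct above; the "DAV: ..." tags bind the fields
// to the namespaced property elements inside the multistatus response.
type Quota struct {
	Available int64 `xml:"DAV: response>propstat>prop>quota-available-bytes"`
	Used      int64 `xml:"DAV: response>propstat>prop>quota-used-bytes"`
}

func main() {
	data := []byte(`<d:multistatus xmlns:d="DAV:">
 <d:response>
  <d:href>/remote.php/webdav/</d:href>
  <d:propstat>
   <d:prop>
    <d:quota-available-bytes>-3</d:quota-available-bytes>
    <d:quota-used-bytes>376461895</d:quota-used-bytes>
   </d:prop>
   <d:status>HTTP/1.1 200 OK</d:status>
  </d:propstat>
 </d:response>
</d:multistatus>`)
	var q Quota
	if err := xml.Unmarshal(data, &q); err != nil {
		panic(err)
	}
	fmt.Println(q.Available, q.Used) // -3 376461895
}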

@@ -2,23 +2,13 @@
// object storage system.
package webdav

// Owncloud: Getting Oc-Checksum:
// SHA1:f572d396fae9206628714fb2ce00f72e94f2258f on HEAD but not on
// nextcloud?

// docs for file webdav
// https://docs.nextcloud.com/server/12/developer_manual/client_apis/WebDAV/index.html

// indicates checksums can be set as metadata here
// https://github.com/nextcloud/server/issues/6129
// owncloud seems to have checksums as metadata though - can read them

// SetModTime might be possible
// https://stackoverflow.com/questions/3579608/webdav-can-a-client-modify-the-mtime-of-a-file
// ...support for a PROPSET to lastmodified (mind the missing get) which does the utime() call might be an option.
// For example the ownCloud WebDAV server does it that way.

import (
"bytes"
"encoding/xml"
"fmt"
"io"
@@ -116,6 +106,7 @@ type Fs struct {
canStream bool // set if can stream
useOCMtime bool // set if can use X-OC-Mtime
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
hasChecksums bool // set if can use owncloud style checksums
}

// Object describes a webdav object
@@ -127,7 +118,8 @@ type Object struct {
hasMetaData bool // whether info below has been set
size int64 // size of the object
modTime time.Time // modification time of the object
sha1 string // SHA-1 of the object content
sha1 string // SHA-1 of the object content if known
md5 string // MD5 of the object content if known
}

// ------------------------------------------------------------
@@ -194,6 +186,9 @@ func (f *Fs) readMetaDataForPath(path string, depth string) (info *api.Prop, err
},
NoRedirect: true,
}
if f.hasChecksums {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
@@ -357,9 +352,11 @@ func (f *Fs) setQuirks(vendor string) error {
f.canStream = true
f.precision = time.Second
f.useOCMtime = true
f.hasChecksums = true
case "nextcloud":
f.precision = time.Second
f.useOCMtime = true
f.hasChecksums = true
case "sharepoint":
// To mount sharepoint, two Cookies are required
// They have to be set instead of BasicAuth
@@ -426,6 +423,22 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}

// Read the normal props, plus the checksums
//
// <oc:checksums><oc:checksum>SHA1:f572d396fae9206628714fb2ce00f72e94f2258f MD5:b1946ac92492d2347c6235b4d2611184 ADLER32:084b021f</oc:checksum></oc:checksums>
var owncloudProps = []byte(`<?xml version="1.0"?>
<d:propfind xmlns:d="DAV:" xmlns:oc="http://owncloud.org/ns" xmlns:nc="http://nextcloud.org/ns">
 <d:prop>
  <d:displayname />
  <d:getlastmodified />
  <d:getcontentlength />
  <d:resourcetype />
  <d:getcontenttype />
  <oc:checksums />
 </d:prop>
</d:propfind>
`)

// list the objects into the function supplied
//
// If directories is set it only sends directories
@@ -445,6 +458,9 @@ func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, depth str
"Depth": depth,
},
}
if f.hasChecksums {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
@@ -847,9 +863,52 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
if f.hasChecksums {
return hash.NewHashSet(hash.MD5, hash.SHA1)
}
return hash.Set(hash.None)
}

// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
opts := rest.Opts{
Method: "PROPFIND",
Path: "",
ExtraHeaders: map[string]string{
"Depth": "0",
},
}
opts.Body = bytes.NewBuffer([]byte(`<?xml version="1.0" ?>
<D:propfind xmlns:D="DAV:">
 <D:prop>
  <D:quota-available-bytes/>
  <D:quota-used-bytes/>
 </D:prop>
</D:propfind>
`))
var q = api.Quota{
Available: -1,
Used: -1,
}
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(&opts, nil, &q)
return shouldRetry(resp, err)
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "about call failed")
|
||||
}
|
||||
usage := &fs.Usage{}
|
||||
if q.Available >= 0 && q.Used >= 0 {
|
||||
usage.Total = fs.NewUsageValue(q.Available + q.Used)
|
||||
}
|
||||
if q.Used >= 0 {
|
||||
usage.Used = fs.NewUsageValue(q.Used)
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Fs returns the parent Fs
|
||||
@@ -870,12 +929,17 @@ func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Hash returns the SHA-1 of an object returning a lowercase hex string
|
||||
// Hash returns the SHA1 or MD5 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
||||
if t != hash.SHA1 {
|
||||
return "", hash.ErrUnsupported
|
||||
if o.fs.hasChecksums {
|
||||
switch t {
|
||||
case hash.SHA1:
|
||||
return o.sha1, nil
|
||||
case hash.MD5:
|
||||
return o.md5, nil
|
||||
}
|
||||
}
|
||||
return o.sha1, nil
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
@@ -893,6 +957,11 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
|
||||
o.hasMetaData = true
|
||||
o.size = info.Size
|
||||
o.modTime = time.Time(info.Modified)
|
||||
if o.fs.hasChecksums {
|
||||
hashes := info.Hashes()
|
||||
o.sha1 = hashes[hash.SHA1]
|
||||
o.md5 = hashes[hash.MD5]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -972,9 +1041,21 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
|
||||
ContentType: fs.MimeType(src),
|
||||
}
|
||||
if o.fs.useOCMtime {
|
||||
opts.ExtraHeaders = map[string]string{
|
||||
"X-OC-Mtime": fmt.Sprintf("%f", float64(src.ModTime().UnixNano())/1E9),
|
||||
if o.fs.useOCMtime || o.fs.hasChecksums {
|
||||
opts.ExtraHeaders = map[string]string{}
|
||||
if o.fs.useOCMtime {
|
||||
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime().UnixNano())/1E9)
|
||||
}
|
||||
if o.fs.hasChecksums {
|
||||
// Set an upload checksum - prefer SHA1
|
||||
//
|
||||
// This is used as an upload integrity test. If we set
|
||||
// only SHA1 here, owncloud will calculate the MD5 too.
|
||||
if sha1, _ := src.Hash(hash.SHA1); sha1 != "" {
|
||||
opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
|
||||
} else if md5, _ := src.Hash(hash.MD5); md5 != "" {
|
||||
opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
|
||||
}
|
||||
}
|
||||
}
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
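For reference, a minimal standalone sketch (not part of the patch, with a made-up payload and timestamp) of what the two extra upload headers set above end up looking like:

```go
package main

import (
	"crypto/md5"
	"crypto/sha1"
	"fmt"
	"time"
)

func main() {
	data := []byte("hello\n") // example payload

	// X-OC-Mtime carries the modification time as fractional seconds.
	modTime := time.Date(2018, 11, 2, 12, 0, 0, 500000000, time.UTC)
	fmt.Printf("X-OC-Mtime: %f\n", float64(modTime.UnixNano())/1e9)

	// OC-Checksum prefers SHA-1; owncloud then derives the MD5 itself.
	fmt.Printf("OC-Checksum: SHA1:%x\n", sha1.Sum(data))
	// Shape of the fallback when only an MD5 is known:
	fmt.Printf("OC-Checksum: MD5:%x\n", md5.Sum(data))
}
```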
@@ -1018,5 +1099,6 @@ var (
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
|
||||
|
||||
5
bin/build-xgo-cgofuse.sh
Executable file
@@ -0,0 +1,5 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
docker build -t rclone/xgo-cgofuse https://github.com/billziss-gh/cgofuse.git
|
||||
docker images
|
||||
docker push rclone/xgo-cgofuse
|
||||
@@ -3,7 +3,7 @@
|
||||
|
||||
version="$1"
|
||||
if [ "$version" = "" ]; then
|
||||
echo "Syntax: $0 <version> [delete]"
|
||||
echo "Syntax: $0 <version, eg v1.42> [delete]"
|
||||
exit 1
|
||||
fi
|
||||
dry_run="--dry-run"
|
||||
@@ -14,4 +14,4 @@ else
|
||||
echo "Use '$0 $version delete' to actually delete files"
|
||||
fi
|
||||
|
||||
rclone ${dry_run} --fast-list -P --checkers 16 --transfers 16 delete --include "**/${version}**" memstore:beta-rclone-org
|
||||
rclone ${dry_run} -P --fast-list --checkers 16 --transfers 16 delete --include "**${version}**" memstore:beta-rclone-org
|
||||
|
||||
@@ -93,6 +93,15 @@ For example to make a swift remote of name myremote using auto config
|
||||
you would do:
|
||||
|
||||
rclone config create myremote swift env_auth true
|
||||
|
||||
Note that if the config process would normally ask a question the
|
||||
default is taken. Each time that happens rclone will print a message
|
||||
saying how to affect the value taken.
|
||||
|
||||
So for example if you wanted to configure a Google Drive remote but
|
||||
using remote authorization you would do this:
|
||||
|
||||
rclone config create mydrive drive config_is_local false
|
||||
`,
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(2, 256, command, args)
|
||||
@@ -119,6 +128,11 @@ in pairs of <key> <value>.
|
||||
For example to update the env_auth field of a remote of name myremote you would do:
|
||||
|
||||
rclone config update myremote swift env_auth true
|
||||
|
||||
If the remote uses oauth the token will be updated, if you don't
|
||||
require this add an extra parameter thus:
|
||||
|
||||
rclone config update myremote swift env_auth true config_refresh_token false
|
||||
`,
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(3, 256, command, args)
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -62,6 +63,28 @@ func checkMountEmpty(mountpoint string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check the root doesn't overlap the mountpoint
|
||||
func checkMountpointOverlap(root, mountpoint string) error {
|
||||
abs := func(x string) string {
|
||||
if absX, err := filepath.EvalSymlinks(x); err == nil {
|
||||
x = absX
|
||||
}
|
||||
if absX, err := filepath.Abs(x); err == nil {
|
||||
x = absX
|
||||
}
|
||||
x = filepath.ToSlash(x)
|
||||
if !strings.HasSuffix(x, "/") {
|
||||
x += "/"
|
||||
}
|
||||
return x
|
||||
}
|
||||
rootAbs, mountpointAbs := abs(root), abs(mountpoint)
|
||||
if strings.HasPrefix(rootAbs, mountpointAbs) || strings.HasPrefix(mountpointAbs, rootAbs) {
|
||||
return errors.Errorf("mount point %q and directory to be mounted %q mustn't overlap", mountpoint, root)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
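As a rough standalone illustration of the check above (symlink resolution omitted, paths invented for the example), overlapping combinations are rejected in either direction:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// overlaps mirrors the prefix test above, minus the EvalSymlinks step.
func overlaps(root, mountpoint string) bool {
	norm := func(x string) string {
		if absX, err := filepath.Abs(x); err == nil {
			x = absX
		}
		x = filepath.ToSlash(x)
		if !strings.HasSuffix(x, "/") {
			x += "/"
		}
		return x
	}
	rootAbs, mountpointAbs := norm(root), norm(mountpoint)
	return strings.HasPrefix(rootAbs, mountpointAbs) || strings.HasPrefix(mountpointAbs, rootAbs)
}

func main() {
	fmt.Println(overlaps("/home/user", "/home/user/mnt"))   // true: refused
	fmt.Println(overlaps("/home/user/data", "/mnt/point"))  // false: fine
}
```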
// NewMountCommand makes a mount command with the given name and Mount function
|
||||
func NewMountCommand(commandName string, Mount func(f fs.Fs, mountpoint string) error) *cobra.Command {
|
||||
var commandDefintion = &cobra.Command{
|
||||
@@ -220,7 +243,14 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
|
||||
config.PassConfigKeyForDaemonization = true
|
||||
}
|
||||
|
||||
mountpoint := args[1]
|
||||
fdst := cmd.NewFsDir(args)
|
||||
if fdst.Name() == "" || fdst.Name() == "local" {
|
||||
err := checkMountpointOverlap(fdst.Root(), mountpoint)
|
||||
if err != nil {
|
||||
log.Fatalf("Fatal error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Show stats if the user has specifically requested them
|
||||
if cmd.ShowStats() {
|
||||
@@ -230,7 +260,7 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
|
||||
// Skip checkMountEmpty if --allow-non-empty flag is used or if
|
||||
// the Operating System is Windows
|
||||
if !AllowNonEmpty && runtime.GOOS != "windows" {
|
||||
err := checkMountEmpty(args[1])
|
||||
err := checkMountEmpty(mountpoint)
|
||||
if err != nil {
|
||||
log.Fatalf("Fatal error: %v", err)
|
||||
}
|
||||
@@ -253,7 +283,7 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
|
||||
}
|
||||
}
|
||||
|
||||
err := Mount(fdst, args[1])
|
||||
err := Mount(fdst, mountpoint)
|
||||
if err != nil {
|
||||
log.Fatalf("Fatal error: %v", err)
|
||||
}
|
||||
@@ -296,7 +326,11 @@ func ClipBlocks(b *uint64) {
|
||||
var max uint64
|
||||
switch runtime.GOOS {
|
||||
case "windows":
|
||||
max = (1 << 43) - 1
|
||||
if runtime.GOARCH == "386" {
|
||||
max = (1 << 32) - 1
|
||||
} else {
|
||||
max = (1 << 43) - 1
|
||||
}
|
||||
case "darwin":
|
||||
// OSX FUSE only supports 32 bit number of blocks
|
||||
// https://github.com/osxfuse/osxfuse/issues/396
|
||||
|
||||
@@ -17,7 +17,7 @@ var commandDefintion = &cobra.Command{
|
||||
Use: "rcd <path to files to serve>*",
|
||||
Short: `Run rclone listening to remote control commands only.`,
|
||||
Long: `
|
||||
This runs rclone so that it only listents to remote control commands.
|
||||
This runs rclone so that it only listens to remote control commands.
|
||||
|
||||
This is useful if you are controlling rclone via the rc API.
|
||||
|
||||
|
||||
451
cmd/serve/dlna/cd-service-desc.go
Normal file
@@ -0,0 +1,451 @@
|
||||
package dlna
|
||||
|
||||
const contentDirectoryServiceDescription = `<?xml version="1.0"?>
|
||||
<scpd xmlns="urn:schemas-upnp-org:service-1-0">
|
||||
<specVersion>
|
||||
<major>1</major>
|
||||
<minor>0</minor>
|
||||
</specVersion>
|
||||
<actionList>
|
||||
<action>
|
||||
<name>GetSearchCapabilities</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>SearchCaps</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>SearchCapabilities</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>GetSortCapabilities</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>SortCaps</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>SortCapabilities</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>GetSortExtensionCapabilities</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>SortExtensionCaps</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>SortExtensionCapabilities</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>GetFeatureList</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>FeatureList</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>FeatureList</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>GetSystemUpdateID</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>Id</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>SystemUpdateID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>Browse</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>ObjectID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>BrowseFlag</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_BrowseFlag</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>Filter</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Filter</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>StartingIndex</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Index</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>RequestedCount</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>SortCriteria</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_SortCriteria</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>Result</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Result</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>NumberReturned</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>TotalMatches</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>UpdateID</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_UpdateID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>Search</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>ContainerID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>SearchCriteria</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_SearchCriteria</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>Filter</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Filter</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>StartingIndex</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Index</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>RequestedCount</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>SortCriteria</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_SortCriteria</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>Result</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Result</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>NumberReturned</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>TotalMatches</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>UpdateID</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_UpdateID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>CreateObject</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>ContainerID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>Elements</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Result</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>ObjectID</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>Result</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_Result</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>DestroyObject</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>ObjectID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>UpdateObject</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>ObjectID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>CurrentTagValue</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TagValueList</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>NewTagValue</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TagValueList</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>MoveObject</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>ObjectID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>NewParentID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>NewObjectID</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>ImportResource</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>SourceURI</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_URI</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>DestinationURI</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_URI</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>TransferID</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TransferID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>ExportResource</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>SourceURI</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_URI</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>DestinationURI</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_URI</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>TransferID</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TransferID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>StopTransferResource</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>TransferID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TransferID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>DeleteResource</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>ResourceURI</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_URI</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>GetTransferProgress</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>TransferID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TransferID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>TransferStatus</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TransferStatus</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>TransferLength</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TransferLength</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>TransferTotal</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_TransferTotal</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
<action>
|
||||
<name>CreateReference</name>
|
||||
<argumentList>
|
||||
<argument>
|
||||
<name>ContainerID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>ObjectID</name>
|
||||
<direction>in</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
<argument>
|
||||
<name>NewID</name>
|
||||
<direction>out</direction>
|
||||
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
|
||||
</argument>
|
||||
</argumentList>
|
||||
</action>
|
||||
</actionList>
|
||||
<serviceStateTable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>SearchCapabilities</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>SortCapabilities</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>SortExtensionCapabilities</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="yes">
|
||||
<name>SystemUpdateID</name>
|
||||
<dataType>ui4</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="yes">
|
||||
<name>ContainerUpdateIDs</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="yes">
|
||||
<name>TransferIDs</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>FeatureList</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_ObjectID</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_Result</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_SearchCriteria</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_BrowseFlag</name>
|
||||
<dataType>string</dataType>
|
||||
<allowedValueList>
|
||||
<allowedValue>BrowseMetadata</allowedValue>
|
||||
<allowedValue>BrowseDirectChildren</allowedValue>
|
||||
</allowedValueList>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_Filter</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_SortCriteria</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_Index</name>
|
||||
<dataType>ui4</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_Count</name>
|
||||
<dataType>ui4</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_UpdateID</name>
|
||||
<dataType>ui4</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_TransferID</name>
|
||||
<dataType>ui4</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_TransferStatus</name>
|
||||
<dataType>string</dataType>
|
||||
<allowedValueList>
|
||||
<allowedValue>COMPLETED</allowedValue>
|
||||
<allowedValue>ERROR</allowedValue>
|
||||
<allowedValue>IN_PROGRESS</allowedValue>
|
||||
<allowedValue>STOPPED</allowedValue>
|
||||
</allowedValueList>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_TransferLength</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_TransferTotal</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_TagValueList</name>
|
||||
<dataType>string</dataType>
|
||||
</stateVariable>
|
||||
<stateVariable sendEvents="no">
|
||||
<name>A_ARG_TYPE_URI</name>
|
||||
<dataType>uri</dataType>
|
||||
</stateVariable>
|
||||
</serviceStateTable>
|
||||
</scpd>`
|
||||
240
cmd/serve/dlna/cds.go
Normal file
@@ -0,0 +1,240 @@
|
||||
package dlna
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
|
||||
"github.com/anacrolix/dms/dlna"
|
||||
"github.com/anacrolix/dms/upnp"
|
||||
"github.com/anacrolix/dms/upnpav"
|
||||
"github.com/ncw/rclone/vfs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type contentDirectoryService struct {
|
||||
*server
|
||||
upnp.Eventing
|
||||
}
|
||||
|
||||
func (cds *contentDirectoryService) updateIDString() string {
|
||||
return fmt.Sprintf("%d", uint32(os.Getpid()))
|
||||
}
|
||||
|
||||
// Turns the given entry and DMS host into a UPnP object. A nil object is
|
||||
// returned if the entry is not of interest.
|
||||
func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fileInfo os.FileInfo, host string) (ret interface{}, err error) {
|
||||
obj := upnpav.Object{
|
||||
ID: cdsObject.ID(),
|
||||
Restricted: 1,
|
||||
ParentID: cdsObject.ParentID(),
|
||||
}
|
||||
|
||||
if fileInfo.IsDir() {
|
||||
obj.Class = "object.container.storageFolder"
|
||||
obj.Title = fileInfo.Name()
|
||||
ret = upnpav.Container{Object: obj}
|
||||
return
|
||||
}
|
||||
|
||||
if !fileInfo.Mode().IsRegular() {
|
||||
return
|
||||
}
|
||||
|
||||
// Hardcode "videoItem" so that files show up in VLC.
|
||||
obj.Class = "object.item.videoItem"
|
||||
obj.Title = fileInfo.Name()
|
||||
|
||||
item := upnpav.Item{
|
||||
Object: obj,
|
||||
Res: make([]upnpav.Resource, 0, 1),
|
||||
}
|
||||
|
||||
item.Res = append(item.Res, upnpav.Resource{
|
||||
URL: (&url.URL{
|
||||
Scheme: "http",
|
||||
Host: host,
|
||||
Path: resPath,
|
||||
RawQuery: url.Values{
|
||||
"path": {cdsObject.Path},
|
||||
}.Encode(),
|
||||
}).String(),
|
||||
// Hardcode "video/x-matroska" so that files show up in VLC.
|
||||
ProtocolInfo: fmt.Sprintf("http-get:*:video/x-matroska:%s", dlna.ContentFeatures{
|
||||
SupportRange: true,
|
||||
}.String()),
|
||||
Bitrate: 0,
|
||||
Duration: "",
|
||||
Size: uint64(fileInfo.Size()),
|
||||
Resolution: "",
|
||||
})
|
||||
|
||||
ret = item
|
||||
return
|
||||
}
|
||||
|
||||
// Returns all the upnpav objects in a directory.
|
||||
func (cds *contentDirectoryService) readContainer(o object, host string) (ret []interface{}, err error) {
|
||||
node, err := cds.vfs.Stat(o.Path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !node.IsDir() {
|
||||
err = errors.New("not a directory")
|
||||
return
|
||||
}
|
||||
|
||||
dir := node.(*vfs.Dir)
|
||||
dirEntries, err := dir.ReadDirAll()
|
||||
if err != nil {
|
||||
err = errors.New("failed to list directory")
|
||||
return
|
||||
}
|
||||
|
||||
sort.Sort(dirEntries)
|
||||
|
||||
for _, de := range dirEntries {
|
||||
child := object{
|
||||
path.Join(o.Path, de.Name()),
|
||||
}
|
||||
obj, err := cds.cdsObjectToUpnpavObject(child, de, host)
|
||||
if err != nil {
|
||||
log.Printf("error with %s: %s", child.FilePath(), err)
|
||||
continue
|
||||
}
|
||||
if obj != nil {
|
||||
ret = append(ret, obj)
|
||||
} else {
|
||||
log.Printf("bad %s", de)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type browse struct {
|
||||
ObjectID string
|
||||
BrowseFlag string
|
||||
Filter string
|
||||
StartingIndex int
|
||||
RequestedCount int
|
||||
}
|
||||
|
||||
// ContentDirectory object from ObjectID.
|
||||
func (cds *contentDirectoryService) objectFromID(id string) (o object, err error) {
|
||||
o.Path, err = url.QueryUnescape(id)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if o.Path == "0" {
|
||||
o.Path = "/"
|
||||
}
|
||||
o.Path = path.Clean(o.Path)
|
||||
if !path.IsAbs(o.Path) {
|
||||
err = fmt.Errorf("bad ObjectID %v", o.Path)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (cds *contentDirectoryService) Handle(action string, argsXML []byte, r *http.Request) (map[string]string, error) {
|
||||
host := r.Host
|
||||
|
||||
switch action {
|
||||
case "GetSystemUpdateID":
|
||||
return map[string]string{
|
||||
"Id": cds.updateIDString(),
|
||||
}, nil
|
||||
case "GetSortCapabilities":
|
||||
return map[string]string{
|
||||
"SortCaps": "dc:title",
|
||||
}, nil
|
||||
case "Browse":
|
||||
var browse browse
|
||||
if err := xml.Unmarshal([]byte(argsXML), &browse); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
obj, err := cds.objectFromID(browse.ObjectID)
|
||||
if err != nil {
|
||||
return nil, upnp.Errorf(upnpav.NoSuchObjectErrorCode, err.Error())
|
||||
}
|
||||
switch browse.BrowseFlag {
|
||||
case "BrowseDirectChildren":
|
||||
objs, err := cds.readContainer(obj, host)
|
||||
if err != nil {
|
||||
return nil, upnp.Errorf(upnpav.NoSuchObjectErrorCode, err.Error())
|
||||
}
|
||||
totalMatches := len(objs)
|
||||
objs = objs[func() (low int) {
|
||||
low = browse.StartingIndex
|
||||
if low > len(objs) {
|
||||
low = len(objs)
|
||||
}
|
||||
return
|
||||
}():]
|
||||
if browse.RequestedCount != 0 && int(browse.RequestedCount) < len(objs) {
|
||||
objs = objs[:browse.RequestedCount]
|
||||
}
|
||||
result, err := xml.Marshal(objs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return map[string]string{
|
||||
"TotalMatches": fmt.Sprint(totalMatches),
|
||||
"NumberReturned": fmt.Sprint(len(objs)),
|
||||
"Result": didlLite(string(result)),
|
||||
"UpdateID": cds.updateIDString(),
|
||||
}, nil
|
||||
default:
|
||||
return nil, upnp.Errorf(upnp.ArgumentValueInvalidErrorCode, "unhandled browse flag: %v", browse.BrowseFlag)
|
||||
}
|
||||
case "GetSearchCapabilities":
|
||||
return map[string]string{
|
||||
"SearchCaps": "",
|
||||
}, nil
|
||||
default:
|
||||
return nil, upnp.InvalidActionError
|
||||
}
|
||||
}
|
||||
|
||||
// Represents a ContentDirectory object.
|
||||
type object struct {
|
||||
Path string // The cleaned, absolute path for the object relative to the server.
|
||||
}
|
||||
|
||||
// Returns the actual local filesystem path for the object.
|
||||
func (o *object) FilePath() string {
|
||||
return filepath.FromSlash(o.Path)
|
||||
}
|
||||
|
||||
// Returns the ObjectID for the object. This is used in various ContentDirectory actions.
|
||||
func (o object) ID() string {
|
||||
if !path.IsAbs(o.Path) {
|
||||
log.Panicf("Relative object path: %s", o.Path)
|
||||
}
|
||||
if len(o.Path) == 1 {
|
||||
return "0"
|
||||
}
|
||||
return url.QueryEscape(o.Path)
|
||||
}
|
||||
|
||||
func (o *object) IsRoot() bool {
|
||||
return o.Path == "/"
|
||||
}
|
||||
|
||||
// Returns the object's parent ObjectID. Fortunately it can be deduced from the
|
||||
// ObjectID (for now).
|
||||
func (o object) ParentID() string {
|
||||
if o.IsRoot() {
|
||||
return "-1"
|
||||
}
|
||||
o.Path = path.Dir(o.Path)
|
||||
return o.ID()
|
||||
}
|
||||
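A quick sketch (paths invented) of the ObjectID scheme used by this ContentDirectory implementation: the VFS path itself is the ID, query-escaped, with "0" reserved for the root and "-1" for the root's parent.

```go
package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	p := "/TV Shows/episode 1.mkv" // example VFS path

	// ID(): the escaped path is the ObjectID sent to the client.
	id := url.QueryEscape(p)
	fmt.Println(id) // %2FTV+Shows%2Fepisode+1.mkv

	// objectFromID(): unescape and clean to get the path back.
	back, _ := url.QueryUnescape(id)
	fmt.Println(path.Clean(back)) // /TV Shows/episode 1.mkv

	// ParentID() is just the ID of the containing directory.
	fmt.Println(url.QueryEscape(path.Dir(p))) // %2FTV+Shows
}
```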
440
cmd/serve/dlna/dlna.go
Normal file
@@ -0,0 +1,440 @@
|
||||
package dlna
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/anacrolix/dms/soap"
|
||||
"github.com/anacrolix/dms/ssdp"
|
||||
"github.com/anacrolix/dms/upnp"
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/cmd/serve/dlna/dlnaflags"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/vfs"
|
||||
"github.com/ncw/rclone/vfs/vfsflags"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
dlnaflags.AddFlags(Command.Flags())
|
||||
vfsflags.AddFlags(Command.Flags())
|
||||
}
|
||||
|
||||
// Command definition for cobra.
|
||||
var Command = &cobra.Command{
|
||||
Use: "dlna remote:path",
|
||||
Short: `Serve remote:path over DLNA`,
|
||||
Long: `rclone serve dlna is a DLNA media server for media stored in a rclone remote. Many
|
||||
devices, such as the Xbox and PlayStation, can automatically discover this server in the LAN
|
||||
and play audio/video from it. VLC is also supported. Service discovery uses UDP multicast
|
||||
packets (SSDP) and will thus only work on LANs.
|
||||
|
||||
Rclone will list all files present in the remote, without filtering based on media formats or
|
||||
file extensions. Additionally, there is no media transcoding support. This means that some
|
||||
players might show files that they are not able to play back correctly.
|
||||
|
||||
` + dlnaflags.Help + vfs.Help,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
f := cmd.NewFsSrc(args)
|
||||
|
||||
cmd.Run(false, false, command, func() error {
|
||||
s := newServer(f, &dlnaflags.Opt)
|
||||
if err := s.Serve(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
s.Wait()
|
||||
return nil
|
||||
})
|
||||
},
|
||||
}
|
||||
|
||||
const (
|
||||
serverField = "Linux/3.4 DLNADOC/1.50 UPnP/1.0 DMS/1.0"
|
||||
rootDeviceType = "urn:schemas-upnp-org:device:MediaServer:1"
|
||||
rootDeviceModelName = "rclone"
|
||||
resPath = "/res"
|
||||
rootDescPath = "/rootDesc.xml"
|
||||
serviceControlURL = "/ctl"
|
||||
)
|
||||
|
||||
// Groups the service definition with its XML description.
|
||||
type service struct {
|
||||
upnp.Service
|
||||
SCPD string
|
||||
}
|
||||
|
||||
// Exposed UPnP AV services.
|
||||
var services = []*service{
|
||||
{
|
||||
Service: upnp.Service{
|
||||
ServiceType: "urn:schemas-upnp-org:service:ContentDirectory:1",
|
||||
ServiceId: "urn:upnp-org:serviceId:ContentDirectory",
|
||||
ControlURL: serviceControlURL,
|
||||
},
|
||||
SCPD: contentDirectoryServiceDescription,
|
||||
},
|
||||
}
|
||||
|
||||
func devices() []string {
|
||||
return []string{
|
||||
"urn:schemas-upnp-org:device:MediaServer:1",
|
||||
}
|
||||
}
|
||||
|
||||
func serviceTypes() (ret []string) {
|
||||
for _, s := range services {
|
||||
ret = append(ret, s.ServiceType)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type server struct {
|
||||
// The service SOAP handler keyed by service URN.
|
||||
services map[string]UPnPService
|
||||
|
||||
Interfaces []net.Interface
|
||||
|
||||
HTTPConn net.Listener
|
||||
httpListenAddr string
|
||||
httpServeMux *http.ServeMux
|
||||
|
||||
rootDeviceUUID string
|
||||
rootDescXML []byte
|
||||
|
||||
FriendlyName string
|
||||
|
||||
// For waiting on the listener to close
|
||||
waitChan chan struct{}
|
||||
|
||||
	// Time interval between SSDP announces
|
||||
AnnounceInterval time.Duration
|
||||
|
||||
f fs.Fs
|
||||
vfs *vfs.VFS
|
||||
}
|
||||
|
||||
func newServer(f fs.Fs, opt *dlnaflags.Options) *server {
|
||||
hostName, err := os.Hostname()
|
||||
if err != nil {
|
||||
hostName = ""
|
||||
} else {
|
||||
hostName = " (" + hostName + ")"
|
||||
}
|
||||
|
||||
s := &server{
|
||||
AnnounceInterval: 10 * time.Second,
|
||||
FriendlyName: "rclone" + hostName,
|
||||
|
||||
httpListenAddr: opt.ListenAddr,
|
||||
|
||||
f: f,
|
||||
vfs: vfs.New(f, &vfsflags.Opt),
|
||||
}
|
||||
|
||||
s.initServicesMap()
|
||||
s.listInterfaces()
|
||||
|
||||
s.httpServeMux = http.NewServeMux()
|
||||
s.rootDeviceUUID = makeDeviceUUID(s.FriendlyName)
|
||||
s.rootDescXML, err = xml.MarshalIndent(
|
||||
upnp.DeviceDesc{
|
||||
SpecVersion: upnp.SpecVersion{Major: 1, Minor: 0},
|
||||
Device: upnp.Device{
|
||||
DeviceType: rootDeviceType,
|
||||
FriendlyName: s.FriendlyName,
|
||||
Manufacturer: "rclone (rclone.org)",
|
||||
ModelName: rootDeviceModelName,
|
||||
UDN: s.rootDeviceUUID,
|
||||
ServiceList: func() (ss []upnp.Service) {
|
||||
for _, s := range services {
|
||||
ss = append(ss, s.Service)
|
||||
}
|
||||
return
|
||||
}(),
|
||||
},
|
||||
},
|
||||
" ", " ")
|
||||
if err != nil {
|
||||
// Contents are hardcoded, so this will never happen in production.
|
||||
log.Panicf("Marshal root descriptor XML: %v", err)
|
||||
}
|
||||
s.rootDescXML = append([]byte(`<?xml version="1.0"?>`), s.rootDescXML...)
|
||||
s.initMux(s.httpServeMux)
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// UPnPService is the interface for the SOAP service.
|
||||
type UPnPService interface {
|
||||
Handle(action string, argsXML []byte, r *http.Request) (respArgs map[string]string, err error)
|
||||
Subscribe(callback []*url.URL, timeoutSeconds int) (sid string, actualTimeout int, err error)
|
||||
Unsubscribe(sid string) error
|
||||
}
|
||||
|
||||
// initServicesMap is called during initialization of the server to prepare some internal data structures.
|
||||
func (s *server) initServicesMap() {
|
||||
urn, err := upnp.ParseServiceType(services[0].ServiceType)
|
||||
if err != nil {
|
||||
// The service type is hardcoded, so this error should never happen.
|
||||
log.Panicf("ParseServiceType: %v", err)
|
||||
}
|
||||
s.services = map[string]UPnPService{
|
||||
urn.Type: &contentDirectoryService{
|
||||
server: s,
|
||||
},
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// listInterfaces is called during initialization of the server to list the network interfaces
|
||||
// on the machine.
|
||||
func (s *server) listInterfaces() {
|
||||
ifs, err := net.Interfaces()
|
||||
if err != nil {
|
||||
fs.Errorf(s.f, "list network interfaces: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
var tmp []net.Interface
|
||||
for _, intf := range ifs {
|
||||
if intf.Flags&net.FlagUp == 0 || intf.MTU <= 0 {
|
||||
continue
|
||||
}
|
||||
s.Interfaces = append(s.Interfaces, intf)
|
||||
tmp = append(tmp, intf)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *server) initMux(mux *http.ServeMux) {
|
||||
mux.HandleFunc(resPath, func(w http.ResponseWriter, r *http.Request) {
|
||||
remotePath := r.URL.Query().Get("path")
|
||||
node, err := s.vfs.Stat(remotePath)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Length", strconv.FormatInt(node.Size(), 10))
|
||||
|
||||
file := node.(*vfs.File)
|
||||
in, err := file.Open(os.O_RDONLY)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
defer fs.CheckClose(in, &err)
|
||||
|
||||
http.ServeContent(w, r, remotePath, node.ModTime(), in)
|
||||
return
|
||||
})
|
||||
|
||||
mux.HandleFunc(rootDescPath, func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("content-type", `text/xml; charset="utf-8"`)
|
||||
w.Header().Set("content-length", fmt.Sprint(len(s.rootDescXML)))
|
||||
w.Header().Set("server", serverField)
|
||||
_, err := w.Write(s.rootDescXML)
|
||||
if err != nil {
|
||||
fs.Errorf(s, "Failed to serve root descriptor XML: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Install handlers to serve SCPD for each UPnP service.
|
||||
for _, s := range services {
|
||||
p := path.Join("/scpd", s.ServiceId)
|
||||
s.SCPDURL = p
|
||||
|
||||
mux.HandleFunc(s.SCPDURL, func(serviceDesc string) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("content-type", `text/xml; charset="utf-8"`)
|
||||
http.ServeContent(w, r, ".xml", time.Time{}, bytes.NewReader([]byte(serviceDesc)))
|
||||
}
|
||||
}(s.SCPD))
|
||||
}
|
||||
|
||||
mux.HandleFunc(serviceControlURL, s.serviceControlHandler)
|
||||
}
|
||||
|
||||
// Handle a service control HTTP request.
|
||||
func (s *server) serviceControlHandler(w http.ResponseWriter, r *http.Request) {
|
||||
soapActionString := r.Header.Get("SOAPACTION")
|
||||
soapAction, err := upnp.ParseActionHTTPHeader(soapActionString)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
var env soap.Envelope
|
||||
if err := xml.NewDecoder(r.Body).Decode(&env); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", `text/xml; charset="utf-8"`)
|
||||
w.Header().Set("Ext", "")
|
||||
w.Header().Set("server", serverField)
|
||||
soapRespXML, code := func() ([]byte, int) {
|
||||
respArgs, err := s.soapActionResponse(soapAction, env.Body.Action, r)
|
||||
if err != nil {
|
||||
upnpErr := upnp.ConvertError(err)
|
||||
return mustMarshalXML(soap.NewFault("UPnPError", upnpErr)), 500
|
||||
}
|
||||
return marshalSOAPResponse(soapAction, respArgs), 200
|
||||
}()
|
||||
bodyStr := fmt.Sprintf(`<?xml version="1.0" encoding="utf-8" standalone="yes"?><s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"><s:Body>%s</s:Body></s:Envelope>`, soapRespXML)
|
||||
w.WriteHeader(code)
|
||||
if _, err := w.Write([]byte(bodyStr)); err != nil {
|
||||
log.Print(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Handle a SOAP request and return the response arguments or UPnP error.
|
||||
func (s *server) soapActionResponse(sa upnp.SoapAction, actionRequestXML []byte, r *http.Request) (map[string]string, error) {
|
||||
service, ok := s.services[sa.Type]
|
||||
if !ok {
|
||||
// TODO: What's the invalid service error?
|
||||
return nil, upnp.Errorf(upnp.InvalidActionErrorCode, "Invalid service: %s", sa.Type)
|
||||
}
|
||||
return service.Handle(sa.Action, actionRequestXML, r)
|
||||
}
|
||||
|
||||
// Serve runs the server - returns the error only if
|
||||
// the listener was not started; does not block, so
|
||||
// use s.Wait() to block on the listener indefinitely.
|
||||
func (s *server) Serve() (err error) {
|
||||
if s.HTTPConn == nil {
|
||||
s.HTTPConn, err = net.Listen("tcp", s.httpListenAddr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
s.startSSDP()
|
||||
}()
|
||||
|
||||
go func() {
|
||||
fs.Logf(s.f, "Serving HTTP on %s", s.HTTPConn.Addr().String())
|
||||
|
||||
err = s.serveHTTP()
|
||||
if err != nil {
|
||||
fs.Logf(s.f, "Error on serving HTTP server: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Wait blocks while the listener is open.
|
||||
func (s *server) Wait() {
|
||||
<-s.waitChan
|
||||
}
|
||||
|
||||
func (s *server) Close() {
|
||||
err := s.HTTPConn.Close()
|
||||
if err != nil {
|
||||
fs.Errorf(s.f, "Error closing HTTP server: %v", err)
|
||||
return
|
||||
}
|
||||
close(s.waitChan)
|
||||
}
|
||||
|
||||
// Run SSDP (multicast for server discovery) on all interfaces.
|
||||
func (s *server) startSSDP() {
|
||||
active := 0
|
||||
stopped := make(chan struct{})
|
||||
for _, intf := range s.Interfaces {
|
||||
active++
|
||||
go func(intf2 net.Interface) {
|
||||
defer func() {
|
||||
stopped <- struct{}{}
|
||||
}()
|
||||
s.ssdpInterface(intf2)
|
||||
}(intf)
|
||||
}
|
||||
for active > 0 {
|
||||
<-stopped
|
||||
active--
|
||||
}
|
||||
}
|
||||
|
||||
// Run SSDP server on an interface.
|
||||
func (s *server) ssdpInterface(intf net.Interface) {
|
||||
// Figure out which HTTP location to advertise based on the interface IP.
|
||||
advertiseLocationFn := func(ip net.IP) string {
|
||||
url := url.URL{
|
||||
Scheme: "http",
|
||||
Host: (&net.TCPAddr{
|
||||
IP: ip,
|
||||
Port: s.HTTPConn.Addr().(*net.TCPAddr).Port,
|
||||
}).String(),
|
||||
Path: rootDescPath,
|
||||
}
|
||||
return url.String()
|
||||
}
|
||||
|
||||
ssdpServer := ssdp.Server{
|
||||
Interface: intf,
|
||||
Devices: devices(),
|
||||
Services: serviceTypes(),
|
||||
Location: advertiseLocationFn,
|
||||
Server: serverField,
|
||||
UUID: s.rootDeviceUUID,
|
||||
NotifyInterval: s.AnnounceInterval,
|
||||
}
|
||||
|
||||
// An interface with these flags should be valid for SSDP.
|
||||
const ssdpInterfaceFlags = net.FlagUp | net.FlagMulticast
|
||||
|
||||
if err := ssdpServer.Init(); err != nil {
|
||||
if intf.Flags&ssdpInterfaceFlags != ssdpInterfaceFlags {
|
||||
// Didn't expect it to work anyway.
|
||||
return
|
||||
}
|
||||
if strings.Contains(err.Error(), "listen") {
|
||||
			// OSX has a lot of dud interfaces. Failure to create a socket on
			// the interface is what we're expecting if the interface is no
			// good.
|
||||
return
|
||||
}
|
||||
log.Printf("Error creating ssdp server on %s: %s", intf.Name, err)
|
||||
return
|
||||
}
|
||||
defer ssdpServer.Close()
|
||||
log.Println("Started SSDP on", intf.Name)
|
||||
stopped := make(chan struct{})
|
||||
go func() {
|
||||
defer close(stopped)
|
||||
if err := ssdpServer.Serve(); err != nil {
|
||||
log.Printf("%q: %q\n", intf.Name, err)
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-s.waitChan:
|
||||
// Returning will close the server.
|
||||
case <-stopped:
|
||||
}
|
||||
}
|
||||
|
||||
func (s *server) serveHTTP() error {
|
||||
srv := &http.Server{
|
||||
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
s.httpServeMux.ServeHTTP(w, r)
|
||||
}),
|
||||
}
|
||||
err := srv.Serve(s.HTTPConn)
|
||||
select {
|
||||
case <-s.waitChan:
|
||||
return nil
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
88
cmd/serve/dlna/dlna_test.go
Normal file
@@ -0,0 +1,88 @@
|
||||
// +build go1.8
|
||||
|
||||
package dlna
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/vfs"
|
||||
|
||||
_ "github.com/ncw/rclone/backend/local"
|
||||
"github.com/ncw/rclone/cmd/serve/dlna/dlnaflags"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
dlnaServer *server
|
||||
)
|
||||
|
||||
const (
|
||||
testBindAddress = "localhost:51777"
|
||||
testURL = "http://" + testBindAddress + "/"
|
||||
)
|
||||
|
||||
func startServer(t *testing.T, f fs.Fs) {
|
||||
opt := dlnaflags.DefaultOpt
|
||||
opt.ListenAddr = testBindAddress
|
||||
dlnaServer = newServer(f, &opt)
|
||||
assert.NoError(t, dlnaServer.Serve())
|
||||
}
|
||||
|
||||
func TestInit(t *testing.T) {
|
||||
config.LoadConfig()
|
||||
|
||||
f, err := fs.NewFs("testdata/files")
|
||||
l, _ := f.List("")
|
||||
fmt.Println(l)
|
||||
require.NoError(t, err)
|
||||
|
||||
startServer(t, f)
|
||||
}
|
||||
|
||||
// Make sure that it serves rootDesc.xml (SCPD in uPnP parlance).
|
||||
func TestRootSCPD(t *testing.T) {
|
||||
req, err := http.NewRequest("GET", testURL+"rootDesc.xml", nil)
|
||||
require.NoError(t, err)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, http.StatusOK, resp.StatusCode)
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
// Make sure that the SCPD contains a CDS service.
|
||||
require.Contains(t, string(body),
|
||||
"<serviceType>urn:schemas-upnp-org:service:ContentDirectory:1</serviceType>")
|
||||
}
|
||||
|
||||
// Make sure that it serves content from the remote.
|
||||
func TestServeContent(t *testing.T) {
|
||||
itemPath := "/small_jpeg.jpg"
|
||||
pathQuery := url.QueryEscape(itemPath)
|
||||
req, err := http.NewRequest("GET", testURL+"res?path="+pathQuery, nil)
|
||||
require.NoError(t, err)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
defer fs.CheckClose(resp.Body, &err)
|
||||
assert.Equal(t, http.StatusOK, resp.StatusCode)
|
||||
actualContents, err := ioutil.ReadAll(resp.Body)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Now compare the contents with the golden file.
|
||||
node, err := dlnaServer.vfs.Stat(itemPath)
|
||||
assert.NoError(t, err)
|
||||
goldenFile := node.(*vfs.File)
|
||||
goldenReader, err := goldenFile.Open(os.O_RDONLY)
|
||||
assert.NoError(t, err)
|
||||
defer fs.CheckClose(goldenReader, &err)
|
||||
goldenContents, err := ioutil.ReadAll(goldenReader)
|
||||
assert.NoError(t, err)
|
||||
|
||||
require.Equal(t, goldenContents, actualContents)
|
||||
}
|
||||
52
cmd/serve/dlna/dlna_util.go
Normal file
@@ -0,0 +1,52 @@
|
||||
package dlna
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
|
||||
"github.com/anacrolix/dms/soap"
|
||||
"github.com/anacrolix/dms/upnp"
|
||||
)
|
||||
|
||||
func makeDeviceUUID(unique string) string {
|
||||
h := md5.New()
|
||||
if _, err := io.WriteString(h, unique); err != nil {
|
||||
log.Panicf("makeDeviceUUID write failed: %s", err)
|
||||
}
|
||||
buf := h.Sum(nil)
|
||||
return upnp.FormatUUID(buf)
|
||||
}
|
||||
|
||||
func didlLite(chardata string) string {
|
||||
return `<DIDL-Lite` +
|
||||
` xmlns:dc="http://purl.org/dc/elements/1.1/"` +
|
||||
` xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"` +
|
||||
` xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"` +
|
||||
` xmlns:dlna="urn:schemas-dlna-org:metadata-1-0/">` +
|
||||
chardata +
|
||||
`</DIDL-Lite>`
|
||||
}
|
||||
|
||||
func mustMarshalXML(value interface{}) []byte {
|
||||
ret, err := xml.MarshalIndent(value, "", " ")
|
||||
if err != nil {
|
||||
log.Panicf("mustMarshalXML failed to marshal %v: %s", value, err)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Marshal SOAP response arguments into a response XML snippet.
|
||||
func marshalSOAPResponse(sa upnp.SoapAction, args map[string]string) []byte {
|
||||
soapArgs := make([]soap.Arg, 0, len(args))
|
||||
for argName, value := range args {
|
||||
soapArgs = append(soapArgs, soap.Arg{
|
||||
XMLName: xml.Name{Local: argName},
|
||||
Value: value,
|
||||
})
|
||||
}
|
||||
return []byte(fmt.Sprintf(`<u:%[1]sResponse xmlns:u="%[2]s">%[3]s</u:%[1]sResponse>`,
|
||||
sa.Action, sa.ServiceURN.String(), mustMarshalXML(soapArgs)))
|
||||
}
|
||||
42
cmd/serve/dlna/dlnaflags/dlnaflags.go
Normal file
@@ -0,0 +1,42 @@
|
||||
package dlnaflags
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/rc"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// Help contains the text for the command line help and manual.
|
||||
var Help = `
|
||||
### Server options
|
||||
|
||||
Use --addr to specify which IP address and port the server should
|
||||
listen on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all
|
||||
IPs.
|
||||
|
||||
`
|
||||
|
||||
// Options is the type for DLNA serving options.
|
||||
type Options struct {
|
||||
ListenAddr string
|
||||
}
|
||||
|
||||
// DefaultOpt contains the defaults options for DLNA serving.
|
||||
var DefaultOpt = Options{
|
||||
ListenAddr: ":7879",
|
||||
}
|
||||
|
||||
// Opt contains the options for DLNA serving.
|
||||
var (
|
||||
Opt = DefaultOpt
|
||||
)
|
||||
|
||||
func addFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *Options) {
|
||||
rc.AddOption("dlna", &Opt)
|
||||
flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "ip:port or :port to bind the DLNA http server to.")
|
||||
}
|
||||
|
||||
// AddFlags add the command line flags for DLNA serving.
|
||||
func AddFlags(flagSet *pflag.FlagSet) {
|
||||
addFlagsPrefix(flagSet, "", &Opt)
|
||||
}
|
||||
BIN
cmd/serve/dlna/testdata/files/small_jpeg.jpg
vendored
Normal file
Binary file not shown (107 B).
@@ -3,6 +3,8 @@ package serve
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/ncw/rclone/cmd/serve/dlna"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/cmd/serve/ftp"
|
||||
"github.com/ncw/rclone/cmd/serve/http"
|
||||
@@ -19,6 +21,9 @@ func init() {
|
||||
if restic.Command != nil {
|
||||
Command.AddCommand(restic.Command)
|
||||
}
|
||||
if dlna.Command != nil {
|
||||
Command.AddCommand(dlna.Command)
|
||||
}
|
||||
if ftp.Command != nil {
|
||||
Command.AddCommand(ftp.Command)
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ Rclone
|
||||
|
||||
Rclone is a command line program to sync files and directories to and from:
|
||||
|
||||
* {{< provider name="Alibaba Cloud (Aliyun) Object Storage System (OSS)" home="https://www.alibabacloud.com/product/oss/" config="/s3/#alibaba-oss" >}}
|
||||
* {{< provider name="Amazon Drive" home="https://www.amazon.com/clouddrive" config="/amazonclouddrive/" >}} ([See note](/amazonclouddrive/#status))
|
||||
* {{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
|
||||
* {{< provider name="Backblaze B2" home="https://www.backblaze.com/b2/cloud-storage.html" config="/b2/" >}}
|
||||
@@ -43,6 +44,7 @@ Rclone is a command line program to sync files and directories to and from:
|
||||
* {{< provider name="put.io" home="https://put.io/" config="/webdav/#put-io" >}}
|
||||
* {{< provider name="QingStor" home="https://www.qingcloud.com/products/storage" config="/qingstor/" >}}
|
||||
* {{< provider name="Rackspace Cloud Files" home="https://www.rackspace.com/cloud/files" config="/swift/" >}}
|
||||
* {{< provider name="Scaleway" home="https://www.scaleway.com/object-storage/" config="/s3/#scaleway" >}}
|
||||
* {{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SFTP" config="/sftp/" >}}
|
||||
* {{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
|
||||
* {{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}}
|
||||
|
||||
@@ -225,3 +225,11 @@ Contributors
|
||||
* Arkadius Stefanski <arkste@gmail.com>
|
||||
* Jay <dev@jaygoel.com>
|
||||
* andrea rota <a@xelera.eu>
|
||||
* nicolov <nicolov@users.noreply.github.com>
|
||||
* Dario Guzik <dario@guzik.com.ar>
|
||||
* qip <qip@users.noreply.github.com>
|
||||
* yair@unicorn <yair@unicorn>
|
||||
* Matt Robinson <brimstone@the.narro.ws>
|
||||
* kayrus <kay.diam@gmail.com>
|
||||
* Rémy Léone <remy.leone@gmail.com>
|
||||
* Wojciech Smigielski <wojciech.hieronim.smigielski@gmail.com>
|
||||
|
||||
@@ -437,8 +437,8 @@ Normally the config file is in your home directory as a file called
|
||||
older version). If `$XDG_CONFIG_HOME` is set it will be at
|
||||
`$XDG_CONFIG_HOME/rclone/rclone.conf`
|
||||
|
||||
If you run `rclone -h` and look at the help for the `--config` option
|
||||
you will see where the default location is for you.
|
||||
If you run `rclone config file` you will see where the default
|
||||
location is for you.
|
||||
|
||||
Use this flag to override the config location, eg `rclone
|
||||
--config=".myconfig" .config`.
|
||||
|
||||
@@ -15,8 +15,8 @@ work on all the remote storage systems.
|
||||
### Can I copy the config from one machine to another ###
|
||||
|
||||
Sure! Rclone stores all of its config in a single file. If you want
|
||||
to find this file, the simplest way is to run `rclone -h` and look at
|
||||
the help for the `--config` flag which will tell you where it is.
|
||||
to find this file, run `rclone config file` which will tell you where
|
||||
it is.
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for more info.
|
||||
|
||||
@@ -97,8 +97,6 @@ In general the variables are called `http_proxy` (for services reached
|
||||
over `http`) and `https_proxy` (for services reached over `https`). Most
|
||||
public services will be using `https`, but you may wish to set both.
|
||||
|
||||
If you ever use `FTP` then you would need to set `ftp_proxy`.
|
||||
|
||||
The content of the variable is `protocol://server:port`. The protocol
|
||||
value is the one used to talk to the proxy server, itself, and is commonly
|
||||
either `http` or `socks5`.
|
||||
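Rclone's HTTP layer is built on Go's standard library, so these variables are picked up by the usual Go proxy resolution. A minimal, standalone sketch of that lookup (the proxy host and request URL are only examples, not anything rclone-specific):

```
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// With e.g. https_proxy=http://proxy.example.com:3128 exported,
	// ProxyFromEnvironment reports the proxy that would be used for a
	// given request URL, or nil if no proxy applies (e.g. via no_proxy).
	req, err := http.NewRequest("GET", "https://api.example.com/", nil)
	if err != nil {
		panic(err)
	}
	proxyURL, err := http.ProxyFromEnvironment(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(proxyURL)
}
```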
@@ -122,6 +120,8 @@ e.g.
|
||||
export no_proxy=localhost,127.0.0.0/8,my.host.name
|
||||
export NO_PROXY=$no_proxy
|
||||
|
||||
Note that the ftp backend does not support `ftp_proxy` yet.
|
||||
|
||||
### Rclone gives x509: failed to load system roots and no roots provided error ###
|
||||
|
||||
This means that `rclone` can't find the SSL root certificates. Likely
|
||||
|
||||
@@ -175,3 +175,6 @@ Note that `--timeout` isn't supported (but `--contimeout` is).
|
||||
Note that `--bind` isn't supported.
|
||||
|
||||
FTP could support server side move but doesn't yet.
|
||||
|
||||
Note that the ftp backend does not support the `ftp_proxy` environment
|
||||
variable yet.
|
||||
|
||||
@@ -81,7 +81,8 @@ Normally rclone will ignore symlinks or junction points (which behave
|
||||
like symlinks under Windows).
|
||||
|
||||
If you supply `--copy-links` or `-L` then rclone will follow the
|
||||
symlink and copy the pointed to file or directory.
|
||||
symlink and copy the pointed to file or directory. Note that this
|
||||
flag is incompatible with `--links` / `-l`.
|
||||
|
||||
This flag applies to all commands.
|
||||
|
||||
@@ -116,6 +117,75 @@ $ rclone -L ls /tmp/a
|
||||
6 b/one
|
||||
```
|
||||
|
||||
#### --links, -l
|
||||
|
||||
Normally rclone will ignore symlinks or junction points (which behave
|
||||
like symlinks under Windows).
|
||||
|
||||
If you supply this flag then rclone will copy symbolic links from the local storage,
|
||||
and store them as text files, with a '.rclonelink' suffix in the remote storage.
|
||||
|
||||
The text file will contain the target of the symbolic link (see example).
|
||||
|
||||
This flag applies to all commands.
|
||||
|
||||
For example, supposing you have a directory structure like this
|
||||
|
||||
```
|
||||
$ tree /tmp/a
|
||||
/tmp/a
|
||||
├── file1 -> ./file4
|
||||
└── file2 -> /home/user/file3
|
||||
```
|
||||
|
||||
Copying the entire directory with '-l'
|
||||
|
||||
```
|
||||
$ rclone copyto -l /tmp/a/file1 remote:/tmp/a/
|
||||
```
|
||||
|
||||
The remote files are created with a '.rclonelink' suffix
|
||||
|
||||
```
|
||||
$ rclone ls remote:/tmp/a
|
||||
5 file1.rclonelink
|
||||
14 file2.rclonelink
|
||||
```
|
||||
|
||||
The remote files will contain the target of the symbolic links
|
||||
|
||||
```
|
||||
$ rclone cat remote:/tmp/a/file1.rclonelink
|
||||
./file4
|
||||
|
||||
$ rclone cat remote:/tmp/a/file2.rclonelink
|
||||
/home/user/file3
|
||||
```
|
||||
|
||||
Copying them back with '-l'
|
||||
|
||||
```
|
||||
$ rclone copyto -l remote:/tmp/a/ /tmp/b/
|
||||
|
||||
$ tree /tmp/b
|
||||
/tmp/b
|
||||
├── file1 -> ./file4
|
||||
└── file2 -> /home/user/file3
|
||||
```
|
||||
|
||||
However, if copied back without '-l'
|
||||
|
||||
```
|
||||
$ rclone copyto remote:/tmp/a/ /tmp/b/
|
||||
|
||||
$ tree /tmp/b
|
||||
/tmp/b
|
||||
├── file1.rclonelink
|
||||
└── file2.rclonelink
|
||||
```
|
||||
|
||||
Note that this flag is incompatible with `--copy-links` / `-L`.
|
||||
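For illustration only, here is a rough Go sketch of the translation `--links` performs: read the symlink's target and store it as a small text file with the `.rclonelink` suffix. This is not rclone's actual implementation; the paths are placeholders.

```
package main

import (
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
)

const linkSuffix = ".rclonelink" // suffix used for translated symlinks

// storeSymlink writes the target of linkPath into destDir/<name>.rclonelink.
func storeSymlink(linkPath, destDir string) error {
	target, err := os.Readlink(linkPath)
	if err != nil {
		return err
	}
	out := filepath.Join(destDir, filepath.Base(linkPath)+linkSuffix)
	return ioutil.WriteFile(out, []byte(target), 0644)
}

func main() {
	if err := storeSymlink("/tmp/a/file1", "/tmp/b"); err != nil {
		log.Fatal(err)
	}
}
```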
|
||||
### Restricting filesystems with --one-file-system
|
||||
|
||||
Normally rclone will recurse through filesystems as mounted.
|
||||
|
||||
@@ -36,7 +36,7 @@ Here is an overview of the major features of each cloud storage system.
|
||||
| pCloud | MD5, SHA1 | Yes | No | No | W |
|
||||
| QingStor | MD5 | No | No | No | R/W |
|
||||
| SFTP | MD5, SHA1 ‡ | Yes | Depends | No | - |
|
||||
| WebDAV | - | Yes †† | Depends | No | - |
|
||||
| WebDAV | MD5, SHA1 ††| Yes ††† | Depends | No | - |
|
||||
| Yandex Disk | MD5 | Yes | No | No | R/W |
|
||||
| The local filesystem | All | Yes | Depends | No | - |
|
||||
|
||||
@@ -57,7 +57,9 @@ This is an SHA256 sum of all the 4MB block SHA256s.
|
||||
‡ SFTP supports checksums if the same login has shell access and `md5sum`
|
||||
or `sha1sum` as well as `echo` are in the remote's PATH.
|
||||
|
||||
†† WebDAV supports modtimes when used with Owncloud and Nextcloud only.
|
||||
†† WebDAV supports hashes when used with Owncloud and Nextcloud only.
|
||||
|
||||
††† WebDAV supports modtimes when used with Owncloud and Nextcloud only.
|
||||
|
||||
‡‡ Microsoft OneDrive Personal supports SHA1 hashes, whereas OneDrive
|
||||
for business and SharePoint server support Microsoft's own
|
||||
@@ -147,7 +149,7 @@ operations more efficient.
|
||||
| pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes |
|
||||
| QingStor | No | Yes | No | No | No | Yes | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
|
||||
| SFTP | No | No | Yes | Yes | No | No | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
|
||||
| WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ‡ | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
|
||||
| WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ‡ | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes |
|
||||
| Yandex Disk | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes |
|
||||
| The local filesystem | Yes | No | Yes | Yes | No | No | Yes | No | Yes |
|
||||
|
||||
@@ -218,5 +220,7 @@ on the particular cloud provider.
|
||||
This is used to fetch quota information from the remote, like bytes
|
||||
used/free/quota and bytes used in the trash.
|
||||
|
||||
This is also used to return the space used and space available for `rclone mount`.
|
||||
|
||||
If the server can't do `About` then `rclone about` will return an
|
||||
error.
|
||||
|
||||
@@ -234,4 +234,50 @@ Number of connection retries.
|
||||
- Type: int
|
||||
- Default: 3
|
||||
|
||||
#### --qingstor-upload-cutoff
|
||||
|
||||
Cutoff for switching to chunked upload
|
||||
|
||||
Any files larger than this will be uploaded in chunks of chunk_size.
|
||||
The minimum is 0 and the maximum is 5GB.
|
||||
|
||||
- Config: upload_cutoff
|
||||
- Env Var: RCLONE_QINGSTOR_UPLOAD_CUTOFF
|
||||
- Type: SizeSuffix
|
||||
- Default: 200M
|
||||
|
||||
#### --qingstor-chunk-size
|
||||
|
||||
Chunk size to use for uploading.
|
||||
|
||||
When uploading files larger than upload_cutoff they will be uploaded
|
||||
as multipart uploads using this chunk size.
|
||||
|
||||
Note that "--qingstor-upload-concurrency" chunks of this size are buffered
|
||||
in memory per transfer.
|
||||
|
||||
If you are transferring large files over high speed links and you have
|
||||
enough memory, then increasing this will speed up the transfers.
|
||||
|
||||
- Config: chunk_size
|
||||
- Env Var: RCLONE_QINGSTOR_CHUNK_SIZE
|
||||
- Type: SizeSuffix
|
||||
- Default: 4M
|
||||
|
||||
#### --qingstor-upload-concurrency
|
||||
|
||||
Concurrency for multipart uploads.
|
||||
|
||||
This is the number of chunks of the same file that are uploaded
|
||||
concurrently.
|
||||
|
||||
If you are uploading small numbers of large files over a high speed link
|
||||
and these uploads do not fully utilize your bandwidth, then increasing
|
||||
this may help to speed up the transfers.
|
||||
|
||||
- Config: upload_concurrency
|
||||
- Env Var: RCLONE_QINGSTOR_UPLOAD_CONCURRENCY
|
||||
- Type: int
|
||||
- Default: 4
|
||||
|
||||
<!--- autogenerated options stop -->
|
||||
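As a rough rule of thumb, the chunk memory buffered by one rclone run is about chunk_size × upload_concurrency × --transfers. A small sketch of that arithmetic with the defaults above (the figures are illustrative, not measured):

```
package main

import "fmt"

func main() {
	const (
		chunkSize         = 4 << 20 // 4M chunk_size
		uploadConcurrency = 4       // upload_concurrency
		transfers         = 4       // rclone's default --transfers
	)
	// Approximate upper bound on buffered multipart chunk memory.
	fmt.Printf("~%d MiB buffered\n", chunkSize*uploadConcurrency*transfers>>20)
}
```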
|
||||
@@ -74,15 +74,14 @@ So first configure rclone on your desktop machine
|
||||
|
||||
to set up the config file.
|
||||
|
||||
Find the config file by running `rclone -h` and looking for the help for the `--config` option
|
||||
Find the config file by running `rclone config file`, for example
|
||||
|
||||
```
|
||||
$ rclone -h
|
||||
[snip]
|
||||
--config="/home/user/.rclone.conf": Config file.
|
||||
[snip]
|
||||
$ rclone config file
|
||||
Configuration file is stored at:
|
||||
/home/user/.rclone.conf
|
||||
```
|
||||
|
||||
Now transfer it to the remote box (scp, cut paste, ftp, sftp etc) and
|
||||
place it in the correct place (use `rclone -h` on the remote box to
|
||||
find out where).
|
||||
place it in the correct place (use `rclone config file` on the remote
|
||||
box to find out where).
|
||||
|
||||
@@ -10,6 +10,7 @@ date: "2016-07-11"
|
||||
The S3 backend can be used with a number of different providers:
|
||||
|
||||
* {{< provider name="AWS S3" home="https://aws.amazon.com/s3/" config="/s3/#amazon-s3" >}}
|
||||
* {{< provider name="Alibaba Cloud (Aliyun) Object Storage System (OSS)" home="https://www.alibabacloud.com/product/oss/" config="/s3/#alibaba-oss" >}}
|
||||
* {{< provider name="Ceph" home="http://ceph.com/" config="/s3/#ceph" >}}
|
||||
* {{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
|
||||
* {{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
|
||||
@@ -400,7 +401,7 @@ the object(s) in question before using rclone.
|
||||
<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/s3/s3.go then run make backenddocs -->
|
||||
### Standard Options
|
||||
|
||||
Here are the standard options specific to s3 (Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)).
|
||||
Here are the standard options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
|
||||
|
||||
#### --s3-provider
|
||||
|
||||
@@ -413,6 +414,8 @@ Choose your S3 provider.
|
||||
- Examples:
|
||||
- "AWS"
|
||||
- Amazon Web Services (AWS) S3
|
||||
- "Alibaba"
|
||||
- Alibaba Cloud Object Storage System (OSS) formerly Aliyun
|
||||
- "Ceph"
|
||||
- Ceph Object Storage
|
||||
- "DigitalOcean"
|
||||
@@ -423,6 +426,8 @@ Choose your S3 provider.
|
||||
- IBM COS S3
|
||||
- "Minio"
|
||||
- Minio Object Storage
|
||||
- "Netease"
|
||||
- Netease Object Storage (NOS)
|
||||
- "Wasabi"
|
||||
- Wasabi Object Storage
|
||||
- "Other"
|
||||
@@ -622,6 +627,54 @@ Specify if using an IBM COS On Premise.
|
||||
|
||||
#### --s3-endpoint
|
||||
|
||||
Endpoint for OSS API.
|
||||
|
||||
- Config: endpoint
|
||||
- Env Var: RCLONE_S3_ENDPOINT
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Examples:
|
||||
- "oss-cn-hangzhou.aliyuncs.com"
|
||||
- East China 1 (Hangzhou)
|
||||
- "oss-cn-shanghai.aliyuncs.com"
|
||||
- East China 2 (Shanghai)
|
||||
- "oss-cn-qingdao.aliyuncs.com"
|
||||
- North China 1 (Qingdao)
|
||||
- "oss-cn-beijing.aliyuncs.com"
|
||||
- North China 2 (Beijing)
|
||||
- "oss-cn-zhangjiakou.aliyuncs.com"
|
||||
- North China 3 (Zhangjiakou)
|
||||
- "oss-cn-huhehaote.aliyuncs.com"
|
||||
- North China 5 (Huhehaote)
|
||||
- "oss-cn-shenzhen.aliyuncs.com"
|
||||
- South China 1 (Shenzhen)
|
||||
- "oss-cn-hongkong.aliyuncs.com"
|
||||
- Hong Kong (Hong Kong)
|
||||
- "oss-us-west-1.aliyuncs.com"
|
||||
- US West 1 (Silicon Valley)
|
||||
- "oss-us-east-1.aliyuncs.com"
|
||||
- US East 1 (Virginia)
|
||||
- "oss-ap-southeast-1.aliyuncs.com"
|
||||
- Southeast Asia Southeast 1 (Singapore)
|
||||
- "oss-ap-southeast-2.aliyuncs.com"
|
||||
- Asia Pacific Southeast 2 (Sydney)
|
||||
- "oss-ap-southeast-3.aliyuncs.com"
|
||||
- Southeast Asia Southeast 3 (Kuala Lumpur)
|
||||
- "oss-ap-southeast-5.aliyuncs.com"
|
||||
- Asia Pacific Southeast 5 (Jakarta)
|
||||
- "oss-ap-northeast-1.aliyuncs.com"
|
||||
- Asia Pacific Northeast 1 (Japan)
|
||||
- "oss-ap-south-1.aliyuncs.com"
|
||||
- Asia Pacific South 1 (Mumbai)
|
||||
- "oss-eu-central-1.aliyuncs.com"
|
||||
- Central Europe 1 (Frankfurt)
|
||||
- "oss-eu-west-1.aliyuncs.com"
|
||||
- West Europe (London)
|
||||
- "oss-me-east-1.aliyuncs.com"
|
||||
- Middle East 1 (Dubai)
|
||||
|
||||
#### --s3-endpoint
|
||||
|
||||
Endpoint for S3 API.
|
||||
Required when using an S3 clone.
|
||||
|
||||
@@ -855,11 +908,27 @@ The storage class to use when storing new objects in S3.
|
||||
- "ONEZONE_IA"
|
||||
- One Zone Infrequent Access storage class
|
||||
- "GLACIER"
|
||||
- GLACIER storage class
|
||||
- Glacier storage class
|
||||
|
||||
#### --s3-storage-class
|
||||
|
||||
The storage class to use when storing new objects in OSS.
|
||||
|
||||
- Config: storage_class
|
||||
- Env Var: RCLONE_S3_STORAGE_CLASS
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Examples:
|
||||
- "Standard"
|
||||
- Standard storage class
|
||||
- "Archive"
|
||||
- Archive storage mode.
|
||||
- "IA"
|
||||
- Infrequent access storage mode.
|
||||
|
||||
### Advanced Options
|
||||
|
||||
Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)).
|
||||
Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
|
||||
|
||||
#### --s3-upload-cutoff
|
||||
|
||||
@@ -1344,6 +1413,28 @@ So once set up, for example to copy files into a bucket
|
||||
rclone copy /path/to/files minio:bucket
|
||||
```
|
||||
|
||||
### Scaleway {#scaleway}
|
||||
|
||||
[Scaleway](https://www.scaleway.com/object-storage/) Object Storage allows you to store anything from backups, logs and web assets to documents and photos.
Files can be dropped from the Scaleway console or transferred through the Scaleway API and CLI, or using any S3-compatible tool.
|
||||
|
||||
Scaleway provides an S3 interface which can be configured for use with rclone like this:
|
||||
|
||||
```
|
||||
[scaleway]
|
||||
type = s3
|
||||
env_auth = false
|
||||
endpoint = s3.nl-ams.scw.cloud
|
||||
access_key_id = SCWXXXXXXXXXXXXXX
|
||||
secret_access_key = 1111111-2222-3333-44444-55555555555555
|
||||
region = nl-ams
|
||||
location_constraint =
|
||||
acl = private
|
||||
force_path_style = false
|
||||
server_side_encryption =
|
||||
storage_class =
|
||||
```
|
||||
|
||||
### Wasabi ###
|
||||
|
||||
[Wasabi](https://wasabi.com) is a cloud-based object storage service for a
|
||||
@@ -1458,30 +1549,41 @@ server_side_encryption =
|
||||
storage_class =
|
||||
```
|
||||
|
||||
### Aliyun OSS / Netease NOS ###
|
||||
### Alibaba OSS {#alibaba-oss}
|
||||
|
||||
This describes how to set up Aliyun OSS - Netease NOS is the same
|
||||
except for different endpoints.
|
||||
Here is an example of making an [Alibaba Cloud (Aliyun) OSS](https://www.alibabacloud.com/product/oss/)
|
||||
configuration. First run:
|
||||
|
||||
Note this is a pretty standard S3 setup, except for the setting of
|
||||
`force_path_style = false` in the advanced config.
|
||||
rclone config
|
||||
|
||||
This will guide you through an interactive setup process.
|
||||
|
||||
```
|
||||
# rclone config
|
||||
e/n/d/r/c/s/q> n
|
||||
No remotes found - make a new one
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
n/s/q> n
|
||||
name> oss
|
||||
Type of storage to configure.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
3 / Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)
|
||||
[snip]
|
||||
4 / Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)
|
||||
\ "s3"
|
||||
[snip]
|
||||
Storage> s3
|
||||
Choose your S3 provider.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
8 / Any other S3 compatible provider
|
||||
\ "Other"
|
||||
provider> other
|
||||
1 / Amazon Web Services (AWS) S3
|
||||
\ "AWS"
|
||||
2 / Alibaba Cloud Object Storage System (OSS) formerly Aliyun
|
||||
\ "Alibaba"
|
||||
3 / Ceph Object Storage
|
||||
\ "Ceph"
|
||||
[snip]
|
||||
provider> Alibaba
|
||||
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
Only applies if access_key_id and secret_access_key is blank.
|
||||
Enter a boolean value (true or false). Press Enter for the default ("false").
|
||||
@@ -1494,70 +1596,71 @@ env_auth> 1
|
||||
AWS Access Key ID.
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
access_key_id> xxxxxxxxxxxx
|
||||
access_key_id> accesskeyid
|
||||
AWS Secret Access Key (password)
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
secret_access_key> xxxxxxxxxxxxxxxxx
|
||||
Region to connect to.
|
||||
Leave blank if you are using an S3 clone and you don't have a region.
|
||||
secret_access_key> secretaccesskey
|
||||
Endpoint for OSS API.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Use this if unsure. Will use v4 signatures and an empty region.
|
||||
\ ""
|
||||
2 / Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.
|
||||
\ "other-v2-signature"
|
||||
region> 1
|
||||
Endpoint for S3 API.
|
||||
Required when using an S3 clone.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
endpoint> oss-cn-shenzhen.aliyuncs.com
|
||||
Location constraint - must be set to match the Region.
|
||||
Leave blank if not sure. Used when creating buckets only.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
location_constraint>
|
||||
Canned ACL used when creating buckets and/or storing objects in S3.
|
||||
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
|
||||
1 / East China 1 (Hangzhou)
|
||||
\ "oss-cn-hangzhou.aliyuncs.com"
|
||||
2 / East China 2 (Shanghai)
|
||||
\ "oss-cn-shanghai.aliyuncs.com"
|
||||
3 / North China 1 (Qingdao)
|
||||
\ "oss-cn-qingdao.aliyuncs.com"
|
||||
[snip]
|
||||
endpoint> 1
|
||||
Canned ACL used when creating buckets and storing or copying objects.
|
||||
|
||||
Note that this ACL is applied when server side copying objects as S3
|
||||
doesn't copy the ACL from the source but rather writes a fresh one.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Owner gets FULL_CONTROL. No one else has access rights (default).
|
||||
\ "private"
|
||||
2 / Owner gets FULL_CONTROL. The AllUsers group gets READ access.
|
||||
\ "public-read"
|
||||
/ Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.
|
||||
[snip]
|
||||
acl> 1
|
||||
The storage class to use when storing new objects in OSS.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Default
|
||||
\ ""
|
||||
2 / Standard storage class
|
||||
\ "STANDARD"
|
||||
3 / Archive storage mode.
|
||||
\ "GLACIER"
|
||||
4 / Infrequent access storage mode.
|
||||
\ "STANDARD_IA"
|
||||
storage_class> 1
|
||||
Edit advanced config? (y/n)
|
||||
y) Yes
|
||||
n) No
|
||||
y/n> y
|
||||
Chunk size to use for uploading
|
||||
Enter a size with suffix k,M,G,T. Press Enter for the default ("5M").
|
||||
chunk_size>
|
||||
Don't store MD5 checksum with object metadata
|
||||
Enter a boolean value (true or false). Press Enter for the default ("false").
|
||||
disable_checksum>
|
||||
An AWS session token
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
session_token>
|
||||
Concurrency for multipart uploads.
|
||||
Enter a signed integer. Press Enter for the default ("2").
|
||||
upload_concurrency>
|
||||
If true use path style access if false use virtual hosted style.
|
||||
Some providers (eg Aliyun OSS or Netease COS) require this.
|
||||
Enter a boolean value (true or false). Press Enter for the default ("true").
|
||||
force_path_style> false
|
||||
y/n> n
|
||||
Remote config
|
||||
--------------------
|
||||
[oss]
|
||||
type = s3
|
||||
provider = Other
|
||||
provider = Alibaba
|
||||
env_auth = false
|
||||
access_key_id = xxxxxxxxx
|
||||
secret_access_key = xxxxxxxxxxxxx
|
||||
endpoint = oss-cn-shenzhen.aliyuncs.com
|
||||
access_key_id = accesskeyid
|
||||
secret_access_key = secretaccesskey
|
||||
endpoint = oss-cn-hangzhou.aliyuncs.com
|
||||
acl = private
|
||||
force_path_style = false
|
||||
storage_class = Standard
|
||||
--------------------
|
||||
y) Yes this is OK
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
### Netease NOS ###
|
||||
|
||||
For Netease NOS configure as per the configurator `rclone config`
|
||||
setting the provider `Netease`. This will automatically set
|
||||
`force_path_style = false` which is necessary for it to run properly.
|
||||
|
||||
@@ -124,11 +124,15 @@ The SFTP remote supports three authentication methods:
|
||||
* Key file
|
||||
* ssh-agent
|
||||
|
||||
Key files should be unencrypted PEM-encoded private key files. For
|
||||
instance `/home/$USER/.ssh/id_rsa`.
|
||||
Key files should be PEM-encoded private key files. For instance `/home/$USER/.ssh/id_rsa`.
|
||||
Only unencrypted OpenSSH or PEM encrypted files are supported.
|
||||
|
||||
If you don't specify `pass` or `key_file` then rclone will attempt to
|
||||
contact an ssh-agent.
|
||||
If you don't specify `pass` or `key_file` then rclone will attempt to contact an ssh-agent.
|
||||
|
||||
You can also specify `key_use_agent` to force the usage of an ssh-agent. In this case
|
||||
`key_file` can also be specified to force the usage of a specific key in the ssh-agent.
|
||||
|
||||
Using an ssh-agent is the only way to load encrypted OpenSSH keys at the moment.
|
||||
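For reference, a minimal Go sketch of loading a PEM-encrypted key with a passphrase, which is roughly what `key_file` plus `key_file_pass` does. The path and passphrase are placeholders and this is not the backend's exact code:

```
package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	pemBytes, err := ioutil.ReadFile("/home/user/.ssh/id_rsa") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	// Works for old-style PEM encrypted keys; keys in the new OpenSSH
	// format need an ssh-agent instead, as noted above.
	signer, err := ssh.ParsePrivateKeyWithPassphrase(pemBytes, []byte("passphrase"))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("loaded key, type %s", signer.PublicKey().Type())
}
```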
|
||||
If you set the `--sftp-ask-password` option, rclone will prompt for a
|
||||
password when needed and no password has been configured.
|
||||
@@ -204,13 +208,38 @@ SSH password, leave blank to use ssh-agent.
|
||||
|
||||
#### --sftp-key-file
|
||||
|
||||
Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
|
||||
- Config: key_file
|
||||
- Env Var: RCLONE_SFTP_KEY_FILE
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --sftp-key-file-pass
|
||||
|
||||
The passphrase to decrypt the PEM-encoded private key file.
|
||||
|
||||
Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
|
||||
in the new OpenSSH format can't be used.
|
||||
|
||||
- Config: key_file_pass
|
||||
- Env Var: RCLONE_SFTP_KEY_FILE_PASS
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --sftp-key-use-agent
|
||||
|
||||
When set forces the usage of the ssh-agent.
|
||||
|
||||
When key-file is also set, the ".pub" file of the specified key-file is read and only the associated key is
|
||||
requested from the ssh-agent. This avoids `Too many authentication failures for *username*` errors
|
||||
when the ssh-agent contains many keys.
|
||||
|
||||
- Config: key_use_agent
|
||||
- Env Var: RCLONE_SFTP_KEY_USE_AGENT
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --sftp-use-insecure-cipher
|
||||
|
||||
Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
|
||||
@@ -329,6 +329,33 @@ User ID to log in - optional - most swift systems use user and leave this blank
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --swift-application-credential-id
|
||||
|
||||
Application Credential ID to log in - optional (v3 auth) (OS_APPLICATION_CREDENTIAL_ID).
|
||||
|
||||
- Config: application_credential_id
|
||||
- Env Var: RCLONE_SWIFT_APPLICATION_CREDENTIAL_ID
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --swift-application-credential-name
|
||||
|
||||
Application Credential name to log in - optional (v3 auth) (OS_APPLICATION_CREDENTIAL_NAME).
|
||||
|
||||
- Config: application_credential_name
|
||||
- Env Var: RCLONE_SWIFT_APPLICATION_CREDENTIAL_NAME
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --swift-application-credential-secret
|
||||
|
||||
Application Credential secret to log in - optional (v3 auth) (OS_APPLICATION_CREDENTIAL_SECRET).
|
||||
|
||||
- Config: application_credential_secret
|
||||
- Env Var: RCLONE_SWIFT_APPLICATION_CREDENTIAL_SECRET
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --swift-domain
|
||||
|
||||
User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
|
||||
@@ -99,7 +99,11 @@ To copy a local directory to an WebDAV directory called backup
|
||||
Plain WebDAV does not support modified times. However when used with
|
||||
Owncloud or Nextcloud rclone will support modified times.
|
||||
|
||||
Hashes are not supported.
|
||||
Likewise plain WebDAV does not support hashes, however when used with
|
||||
Owncloud or Nextcloud rclone will support SHA1 and MD5 hashes.
|
||||
Depending on the exact version of Owncloud or Nextcloud hashes may
|
||||
appear on all objects, or only on objects which had a hash uploaded
|
||||
with them.
|
||||
|
||||
<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/webdav/webdav.go then run make backenddocs -->
|
||||
### Standard Options
|
||||
@@ -253,7 +257,7 @@ pass = encryptedpassword
|
||||
|
||||
### dCache ###
|
||||
|
||||
dCache is a storage system with WebDAV doors that support, beside basic and x509,
|
||||
[dCache](https://www.dcache.org/) is a storage system with WebDAV doors that support, besides basic and x509,
|
||||
authentication with [Macaroons](https://www.dcache.org/manuals/workshop-2017-05-29-Umea/000-Final/anupam_macaroons_v02.pdf) (bearer tokens).
|
||||
|
||||
Configure as normal using the `other` type. Don't enter a username or
|
||||
@@ -271,5 +275,5 @@ pass =
|
||||
bearer_token = your-macaroon
|
||||
```
|
||||
|
||||
There is a [script](https://github.com/onnozweers/dcache-scripts/blob/master/get-share-link) that
|
||||
There is a [script](https://github.com/sara-nl/GridScripts/blob/master/get-macaroon) that
|
||||
obtains a Macaroon from a dCache WebDAV endpoint, and creates an rclone config file.
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/asyncreader"
|
||||
@@ -243,6 +244,24 @@ func (acc *Account) eta() (etaDuration time.Duration, ok bool) {
|
||||
return eta(acc.bytes, acc.size, acc.avg)
|
||||
}
|
||||
|
||||
// shortenName shortens in to size runes long
|
||||
// If size <= 0 then in is left untouched
|
||||
func shortenName(in string, size int) string {
|
||||
if size <= 0 {
|
||||
return in
|
||||
}
|
||||
if utf8.RuneCountInString(in) <= size {
|
||||
return in
|
||||
}
|
||||
name := []rune(in)
|
||||
size-- // don't count ellipsis rune
|
||||
suffixLength := size / 2
|
||||
prefixLength := size - suffixLength
|
||||
suffixStart := len(name) - suffixLength
|
||||
name = append(append(name[:prefixLength], '…'), name[suffixStart:]...)
|
||||
return string(name)
|
||||
}
|
||||
|
||||
// String produces stats for this file
|
||||
func (acc *Account) String() string {
|
||||
a, b := acc.progress()
|
||||
@@ -257,16 +276,6 @@ func (acc *Account) String() string {
|
||||
}
|
||||
}
|
||||
|
||||
name := []rune(acc.name)
|
||||
if fs.Config.StatsFileNameLength > 0 {
|
||||
if len(name) > fs.Config.StatsFileNameLength {
|
||||
suffixLength := fs.Config.StatsFileNameLength / 2
|
||||
prefixLength := fs.Config.StatsFileNameLength - suffixLength
|
||||
suffixStart := len(name) - suffixLength
|
||||
name = append(append(name[:prefixLength], '…'), name[suffixStart:]...)
|
||||
}
|
||||
}
|
||||
|
||||
if fs.Config.DataRateUnit == "bits" {
|
||||
cur = cur * 8
|
||||
}
|
||||
@@ -276,11 +285,11 @@ func (acc *Account) String() string {
|
||||
percentageDone = int(100 * float64(a) / float64(b))
|
||||
}
|
||||
|
||||
done := fmt.Sprintf("%2d%% /%s", percentageDone, fs.SizeSuffix(b))
|
||||
|
||||
return fmt.Sprintf("%45s: %s, %s/s, %s",
|
||||
string(name),
|
||||
done,
|
||||
return fmt.Sprintf("%*s:%3d%% /%s, %s/s, %s",
|
||||
fs.Config.StatsFileNameLength,
|
||||
shortenName(acc.name, fs.Config.StatsFileNameLength),
|
||||
percentageDone,
|
||||
fs.SizeSuffix(b),
|
||||
fs.SizeSuffix(cur),
|
||||
etas,
|
||||
)
|
||||
|
||||
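The `%*s` verb in the new format string takes its field width from an argument instead of hard-coding it, which is how the stats line follows `--stats-file-name-length`. A standalone illustration (the file name and width are examples):

```
package main

import "fmt"

func main() {
	width := 12 // e.g. fs.Config.StatsFileNameLength
	// The * consumes width, left-padding "tofu.txt" to a field of 12.
	fmt.Printf("%*s:%3d%% /%s\n", width, "tofu.txt", 42, "1.2M")
	// Prints: "    tofu.txt: 42% /1.2M"
}
```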
@@ -2,10 +2,12 @@ package accounting
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/asyncreader"
|
||||
@@ -208,3 +210,46 @@ func TestAccountMaxTransfer(t *testing.T) {
|
||||
assert.Equal(t, ErrorMaxTransferLimitReached, err)
|
||||
assert.True(t, fserrors.IsFatalError(err))
|
||||
}
|
||||
|
||||
func TestShortenName(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
size int
|
||||
want string
|
||||
}{
|
||||
{"", 0, ""},
|
||||
{"abcde", 10, "abcde"},
|
||||
{"abcde", 0, "abcde"},
|
||||
{"abcde", -1, "abcde"},
|
||||
{"abcde", 5, "abcde"},
|
||||
{"abcde", 4, "ab…e"},
|
||||
{"abcde", 3, "a…e"},
|
||||
{"abcde", 2, "a…"},
|
||||
{"abcde", 1, "…"},
|
||||
{"abcdef", 6, "abcdef"},
|
||||
{"abcdef", 5, "ab…ef"},
|
||||
{"abcdef", 4, "ab…f"},
|
||||
{"abcdef", 3, "a…f"},
|
||||
{"abcdef", 2, "a…"},
|
||||
{"áßcdèf", 1, "…"},
|
||||
{"áßcdè", 5, "áßcdè"},
|
||||
{"áßcdè", 4, "áß…è"},
|
||||
{"áßcdè", 3, "á…è"},
|
||||
{"áßcdè", 2, "á…"},
|
||||
{"áßcdè", 1, "…"},
|
||||
{"áßcdèł", 6, "áßcdèł"},
|
||||
{"áßcdèł", 5, "áß…èł"},
|
||||
{"áßcdèł", 4, "áß…ł"},
|
||||
{"áßcdèł", 3, "á…ł"},
|
||||
{"áßcdèł", 2, "á…"},
|
||||
{"áßcdèł", 1, "…"},
|
||||
} {
|
||||
t.Run(fmt.Sprintf("in=%q, size=%d", test.in, test.size), func(t *testing.T) {
|
||||
got := shortenName(test.in, test.size)
|
||||
assert.Equal(t, test.want, got)
|
||||
if test.size > 0 {
|
||||
assert.True(t, utf8.RuneCountInString(got) <= test.size, "too big")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -92,8 +92,8 @@ type StatsInfo struct {
|
||||
// NewStats creates an initialised StatsInfo
|
||||
func NewStats() *StatsInfo {
|
||||
return &StatsInfo{
|
||||
checking: newStringSet(fs.Config.Checkers),
|
||||
transferring: newStringSet(fs.Config.Transfers),
|
||||
checking: newStringSet(fs.Config.Checkers, "checking"),
|
||||
transferring: newStringSet(fs.Config.Transfers, "transferring"),
|
||||
start: time.Now(),
|
||||
inProgress: newInProgress(),
|
||||
}
|
||||
@@ -320,6 +320,13 @@ func (s *StatsInfo) GetLastError() error {
|
||||
return s.lastError
|
||||
}
|
||||
|
||||
// GetChecks returns the number of checks
|
||||
func (s *StatsInfo) GetChecks() int64 {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.checks
|
||||
}
|
||||
|
||||
// FatalError sets the fatalError flag
|
||||
func (s *StatsInfo) FatalError() {
|
||||
s.mu.Lock()
|
||||
|
||||
@@ -1,21 +1,26 @@
|
||||
package accounting
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
// stringSet holds a set of strings
|
||||
type stringSet struct {
|
||||
mu sync.RWMutex
|
||||
items map[string]struct{}
|
||||
name string
|
||||
}
|
||||
|
||||
// newStringSet creates a new empty string set of capacity size
|
||||
func newStringSet(size int) *stringSet {
|
||||
func newStringSet(size int, name string) *stringSet {
|
||||
return &stringSet{
|
||||
items: make(map[string]struct{}, size),
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,7 +62,11 @@ func (ss *stringSet) Strings() []string {
|
||||
if acc := Stats.inProgress.get(name); acc != nil {
|
||||
out = acc.String()
|
||||
} else {
|
||||
out = name
|
||||
out = fmt.Sprintf("%*s: %s",
|
||||
fs.Config.StatsFileNameLength,
|
||||
shortenName(name, fs.Config.StatsFileNameLength),
|
||||
ss.name,
|
||||
)
|
||||
}
|
||||
strings = append(strings, " * "+out)
|
||||
}
|
||||
|
||||
@@ -85,6 +85,7 @@ type ConfigInfo struct {
|
||||
MaxBacklog int
|
||||
StatsOneLine bool
|
||||
Progress bool
|
||||
Cookie bool
|
||||
}
|
||||
|
||||
// NewConfig creates a new config with everything set to the default
|
||||
@@ -109,7 +110,7 @@ func NewConfig() *ConfigInfo {
|
||||
c.BufferSize = SizeSuffix(16 << 20)
|
||||
c.UserAgent = "rclone/" + Version
|
||||
c.StreamingUploadCutoff = SizeSuffix(100 * 1024)
|
||||
c.StatsFileNameLength = 40
|
||||
c.StatsFileNameLength = 45
|
||||
c.AskPassword = true
|
||||
c.TPSLimitBurst = 1
|
||||
c.MaxTransfer = -1
|
||||
|
||||
@@ -27,6 +27,7 @@ import (
|
||||
"github.com/Unknwon/goconfig"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/driveletter"
|
||||
@@ -57,8 +58,8 @@ const (
|
||||
// ConfigTokenURL is the config key used to store the token server endpoint
|
||||
ConfigTokenURL = "token_url"
|
||||
|
||||
// ConfigAutomatic indicates that we want non-interactive configuration
|
||||
ConfigAutomatic = "config_automatic"
|
||||
// ConfigAuthorize indicates that we just want "rclone authorize"
|
||||
ConfigAuthorize = "config_authorize"
|
||||
)
|
||||
|
||||
// Global
|
||||
@@ -639,21 +640,38 @@ func Command(commands []string) byte {
|
||||
}
|
||||
}
|
||||
|
||||
// ConfirmWithDefault asks the user for Yes or No and returns true or false.
|
||||
//
|
||||
// If AutoConfirm is set, it will return the Default value passed in
|
||||
func ConfirmWithDefault(Default bool) bool {
|
||||
if fs.Config.AutoConfirm {
|
||||
return Default
|
||||
}
|
||||
return Command([]string{"yYes", "nNo"}) == 'y'
|
||||
}
|
||||
|
||||
// Confirm asks the user for Yes or No and returns true or false
|
||||
//
|
||||
// If AutoConfirm is set, it will return true
|
||||
func Confirm() bool {
|
||||
return ConfirmWithDefault(true)
|
||||
return Command([]string{"yYes", "nNo"}) == 'y'
|
||||
}
|
||||
|
||||
// ConfirmWithConfig asks the user for Yes or No and returns true or
|
||||
// false.
|
||||
//
|
||||
// If AutoConfirm is set, it will look up the value in m and return
|
||||
// that, but if it isn't set then it will return the Default value
|
||||
// passed in
|
||||
func ConfirmWithConfig(m configmap.Getter, configName string, Default bool) bool {
|
||||
if fs.Config.AutoConfirm {
|
||||
configString, ok := m.Get(configName)
|
||||
if ok {
|
||||
configValue, err := strconv.ParseBool(configString)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Failed to parse config parameter %s=%q as boolean - using default %v: %v", configName, configString, Default, err)
|
||||
} else {
|
||||
Default = configValue
|
||||
}
|
||||
}
|
||||
answer := "No"
|
||||
if Default {
|
||||
answer = "Yes"
|
||||
}
|
||||
fmt.Printf("Auto confirm is set: answering %s, override by setting config parameter %s=%v\n", answer, configName, !Default)
|
||||
return Default
|
||||
}
|
||||
return Confirm()
|
||||
}
|
||||
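A hypothetical call site for the new helper, showing how `--auto-confirm` can be overridden per remote through a config parameter. The parameter name `config_refresh_token` is only an example:

```
// m is a configmap.Getter holding the remote's parameters.
if config.ConfirmWithConfig(m, "config_refresh_token", true) {
	// proceed, e.g. refresh the token
} else {
	// skip, keeping the existing state
}
```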
|
||||
// Choose one of the defaults or type a new string if newOk is set
|
||||
@@ -943,8 +961,6 @@ func CreateRemote(name string, provider string, keyValues rc.Params) error {
|
||||
getConfigData().DeleteSection(name)
|
||||
// Set the type
|
||||
getConfigData().SetValue(name, "type", provider)
|
||||
// Show this is automatically configured
|
||||
getConfigData().SetValue(name, ConfigAutomatic, "yes")
|
||||
// Set the remaining values
|
||||
return UpdateRemote(name, keyValues)
|
||||
}
|
||||
@@ -1227,6 +1243,7 @@ func SetPassword() {
|
||||
// rclone authorize "fs name"
|
||||
// rclone authorize "fs name" "client id" "client secret"
|
||||
func Authorize(args []string) {
|
||||
defer suppressConfirm()()
|
||||
switch len(args) {
|
||||
case 1, 3:
|
||||
default:
|
||||
@@ -1243,8 +1260,8 @@ func Authorize(args []string) {
|
||||
// Make sure we delete it
|
||||
defer DeleteRemote(name)
|
||||
|
||||
// Indicate that we want fully automatic configuration.
|
||||
getConfigData().SetValue(name, ConfigAutomatic, "yes")
|
||||
// Indicate that we are running rclone authorize
|
||||
getConfigData().SetValue(name, ConfigAuthorize, "true")
|
||||
if len(args) == 3 {
|
||||
getConfigData().SetValue(name, ConfigClientID, args[1])
|
||||
getConfigData().SetValue(name, ConfigClientSecret, args[2])
|
||||
|
||||
@@ -40,7 +40,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
|
||||
flags.IntVarP(flagSet, &fs.Config.Transfers, "transfers", "", fs.Config.Transfers, "Number of file transfers to run in parallel.")
|
||||
flags.StringVarP(flagSet, &config.ConfigPath, "config", "", config.ConfigPath, "Config file.")
|
||||
flags.StringVarP(flagSet, &config.CacheDir, "cache-dir", "", config.CacheDir, "Directory rclone will use for caching.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.CheckSum, "checksum", "c", fs.Config.CheckSum, "Skip based on checksum & size, not mod-time & size")
|
||||
flags.BoolVarP(flagSet, &fs.Config.CheckSum, "checksum", "c", fs.Config.CheckSum, "Skip based on checksum (if available) & size, not mod-time & size")
|
||||
flags.BoolVarP(flagSet, &fs.Config.SizeOnly, "size-only", "", fs.Config.SizeOnly, "Skip based on size only, not mod-time or checksum")
|
||||
flags.BoolVarP(flagSet, &fs.Config.IgnoreTimes, "ignore-times", "I", fs.Config.IgnoreTimes, "Don't skip files that match size and time - transfer all files")
|
||||
flags.BoolVarP(flagSet, &fs.Config.IgnoreExisting, "ignore-existing", "", fs.Config.IgnoreExisting, "Skip all files that exist on destination")
|
||||
@@ -87,6 +87,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
|
||||
flags.IntVarP(flagSet, &fs.Config.MaxBacklog, "max-backlog", "", fs.Config.MaxBacklog, "Maximum number of objects in sync or check backlog.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.StatsOneLine, "stats-one-line", "", fs.Config.StatsOneLine, "Make the stats fit on one line.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.Progress, "progress", "P", fs.Config.Progress, "Show progress during transfer.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.Cookie, "use-cookies", "", fs.Config.Cookie, "Enable session cookiejar.")
|
||||
}
|
||||
|
||||
// SetFlags converts any flags into config which weren't straightforward
|
||||
|
||||
@@ -7,12 +7,14 @@ import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/cookiejar"
|
||||
"net/http/httputil"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"golang.org/x/net/publicsuffix"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
@@ -22,9 +24,10 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
transport http.RoundTripper
|
||||
noTransport sync.Once
|
||||
tpsBucket *rate.Limiter // for limiting number of http transactions per second
|
||||
transport http.RoundTripper
|
||||
noTransport sync.Once
|
||||
tpsBucket *rate.Limiter // for limiting number of http transactions per second
|
||||
cookieJar, _ = cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
|
||||
)
|
||||
|
||||
// StartHTTPTokenBucket starts the token bucket if necessary
|
||||
@@ -142,9 +145,13 @@ func NewTransport(ci *fs.ConfigInfo) http.RoundTripper {
|
||||
|
||||
// NewClient returns an http.Client with the correct timeouts
|
||||
func NewClient(ci *fs.ConfigInfo) *http.Client {
|
||||
return &http.Client{
|
||||
transport := &http.Client{
|
||||
Transport: NewTransport(ci),
|
||||
}
|
||||
if ci.Cookie {
|
||||
transport.Jar = cookieJar
|
||||
}
|
||||
return transport
|
||||
}
|
||||
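A hedged usage sketch: when `--use-cookies` sets `ci.Cookie`, clients built here share the package-level jar, so session cookies persist across requests within one rclone run. The package name `fshttp` is assumed from where this code lives:

```
client := fshttp.NewClient(fs.Config)
resp, err := client.Get("https://example.com/") // cookies set by this response
if err != nil {
	log.Fatal(err)
}
_ = resp.Body.Close()
// Later requests through clients sharing the jar send those cookies back.
```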
|
||||
// Transport is our http Transport which wraps an http.Transport
|
||||
|
||||
@@ -12,7 +12,6 @@ func MimeTypeFromName(remote string) (mimeType string) {
|
||||
if !strings.ContainsRune(mimeType, '/') {
|
||||
mimeType = "application/octet-stream"
|
||||
}
|
||||
mimeType = strings.Replace(mimeType, "; charset=utf-8", "", -1) // Remove charset
|
||||
return mimeType
|
||||
}
|
||||
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// CheckHashes checks the two files to see if they have common
|
||||
@@ -104,6 +105,8 @@ func sizeDiffers(src, dst fs.ObjectInfo) bool {
|
||||
return src.Size() != dst.Size()
|
||||
}
|
||||
|
||||
var checksumWarning sync.Once
|
||||
|
||||
func equal(src fs.ObjectInfo, dst fs.Object, sizeOnly, checkSum bool) bool {
|
||||
if sizeDiffers(src, dst) {
|
||||
fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size())
|
||||
@@ -125,6 +128,9 @@ func equal(src fs.ObjectInfo, dst fs.Object, sizeOnly, checkSum bool) bool {
|
||||
return false
|
||||
}
|
||||
if ht == hash.None {
|
||||
checksumWarning.Do(func() {
|
||||
fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only")
|
||||
})
|
||||
fs.Debugf(src, "Size of src and dst objects identical")
|
||||
} else {
|
||||
fs.Debugf(src, "Size and %v of src and dst objects identical", ht)
|
||||
@@ -572,6 +578,7 @@ type checkMarch struct {
|
||||
noHashes int32
|
||||
srcFilesMissing int32
|
||||
dstFilesMissing int32
|
||||
matches int32
|
||||
}
|
||||
|
||||
// DstOnly have an object which is in the destination only
|
||||
@@ -639,6 +646,7 @@ func (c *checkMarch) Match(dst, src fs.DirEntry) (recurse bool) {
|
||||
if differ {
|
||||
atomic.AddInt32(&c.differences, 1)
|
||||
} else {
|
||||
atomic.AddInt32(&c.matches, 1)
|
||||
fs.Debugf(dstX, "OK")
|
||||
}
|
||||
if noHash {
|
||||
@@ -706,6 +714,9 @@ func CheckFn(fdst, fsrc fs.Fs, check checkFn, oneway bool) error {
|
||||
if c.noHashes > 0 {
|
||||
fs.Logf(fdst, "%d hashes could not be checked", c.noHashes)
|
||||
}
|
||||
if c.matches > 0 {
|
||||
fs.Logf(fdst, "%d matching files", c.matches)
|
||||
}
|
||||
if c.differences > 0 {
|
||||
return errors.Errorf("%d differences found", c.differences)
|
||||
}
|
||||
@@ -1575,3 +1586,81 @@ func (l *ListFormat) Format(entry fs.DirEntry) (result string) {
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// DirMove renames srcRemote to dstRemote
|
||||
//
|
||||
// It does this by loading the directory tree into memory (using ListR
|
||||
// if available) and doing renames in parallel.
|
||||
func DirMove(f fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
// Use DirMove if possible
|
||||
if doDirMove := f.Features().DirMove; doDirMove != nil {
|
||||
return doDirMove(f, srcRemote, dstRemote)
|
||||
}
|
||||
|
||||
// Load the directory tree into memory
|
||||
tree, err := walk.NewDirTree(f, srcRemote, true, -1)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "RenameDir tree walk")
|
||||
}
|
||||
|
||||
// Get the directories in sorted order
|
||||
dirs := tree.Dirs()
|
||||
|
||||
// Make the destination directories - must be done in order not in parallel
|
||||
for _, dir := range dirs {
|
||||
dstPath := dstRemote + dir[len(srcRemote):]
|
||||
err := f.Mkdir(dstPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "RenameDir mkdir")
|
||||
}
|
||||
}
|
||||
|
||||
// Rename the files in parallel
|
||||
type rename struct {
|
||||
o fs.Object
|
||||
newPath string
|
||||
}
|
||||
renames := make(chan rename, fs.Config.Transfers)
|
||||
g, ctx := errgroup.WithContext(context.Background())
|
||||
for i := 0; i < fs.Config.Transfers; i++ {
|
||||
g.Go(func() error {
|
||||
for job := range renames {
|
||||
dstOverwritten, _ := f.NewObject(job.newPath)
|
||||
_, err := Move(f, dstOverwritten, job.newPath, job.o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
for dir, entries := range tree {
|
||||
dstPath := dstRemote + dir[len(srcRemote):]
|
||||
for _, entry := range entries {
|
||||
if o, ok := entry.(fs.Object); ok {
|
||||
renames <- rename{o, path.Join(dstPath, path.Base(o.Remote()))}
|
||||
}
|
||||
}
|
||||
}
|
||||
close(renames)
|
||||
err = g.Wait()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "RenameDir renames")
|
||||
}
|
||||
|
||||
// Remove the source directories in reverse order
|
||||
for i := len(dirs) - 1; i >= 0; i-- {
|
||||
err := f.Rmdir(dirs[i])
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "RenameDir rmdir")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
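A minimal usage sketch, assuming `f` is an already-opened fs.Fs; the directory names are examples:

```
// Rename the whole "A1" tree to "A2", falling back to the
// mkdir/move/rmdir strategy above when the backend has no DirMove.
if err := operations.DirMove(f, "A1", "A2"); err != nil {
	log.Fatalf("dir move failed: %v", err)
}
```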
@@ -25,8 +25,10 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -271,11 +273,17 @@ func testCheck(t *testing.T, checkFunction func(fdst, fsrc fs.Fs, oneway bool) e
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
||||
check := func(i int, wantErrors int64, oneway bool) {
|
||||
check := func(i int, wantErrors int64, wantChecks int64, oneway bool) {
|
||||
fs.Debugf(r.Fremote, "%d: Starting check test", i)
|
||||
oldErrors := accounting.Stats.GetErrors()
|
||||
accounting.Stats.ResetCounters()
|
||||
var buf bytes.Buffer
|
||||
log.SetOutput(&buf)
|
||||
defer func() {
|
||||
log.SetOutput(os.Stderr)
|
||||
}()
|
||||
err := checkFunction(r.Fremote, r.Flocal, oneway)
|
||||
gotErrors := accounting.Stats.GetErrors() - oldErrors
|
||||
gotErrors := accounting.Stats.GetErrors()
|
||||
gotChecks := accounting.Stats.GetChecks()
|
||||
if wantErrors == 0 && err != nil {
|
||||
t.Errorf("%d: Got error when not expecting one: %v", i, err)
|
||||
}
|
||||
@@ -285,21 +293,27 @@ func testCheck(t *testing.T, checkFunction func(fdst, fsrc fs.Fs, oneway bool) e
|
||||
if wantErrors != gotErrors {
|
||||
t.Errorf("%d: Expecting %d errors but got %d", i, wantErrors, gotErrors)
|
||||
}
|
||||
if gotChecks > 0 && !strings.Contains(buf.String(), "matching files") {
|
||||
t.Errorf("%d: Total files matching line missing", i)
|
||||
}
|
||||
if wantChecks != gotChecks {
|
||||
t.Errorf("%d: Expecting %d total matching files but got %d", i, wantChecks, gotChecks)
|
||||
}
|
||||
fs.Debugf(r.Fremote, "%d: Ending check test", i)
|
||||
}
|
||||
|
||||
file1 := r.WriteBoth("rutabaga", "is tasty", t3)
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
check(1, 0, false)
|
||||
check(1, 0, 1, false)
|
||||
|
||||
file2 := r.WriteFile("potato2", "------------------------------------------------------------", t1)
|
||||
fstest.CheckItems(t, r.Flocal, file1, file2)
|
||||
check(2, 1, false)
|
||||
check(2, 1, 1, false)
|
||||
|
||||
file3 := r.WriteObject("empty space", "", t2)
|
||||
fstest.CheckItems(t, r.Fremote, file1, file3)
|
||||
check(3, 2, false)
|
||||
check(3, 2, 1, false)
|
||||
|
||||
file2r := file2
|
||||
if fs.Config.SizeOnly {
|
||||
@@ -308,16 +322,16 @@ func testCheck(t *testing.T, checkFunction func(fdst, fsrc fs.Fs, oneway bool) e
|
||||
r.WriteObject("potato2", "------------------------------------------------------------", t1)
|
||||
}
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2r, file3)
|
||||
check(4, 1, false)
|
||||
check(4, 1, 2, false)
|
||||
|
||||
r.WriteFile("empty space", "", t2)
|
||||
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
|
||||
check(5, 0, false)
|
||||
check(5, 0, 3, false)
|
||||
|
||||
file4 := r.WriteObject("remotepotato", "------------------------------------------------------------", t1)
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2r, file3, file4)
|
||||
check(6, 1, false)
|
||||
check(7, 0, true)
|
||||
check(6, 1, 3, false)
|
||||
check(7, 0, 3, true)
|
||||
}
|
||||
|
||||
func TestCheck(t *testing.T) {
|
||||
@@ -942,3 +956,90 @@ func TestListFormat(t *testing.T) {
|
||||
assert.Equal(t, fmt.Sprintf("%d", items[1].Size())+"|subdir/|"+items[1].ModTime().Local().Format("2006-01-02 15:04:05"), list.Format(items[1]))
|
||||
|
||||
}
|
||||
|
||||
func TestDirMove(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
||||
r.Mkdir(r.Fremote)
|
||||
|
||||
// Make some files and dirs
|
||||
r.ForceMkdir(r.Fremote)
|
||||
files := []fstest.Item{
|
||||
r.WriteObject("A1/one", "one", t1),
|
||||
r.WriteObject("A1/two", "two", t2),
|
||||
r.WriteObject("A1/B1/three", "three", t3),
|
||||
r.WriteObject("A1/B1/C1/four", "four", t1),
|
||||
r.WriteObject("A1/B1/C2/five", "five", t2),
|
||||
}
|
||||
require.NoError(t, operations.Mkdir(r.Fremote, "A1/B2"))
|
||||
require.NoError(t, operations.Mkdir(r.Fremote, "A1/B1/C3"))
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
t,
|
||||
r.Fremote,
|
||||
files,
|
||||
[]string{
|
||||
"A1",
|
||||
"A1/B1",
|
||||
"A1/B2",
|
||||
"A1/B1/C1",
|
||||
"A1/B1/C2",
|
||||
"A1/B1/C3",
|
||||
},
|
||||
fs.GetModifyWindow(r.Fremote),
|
||||
)
|
||||
|
||||
require.NoError(t, operations.DirMove(r.Fremote, "A1", "A2"))
|
||||
|
||||
for i := range files {
|
||||
files[i].Path = strings.Replace(files[i].Path, "A1/", "A2/", -1)
|
||||
files[i].WinPath = ""
|
||||
}
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
t,
|
||||
r.Fremote,
|
||||
files,
|
||||
[]string{
|
||||
"A2",
|
||||
"A2/B1",
|
||||
"A2/B2",
|
||||
"A2/B1/C1",
|
||||
"A2/B1/C2",
|
||||
"A2/B1/C3",
|
||||
},
|
||||
fs.GetModifyWindow(r.Fremote),
|
||||
)
|
||||
|
||||
// Disable DirMove
|
||||
features := r.Fremote.Features()
|
||||
oldDirMove := features.DirMove
|
||||
features.DirMove = nil
|
||||
defer func() {
|
||||
features.DirMove = oldDirMove
|
||||
}()
|
||||
|
||||
require.NoError(t, operations.DirMove(r.Fremote, "A2", "A3"))
|
||||
|
||||
for i := range files {
|
||||
files[i].Path = strings.Replace(files[i].Path, "A2/", "A3/", -1)
|
||||
files[i].WinPath = ""
|
||||
}
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
t,
|
||||
r.Fremote,
|
||||
files,
|
||||
[]string{
|
||||
"A3",
|
||||
"A3/B1",
|
||||
"A3/B2",
|
||||
"A3/B1/C1",
|
||||
"A3/B1/C2",
|
||||
"A3/B1/C3",
|
||||
},
|
||||
fs.GetModifyWindow(r.Fremote),
|
||||
)
|
||||
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"log"
|
||||
"path"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
@@ -27,11 +28,12 @@ type Test struct {
|
||||
//
|
||||
// FIXME make bucket based remotes set sub-dir automatically???
|
||||
type Backend struct {
|
||||
Backend string // name of the backend directory
|
||||
Remote string // name of the test remote
|
||||
SubDir bool // set to test with -sub-dir
|
||||
FastList bool // set to test with -fast-list
|
||||
OneOnly bool // set to run only one backend test at once
|
||||
Backend string // name of the backend directory
|
||||
Remote string // name of the test remote
|
||||
SubDir bool // set to test with -sub-dir
|
||||
FastList bool // set to test with -fast-list
|
||||
OneOnly bool // set to run only one backend test at once
|
||||
Ignore []string // test names to ignore the failure of
|
||||
}
|
||||
|
||||
// MakeRuns creates Run objects the Backend and Test
|
||||
@@ -47,6 +49,10 @@ func (b *Backend) MakeRuns(t *Test) (runs []*Run) {
|
||||
if b.FastList && t.FastList {
|
||||
fastlists = append(fastlists, true)
|
||||
}
|
||||
ignore := make(map[string]struct{}, len(b.Ignore))
|
||||
for _, item := range b.Ignore {
|
||||
ignore[item] = struct{}{}
|
||||
}
|
||||
for _, subdir := range subdirs {
|
||||
for _, fastlist := range fastlists {
|
||||
run := &Run{
|
||||
@@ -58,6 +64,7 @@ func (b *Backend) MakeRuns(t *Test) (runs []*Run) {
|
||||
NoRetries: t.NoRetries,
|
||||
OneOnly: b.OneOnly,
|
||||
NoBinary: t.NoBinary,
|
||||
Ignore: ignore,
|
||||
}
|
||||
if t.AddBackend {
|
||||
run.Path = path.Join(run.Path, b.Backend)
|
||||
@@ -119,7 +126,12 @@ func (c *Config) filterBackendsByRemotes(remotes []string) {
|
||||
}
|
||||
if !found {
|
||||
log.Printf("Remote %q not found - inserting with default flags", name)
|
||||
newBackends = append(newBackends, Backend{Remote: name})
|
||||
// Lookup which backend
|
||||
fsInfo, _, _, _, err := fs.ConfigFs(name)
|
||||
if err != nil {
|
||||
log.Fatalf("couldn't find remote %q: %v", name, err)
|
||||
}
|
||||
newBackends = append(newBackends, Backend{Backend: fsInfo.FileName(), Remote: name})
|
||||
}
|
||||
}
|
||||
c.Backends = newBackends
|
||||
|
||||
@@ -53,6 +53,30 @@ backends:
|
||||
remote: "TestS3:"
|
||||
subdir: true
|
||||
fastlist: true
|
||||
- backend: "s3"
|
||||
remote: "TestS3Minio:"
|
||||
subdir: true
|
||||
fastlist: true
|
||||
- backend: "s3"
|
||||
remote: "TestS3Wasabi:"
|
||||
subdir: true
|
||||
fastlist: true
|
||||
- backend: "s3"
|
||||
remote: "TestS3DigitalOcean:"
|
||||
subdir: true
|
||||
fastlist: true
|
||||
ignore:
|
||||
- TestIntegration/FsMkdir/FsPutFiles/FsCopy
|
||||
- backend: "s3"
|
||||
remote: "TestS3Ceph:"
|
||||
subdir: true
|
||||
fastlist: true
|
||||
ignore:
|
||||
- TestIntegration/FsMkdir/FsPutFiles/FsCopy
|
||||
- backend: "s3"
|
||||
remote: "TestS3Alibaba:"
|
||||
subdir: true
|
||||
fastlist: true
|
||||
- backend: "sftp"
|
||||
remote: "TestSftp:"
|
||||
subdir: false
|
||||
@@ -61,6 +85,12 @@ backends:
|
||||
remote: "TestSwift:"
|
||||
subdir: true
|
||||
fastlist: true
|
||||
- backend: "swift"
|
||||
remote: "TestSwiftCeph:"
|
||||
subdir: true
|
||||
fastlist: true
|
||||
ignore:
|
||||
- TestIntegration/FsMkdir/FsPutFiles/FsCopy
|
||||
- backend: "yandex"
|
||||
remote: "TestYandex:"
|
||||
subdir: false
|
||||
@@ -98,6 +128,8 @@ backends:
|
||||
remote: "TestMega:"
|
||||
subdir: false
|
||||
fastlist: false
|
||||
ignore:
|
||||
- TestIntegration/FsMkdir/FsPutFiles/PublicLink
|
||||
- backend: "opendrive"
|
||||
remote: "TestOpenDrive:"
|
||||
subdir: false
|
||||
|
||||
@@ -45,6 +45,7 @@ type Run struct {
|
||||
NoRetries bool // don't retry if set
|
||||
OneOnly bool // only run test for this backend at once
|
||||
NoBinary bool // set to not build a binary
|
||||
Ignore map[string]struct{}
|
||||
// Internals
|
||||
cmdLine []string
|
||||
cmdString string
|
||||
@@ -138,9 +139,15 @@ func (r *Run) findFailures() {
|
||||
oldFailedTests := r.failedTests
|
||||
r.failedTests = nil
|
||||
excludeParents := map[string]struct{}{}
|
||||
ignored := 0
|
||||
for _, matches := range failRe.FindAllSubmatch(r.output, -1) {
|
||||
failedTest := string(matches[1])
|
||||
r.failedTests = append(r.failedTests, failedTest)
|
||||
// Skip any ignored failures
|
||||
if _, found := r.Ignore[failedTest]; found {
|
||||
ignored++
|
||||
} else {
|
||||
r.failedTests = append(r.failedTests, failedTest)
|
||||
}
|
||||
// Find all the parents of this test
|
||||
parts := strings.Split(failedTest, "/")
|
||||
for i := len(parts) - 1; i >= 1; i-- {
|
||||
@@ -155,6 +162,12 @@ func (r *Run) findFailures() {
|
||||
}
|
||||
}
|
||||
r.failedTests = newTests
|
||||
if len(r.failedTests) == 0 && ignored > 0 {
|
||||
log.Printf("%q - Found %d ignored errors only - marking as good", r.cmdString, ignored)
|
||||
r.err = nil
|
||||
r.dumpOutput()
|
||||
return
|
||||
}
|
||||
if len(r.failedTests) != 0 {
|
||||
r.runFlag = testsToRegexp(r.failedTests)
|
||||
} else {
|
||||
|
||||
go.mod (5 changed lines)
@@ -9,6 +9,7 @@ require (
|
||||
github.com/Unknwon/goconfig v0.0.0-20181105214110-56bd8ab18619
|
||||
github.com/a8m/tree v0.0.0-20180321023834-3cf936ce15d6
|
||||
github.com/abbot/go-http-auth v0.4.0
|
||||
github.com/anacrolix/dms v0.0.0-20180117034613-8af4925bffb5
|
||||
github.com/aws/aws-sdk-go v1.15.81
|
||||
github.com/billziss-gh/cgofuse v1.1.0
|
||||
github.com/coreos/bbolt v0.0.0-20180318001526-af9db2027c98
|
||||
@@ -17,7 +18,7 @@ require (
|
||||
github.com/djherbis/times v1.1.0
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible
|
||||
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 // indirect
|
||||
github.com/goftp/server v0.0.0-20180914132916-1fd52c8552f1
|
||||
github.com/goftp/server v0.0.0-20190111142836-88de73f463af
|
||||
github.com/google/go-querystring v1.0.0 // indirect
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
@@ -29,7 +30,7 @@ require (
|
||||
github.com/kr/pretty v0.1.0 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.3 // indirect
|
||||
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
|
||||
github.com/ncw/swift v1.0.43
|
||||
github.com/ncw/swift v1.0.44
|
||||
github.com/nsf/termbox-go v0.0.0-20181027232701-60ab7e3d12ed
|
||||
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
|
||||
6 go.sum
@@ -14,6 +14,8 @@ github.com/a8m/tree v0.0.0-20180321023834-3cf936ce15d6 h1:UCQe3W9LxwL2ff5r0PqQfS
github.com/a8m/tree v0.0.0-20180321023834-3cf936ce15d6/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg=
github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=
github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM=
github.com/anacrolix/dms v0.0.0-20180117034613-8af4925bffb5 h1:lmyFvZXNGOmsKCYXNwzDLWafnxeewxsFwdsvTvSC1sg=
github.com/anacrolix/dms v0.0.0-20180117034613-8af4925bffb5/go.mod h1:DGqLjaZ3ziKKNRt+U5Q9PLWJ52Q/4rxfaaH/b3QYKaE=
github.com/aws/aws-sdk-go v1.15.81 h1:va7uoFaV9uKAtZ6BTmp1u7paoMsizYRRLvRuoC07nQ8=
github.com/aws/aws-sdk-go v1.15.81/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
github.com/billziss-gh/cgofuse v1.1.0 h1:tATn9ZDvuPcOVlvR4tJitGHgAqy1y18+4mKmRfdfjec=
@@ -37,6 +39,8 @@ github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 h1:cC0Hbb+18DJ4i
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9/go.mod h1:GpOj6zuVBG3Inr9qjEnuVTgBlk2lZ1S9DcoFiXWyKss=
github.com/goftp/server v0.0.0-20180914132916-1fd52c8552f1 h1:WjgeEHEDLGx56ndxS6FYi6qFjZGajSVHPuEPdpJ60cI=
github.com/goftp/server v0.0.0-20180914132916-1fd52c8552f1/go.mod h1:k/SS6VWkxY7dHPhoMQ8IdRu8L4lQtmGbhyXGg+vCnXE=
github.com/goftp/server v0.0.0-20190111142836-88de73f463af h1:PJxb1aA1z+Ohy2j28L92+ng9phXpZVFRFbPkfmJcRGo=
github.com/goftp/server v0.0.0-20190111142836-88de73f463af/go.mod h1:k/SS6VWkxY7dHPhoMQ8IdRu8L4lQtmGbhyXGg+vCnXE=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
@@ -70,6 +74,8 @@ github.com/ncw/swift v1.0.42 h1:ztvRb6hs52IHOcaYt73f9lXYLIeIuWgdooRDhdyllGI=
github.com/ncw/swift v1.0.42/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/ncw/swift v1.0.43 h1:TZn2l/bPV0CqG+/G5BFh/ROWnyX7dL2D0URaOjNQRsw=
github.com/ncw/swift v1.0.43/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/ncw/swift v1.0.44 h1:EKvOTvUxElbpDWqxsyVaVGvc2IfuOqQnRmjnR2AGhQ4=
github.com/ncw/swift v1.0.44/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/nsf/termbox-go v0.0.0-20181027232701-60ab7e3d12ed h1:bAVGG6B+R5qpSylrrA+BAMrzYkdAoiTaKPVxRB+4cyM=
github.com/nsf/termbox-go v0.0.0-20181027232701-60ab7e3d12ed/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd h1:+iAPaTbi1gZpcpDwe/BW1fx7Xoesv69hLNGPheoyhBs=
22 lib/file/file.go Normal file
@@ -0,0 +1,22 @@
// Package file provides a version of os.OpenFile, the handles of
// which can be renamed and deleted under Windows.
package file

import "os"

// Open opens the named file for reading. If successful, methods on
// the returned file can be used for reading; the associated file
// descriptor has mode O_RDONLY.
// If there is an error, it will be of type *PathError.
func Open(name string) (*os.File, error) {
    return OpenFile(name, os.O_RDONLY, 0)
}

// Create creates the named file with mode 0666 (before umask), truncating
// it if it already exists. If successful, methods on the returned
// File can be used for I/O; the associated file descriptor has mode
// O_RDWR.
// If there is an error, it will be of type *PathError.
func Create(name string) (*os.File, error) {
    return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
}
15 lib/file/file_other.go Normal file
@@ -0,0 +1,15 @@
//+build !windows

package file

import "os"

// OpenFile is the generalized open call; most users will use Open or Create
// instead. It opens the named file with specified flag (O_RDONLY etc.) and
// perm (before umask), if applicable. If successful, methods on the returned
// File can be used for I/O. If there is an error, it will be of type
// *PathError.
//
// Under both Unix and Windows this will allow open files to be
// renamed and or deleted.
var OpenFile = os.OpenFile
154 lib/file/file_test.go Normal file
@@ -0,0 +1,154 @@
package file

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

// Create a test directory then tidy up
func testDir(t *testing.T) (string, func()) {
    dir, err := ioutil.TempDir("", "rclone-test")
    require.NoError(t, err)
    return dir, func() {
        assert.NoError(t, os.RemoveAll(dir))
    }
}

// This lists dir and checks the listing is as expected without checking the size
func checkListingNoSize(t *testing.T, dir string, want []string) {
    var got []string
    nodes, err := ioutil.ReadDir(dir)
    require.NoError(t, err)
    for _, node := range nodes {
        got = append(got, fmt.Sprintf("%s,%v", node.Name(), node.IsDir()))
    }
    assert.Equal(t, want, got)
}

// This lists dir and checks the listing is as expected
func checkListing(t *testing.T, dir string, want []string) {
    var got []string
    nodes, err := ioutil.ReadDir(dir)
    require.NoError(t, err)
    for _, node := range nodes {
        got = append(got, fmt.Sprintf("%s,%d,%v", node.Name(), node.Size(), node.IsDir()))
    }
    assert.Equal(t, want, got)
}

// Test we can delete an open file
func TestOpenFileDelete(t *testing.T) {
    dir, tidy := testDir(t)
    defer tidy()

    filepath := path.Join(dir, "file1")
    f, err := Create(filepath)
    require.NoError(t, err)

    _, err = f.Write([]byte("hello"))
    assert.NoError(t, err)

    checkListingNoSize(t, dir, []string{
        "file1,false",
    })

    // Delete the file first
    assert.NoError(t, os.Remove(filepath))

    // .. then close it
    assert.NoError(t, f.Close())

    checkListing(t, dir, nil)
}

// Test we can rename an open file
func TestOpenFileRename(t *testing.T) {
    dir, tidy := testDir(t)
    defer tidy()

    filepath := path.Join(dir, "file1")
    f, err := Create(filepath)
    require.NoError(t, err)

    _, err = f.Write([]byte("hello"))
    assert.NoError(t, err)

    checkListingNoSize(t, dir, []string{
        "file1,false",
    })

    // Rename the file while open
    filepath2 := path.Join(dir, "file2")
    assert.NoError(t, os.Rename(filepath, filepath2))

    checkListingNoSize(t, dir, []string{
        "file2,false",
    })

    // .. then close it
    assert.NoError(t, f.Close())

    checkListing(t, dir, []string{
        "file2,5,false",
    })
}

// Smoke test the Open, OpenFile and Create functions
func TestOpenFileOperations(t *testing.T) {
    dir, tidy := testDir(t)
    defer tidy()

    filepath := path.Join(dir, "file1")

    // Create the file

    f, err := Create(filepath)
    require.NoError(t, err)

    _, err = f.Write([]byte("hello"))
    assert.NoError(t, err)

    assert.NoError(t, f.Close())

    checkListing(t, dir, []string{
        "file1,5,false",
    })

    // Append onto the file

    f, err = OpenFile(filepath, os.O_RDWR|os.O_APPEND, 0666)
    require.NoError(t, err)

    _, err = f.Write([]byte("HI"))
    assert.NoError(t, err)

    assert.NoError(t, f.Close())

    checkListing(t, dir, []string{
        "file1,7,false",
    })

    // Read it back in

    f, err = Open(filepath)
    require.NoError(t, err)
    var b = make([]byte, 10)
    n, err := f.Read(b)
    assert.True(t, err == io.EOF || err == nil)
    assert.Equal(t, 7, n)
    assert.Equal(t, "helloHI", string(b[:n]))

    assert.NoError(t, f.Close())

    checkListing(t, dir, []string{
        "file1,7,false",
    })

}
66 lib/file/file_windows.go Normal file
@@ -0,0 +1,66 @@
//+build windows

package file

import (
    "os"
    "syscall"
)

// OpenFile is the generalized open call; most users will use Open or Create
// instead. It opens the named file with specified flag (O_RDONLY etc.) and
// perm (before umask), if applicable. If successful, methods on the returned
// File can be used for I/O. If there is an error, it will be of type
// *PathError.
//
// Under both Unix and Windows this will allow open files to be
// renamed and or deleted.
func OpenFile(path string, mode int, perm os.FileMode) (*os.File, error) {
    // This code copied from syscall_windows.go in the go source and then
    // modified to support renaming and deleting open files by adding
    // FILE_SHARE_DELETE.
    //
    // https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-createfilea#file_share_delete
    if len(path) == 0 {
        return nil, syscall.ERROR_FILE_NOT_FOUND
    }
    pathp, err := syscall.UTF16PtrFromString(path)
    if err != nil {
        return nil, err
    }
    var access uint32
    switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
    case syscall.O_RDONLY:
        access = syscall.GENERIC_READ
    case syscall.O_WRONLY:
        access = syscall.GENERIC_WRITE
    case syscall.O_RDWR:
        access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
    }
    if mode&syscall.O_CREAT != 0 {
        access |= syscall.GENERIC_WRITE
    }
    if mode&syscall.O_APPEND != 0 {
        access &^= syscall.GENERIC_WRITE
        access |= syscall.FILE_APPEND_DATA
    }
    sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
    var createmode uint32
    switch {
    case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
        createmode = syscall.CREATE_NEW
    case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
        createmode = syscall.CREATE_ALWAYS
    case mode&syscall.O_CREAT == syscall.O_CREAT:
        createmode = syscall.OPEN_ALWAYS
    case mode&syscall.O_TRUNC == syscall.O_TRUNC:
        createmode = syscall.TRUNCATE_EXISTING
    default:
        createmode = syscall.OPEN_EXISTING
    }
    h, e := syscall.CreateFile(pathp, access, sharemode, nil, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0)
    if e != nil {
        return nil, e
    }
    return os.NewFile(uintptr(h), path), nil
}
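The whole point of the wrapper above is the extra FILE_SHARE_DELETE share mode: other code can rename or delete a file while a handle to it is still open, matching Unix semantics. A minimal sketch of the intended call pattern (hypothetical caller, not part of this diff; the import path follows the module path in go.mod):

```go
package main

import (
    "log"
    "os"

    "github.com/ncw/rclone/lib/file"
)

func main() {
    f, err := file.Create("data.tmp")
    if err != nil {
        log.Fatal(err)
    }
    // With FILE_SHARE_DELETE the rename succeeds on Windows
    // even though f is still open; plain os.Create would make
    // this fail with a sharing violation.
    if err := os.Rename("data.tmp", "data"); err != nil {
        log.Fatal(err)
    }
    _ = f.Close()
}
```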
@@ -358,18 +358,26 @@ func ConfigErrorCheck(id, name string, m configmap.Mapper, errorHandler func(*ht

func doConfig(id, name string, m configmap.Mapper, errorHandler func(*http.Request) AuthError, oauthConfig *oauth2.Config, offline bool, opts []oauth2.AuthCodeOption) error {
    oauthConfig, changed := overrideCredentials(name, m, oauthConfig)
    auto, ok := m.Get(config.ConfigAutomatic)
    automatic := ok && auto != ""
    authorizeOnlyValue, ok := m.Get(config.ConfigAuthorize)
    authorizeOnly := ok && authorizeOnlyValue != "" // set if being run by "rclone authorize"

    // See if already have a token
    tokenString, ok := m.Get("token")
    if ok && tokenString != "" {
        fmt.Printf("Already have a token - refresh?\n")
        if !config.Confirm() {
        if !config.ConfirmWithConfig(m, "config_refresh_token", true) {
            return nil
        }
    }

    // Ask the user whether they are using a local machine
    isLocal := func() bool {
        fmt.Printf("Use auto config?\n")
        fmt.Printf(" * Say Y if not sure\n")
        fmt.Printf(" * Say N if you are working on a remote or headless machine\n")
        return config.ConfirmWithConfig(m, "config_is_local", true)
    }

    // Detect whether we should use internal web server
    useWebServer := false
    switch oauthConfig.RedirectURL {
@@ -378,14 +386,10 @@ func doConfig(id, name string, m configmap.Mapper, errorHandler func(*http.Reque
            fmt.Printf("Make sure your Redirect URL is set to %q in your custom config.\n", oauthConfig.RedirectURL)
        }
        useWebServer = true
        if automatic {
        if authorizeOnly {
            break
        }
        fmt.Printf("Use auto config?\n")
        fmt.Printf(" * Say Y if not sure\n")
        fmt.Printf(" * Say N if you are working on a remote or headless machine\n")
        auto := config.Confirm()
        if !auto {
        if !isLocal() {
            fmt.Printf("For this to work, you will need rclone available on a machine that has a web browser available.\n")
            fmt.Printf("Execute the following on your machine:\n")
            if changed {
@@ -407,12 +411,9 @@ func doConfig(id, name string, m configmap.Mapper, errorHandler func(*http.Reque
            return PutToken(name, m, token, true)
        }
    case TitleBarRedirectURL:
        useWebServer = automatic
        if !automatic {
            fmt.Printf("Use auto config?\n")
            fmt.Printf(" * Say Y if not sure\n")
            fmt.Printf(" * Say N if you are working on a remote or headless machine or Y didn't work\n")
            useWebServer = config.Confirm()
        useWebServer = authorizeOnly
        if !authorizeOnly {
            useWebServer = isLocal()
        }
        if useWebServer {
            // copy the config and set to use the internal webserver
@@ -479,12 +480,12 @@ func doConfig(id, name string, m configmap.Mapper, errorHandler func(*http.Reque
    }

    // Print code if we do automatic retrieval
    if automatic {
    if authorizeOnly {
        result, err := json.Marshal(token)
        if err != nil {
            return errors.Wrap(err, "failed to marshal token")
        }
        fmt.Printf("Paste the following into your remote machine --->\n%s\n<---End paste", result)
        fmt.Printf("Paste the following into your remote machine --->\n%s\n<---End paste\n", result)
    }
    return PutToken(name, m, token, true)
}
@@ -2,12 +2,14 @@
package pacer

import (
    "context"
    "math/rand"
    "sync"
    "time"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/fserrors"
    "golang.org/x/time/rate"
)

// Pacer state
@@ -15,6 +17,8 @@ type Pacer struct {
    mu             sync.Mutex    // Protecting read/writes
    minSleep       time.Duration // minimum sleep time
    maxSleep       time.Duration // maximum sleep time
    burst          int           // number of calls to send without rate limiting
    limiter        *rate.Limiter // rate limiter for the minsleep
    decayConstant  uint          // decay constant
    attackConstant uint          // attack constant
    pacer          chan struct{} // To pace the operations
@@ -76,7 +80,6 @@ type Paced func() (bool, error)
// New returns a Pacer with sensible defaults
func New() *Pacer {
    p := &Pacer{
        minSleep:       10 * time.Millisecond,
        maxSleep:       2 * time.Second,
        decayConstant:  2,
        attackConstant: 1,
@@ -86,6 +89,7 @@ func New() *Pacer {
    p.sleepTime = p.minSleep
    p.SetPacer(DefaultPacer)
    p.SetMaxConnections(fs.Config.Checkers + fs.Config.Transfers)
    p.SetMinSleep(10 * time.Millisecond)

    // Put the first pacing token in
    p.pacer <- struct{}{}
@@ -114,6 +118,16 @@ func (p *Pacer) SetMinSleep(t time.Duration) *Pacer {
    defer p.mu.Unlock()
    p.minSleep = t
    p.sleepTime = p.minSleep
    p.limiter = rate.NewLimiter(rate.Every(p.minSleep), p.burst)
    return p
}

// SetBurst sets the burst with no limiting of the pacer
func (p *Pacer) SetBurst(n int) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.burst = n
    p.limiter = rate.NewLimiter(rate.Every(p.minSleep), p.burst)
    return p
}

@@ -216,11 +230,19 @@ func (p *Pacer) beginCall() {

    p.mu.Lock()
    // Restart the timer
    go func(t time.Duration) {
    go func(sleepTime, minSleep time.Duration) {
        // fs.Debugf(f, "New sleep for %v at %v", t, time.Now())
        time.Sleep(t)
        // Sleep the minimum time with the rate limiter
        if minSleep > 0 && sleepTime >= minSleep {
            _ = p.limiter.Wait(context.Background())
            sleepTime -= minSleep
        }
        // Then sleep the remaining time
        if sleepTime > 0 {
            time.Sleep(sleepTime)
        }
        p.pacer <- struct{}{}
    }(p.sleepTime)
    }(p.sleepTime, p.minSleep)
    p.mu.Unlock()
}
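The reworked beginCall splits each pause in two: the first minSleep goes through a shared token-bucket limiter, so up to `burst` calls pass without waiting, and only the backoff remainder is a plain sleep. A standalone sketch of that token-bucket behaviour using the same golang.org/x/time/rate API (the values are illustrative, not rclone's defaults):

```go
package main

import (
    "context"
    "fmt"
    "time"

    "golang.org/x/time/rate"
)

func main() {
    minSleep := 100 * time.Millisecond
    burst := 3
    // One token refills every minSleep; the bucket starts with
    // `burst` tokens, so the first `burst` waits return at once.
    limiter := rate.NewLimiter(rate.Every(minSleep), burst)
    start := time.Now()
    for i := 0; i < 6; i++ {
        _ = limiter.Wait(context.Background())
        fmt.Printf("call %d at %v\n", i, time.Since(start).Round(10*time.Millisecond))
    }
}
```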
@@ -198,7 +198,17 @@ func (api *Client) Call(opts *Opts) (resp *http.Response, err error) {
    if opts.Parameters != nil && len(opts.Parameters) > 0 {
        url += "?" + opts.Parameters.Encode()
    }
    req, err := http.NewRequest(opts.Method, url, opts.Body)
    body := opts.Body
    // If the length is set and zero then nil out the body to stop
    // the use of chunked encoding and insert a "Content-Length: 0"
    // header.
    //
    // If we don't do this we get "Content-Length" headers for all
    // files except 0 length files.
    if opts.ContentLength != nil && *opts.ContentLength == 0 {
        body = nil
    }
    req, err := http.NewRequest(opts.Method, url, body)
    if err != nil {
        return
    }
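The fix leans on a detail of net/http: http.NewRequest only knows the length of a few concrete reader types, so any other non-nil body counts as "length unknown" and goes out with Transfer-Encoding: chunked, while a nil body is known empty and gets Content-Length: 0. A small sketch of the difference (hypothetical URL, standard library only):

```go
package main

import (
    "fmt"
    "io"
    "net/http"
    "strings"
)

func main() {
    // A reader type that NewRequest does not special-case has
    // unknown length - even when it is actually empty - so the
    // transport would send it with Transfer-Encoding: chunked.
    var r io.Reader = io.LimitReader(strings.NewReader(""), 0)
    req1, _ := http.NewRequest("PUT", "http://example.com/x", r)
    fmt.Println(req1.ContentLength, req1.Body != nil) // 0 true -> "unknown", chunked

    // A nil body is known to be empty: Content-Length: 0, no chunking.
    req2, _ := http.NewRequest("PUT", "http://example.com/x", nil)
    fmt.Println(req2.ContentLength, req2.Body != nil) // 0 false -> empty, no chunking
}
```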
24 vendor/github.com/anacrolix/dms/LICENSE generated vendored Normal file
@@ -0,0 +1,24 @@
Copyright (c) 2012, Matt Joiner <anacrolix@gmail.com>.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the <organization> nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
102 vendor/github.com/anacrolix/dms/dlna/dlna.go generated vendored Normal file
@@ -0,0 +1,102 @@
package dlna

import (
    "fmt"
    "strings"
    "time"
)

const (
    TimeSeekRangeDomain   = "TimeSeekRange.dlna.org"
    ContentFeaturesDomain = "contentFeatures.dlna.org"
    TransferModeDomain    = "transferMode.dlna.org"
)

type ContentFeatures struct {
    ProfileName     string
    SupportTimeSeek bool
    SupportRange    bool
    // Play speeds, DLNA.ORG_PS would go here if supported.
    Transcoded bool
}

func BinaryInt(b bool) uint {
    if b {
        return 1
    } else {
        return 0
    }
}

// flags are in hex. trailing 24 zeroes, 26 are after the space
// "DLNA.ORG_OP=" time-seek-range-supp bytes-range-header-supp
func (cf ContentFeatures) String() (ret string) {
    //DLNA.ORG_PN=[a-zA-Z0-9_]*
    params := make([]string, 0, 2)
    if cf.ProfileName != "" {
        params = append(params, "DLNA.ORG_PN="+cf.ProfileName)
    }
    params = append(params, fmt.Sprintf(
        "DLNA.ORG_OP=%b%b;DLNA.ORG_CI=%b",
        BinaryInt(cf.SupportTimeSeek),
        BinaryInt(cf.SupportRange),
        BinaryInt(cf.Transcoded)))
    return strings.Join(params, ";")
}

func ParseNPTTime(s string) (time.Duration, error) {
    var h, m, sec, ms time.Duration
    n, err := fmt.Sscanf(s, "%d:%2d:%2d.%3d", &h, &m, &sec, &ms)
    if err != nil {
        return -1, err
    }
    if n < 3 {
        return -1, fmt.Errorf("invalid npt time: %s", s)
    }
    ret := time.Duration(h) * time.Hour
    ret += time.Duration(m) * time.Minute
    ret += sec * time.Second
    ret += ms * time.Millisecond
    return ret, nil
}

func FormatNPTTime(npt time.Duration) string {
    npt /= time.Millisecond
    ms := npt % 1000
    npt /= 1000
    s := npt % 60
    npt /= 60
    m := npt % 60
    npt /= 60
    h := npt
    return fmt.Sprintf("%02d:%02d:%02d.%03d", h, m, s, ms)
}

type NPTRange struct {
    Start, End time.Duration
}

func ParseNPTRange(s string) (ret NPTRange, err error) {
    ss := strings.SplitN(s, "-", 2)
    if ss[0] != "" {
        ret.Start, err = ParseNPTTime(ss[0])
        if err != nil {
            return
        }
    }
    if ss[1] != "" {
        ret.End, err = ParseNPTTime(ss[1])
        if err != nil {
            return
        }
    }
    return
}

func (me NPTRange) String() (ret string) {
    ret = me.Start.String() + "-"
    if me.End >= 0 {
        ret += me.End.String()
    }
    return
}
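For reference, the NPT helpers above round-trip the hh:mm:ss.mmm timestamps carried in TimeSeekRange.dlna.org headers. A quick sketch (assuming the vendored import path):

```go
package main

import (
    "fmt"
    "time"

    "github.com/anacrolix/dms/dlna"
)

func main() {
    // Parse an NPT timestamp as found in a TimeSeekRange.dlna.org header.
    d, err := dlna.ParseNPTTime("01:05:30.250")
    if err != nil {
        panic(err)
    }
    fmt.Println(d) // 1h5m30.25s

    // Format back into the canonical hh:mm:ss.mmm form.
    fmt.Println(dlna.FormatNPTTime(d + 500*time.Millisecond)) // 01:05:30.750
}
```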
68 vendor/github.com/anacrolix/dms/soap/soap.go generated vendored Normal file
@@ -0,0 +1,68 @@
package soap

import (
    "encoding/xml"
)

const (
    EncodingStyle = "http://schemas.xmlsoap.org/soap/encoding/"
    EnvelopeNS    = "http://schemas.xmlsoap.org/soap/envelope/"
)

type Arg struct {
    XMLName xml.Name
    Value   string `xml:",chardata"`
}

type Action struct {
    XMLName xml.Name
    Args    []Arg
}

type Body struct {
    Action []byte `xml:",innerxml"`
}

type UPnPError struct {
    XMLName xml.Name `xml:"urn:schemas-upnp-org:control-1-0 UPnPError"`
    Code    uint     `xml:"errorCode"`
    Desc    string   `xml:"errorDescription"`
}

type FaultDetail struct {
    XMLName xml.Name `xml:"detail"`
    Data    interface{}
}

type Fault struct {
    XMLName     xml.Name    `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault"`
    FaultCode   string      `xml:"faultcode"`
    FaultString string      `xml:"faultstring"`
    Detail      FaultDetail `xml:"detail"`
}

func NewFault(s string, detail interface{}) *Fault {
    return &Fault{
        FaultCode:   EnvelopeNS + ":Client",
        FaultString: s,
        Detail: FaultDetail{
            Data: detail,
        },
    }
}

type Envelope struct {
    XMLName       xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Envelope"`
    EncodingStyle string   `xml:"encodingStyle,attr"`
    Body          Body     `xml:"http://schemas.xmlsoap.org/soap/envelope/ Body"`
}

/* XML marshalling of nested namespaces is broken.

func NewEnvelope(action []byte) Envelope {
    return Envelope{
        EncodingStyle: EncodingStyle,
        Body:          Body{action},
    }
}
*/
330 vendor/github.com/anacrolix/dms/ssdp/ssdp.go generated vendored Normal file
@@ -0,0 +1,330 @@
package ssdp

import (
    "bufio"
    "bytes"
    "fmt"
    "io"
    "log"
    "math/rand"
    "net"
    "net/http"
    "net/textproto"
    "strconv"
    "strings"
    "time"

    "golang.org/x/net/ipv4"
)

const (
    AddrString = "239.255.255.250:1900"
    rootDevice = "upnp:rootdevice"
    aliveNTS   = "ssdp:alive"
    byebyeNTS  = "ssdp:byebye"
)

var (
    NetAddr *net.UDPAddr
)

func init() {
    var err error
    NetAddr, err = net.ResolveUDPAddr("udp4", AddrString)
    if err != nil {
        log.Panicf("Could not resolve %s: %s", AddrString, err)
    }
}

type badStringError struct {
    what string
    str  string
}

func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }

func ReadRequest(b *bufio.Reader) (req *http.Request, err error) {
    tp := textproto.NewReader(b)
    var s string
    if s, err = tp.ReadLine(); err != nil {
        return nil, err
    }
    defer func() {
        if err == io.EOF {
            err = io.ErrUnexpectedEOF
        }
    }()

    var f []string
    // TODO a split that only allows N values?
    if f = strings.SplitN(s, " ", 3); len(f) < 3 {
        return nil, &badStringError{"malformed request line", s}
    }
    if f[1] != "*" {
        return nil, &badStringError{"bad URL request", f[1]}
    }
    req = &http.Request{
        Method: f[0],
    }
    var ok bool
    if req.ProtoMajor, req.ProtoMinor, ok = http.ParseHTTPVersion(strings.TrimSpace(f[2])); !ok {
        return nil, &badStringError{"malformed HTTP version", f[2]}
    }

    mimeHeader, err := tp.ReadMIMEHeader()
    if err != nil {
        return nil, err
    }
    req.Header = http.Header(mimeHeader)
    return
}

type Server struct {
    conn           *net.UDPConn
    Interface      net.Interface
    Server         string
    Services       []string
    Devices        []string
    Location       func(net.IP) string
    UUID           string
    NotifyInterval time.Duration
    closed         chan struct{}
}

func makeConn(ifi net.Interface) (ret *net.UDPConn, err error) {
    ret, err = net.ListenMulticastUDP("udp", &ifi, NetAddr)
    if err != nil {
        return
    }
    p := ipv4.NewPacketConn(ret)
    if err := p.SetMulticastTTL(2); err != nil {
        log.Println(err)
    }
    if err := p.SetMulticastLoopback(true); err != nil {
        log.Println(err)
    }
    return
}

func (me *Server) serve() {
    for {
        b := make([]byte, me.Interface.MTU)
        n, addr, err := me.conn.ReadFromUDP(b)
        select {
        case <-me.closed:
            return
        default:
        }
        if err != nil {
            log.Printf("error reading from UDP socket: %s", err)
            break
        }
        go me.handle(b[:n], addr)
    }
}

func (me *Server) Init() (err error) {
    me.closed = make(chan struct{})
    me.conn, err = makeConn(me.Interface)
    return
}

func (me *Server) Close() {
    close(me.closed)
    me.sendByeBye()
    me.conn.Close()
}

func (me *Server) Serve() (err error) {
    go me.serve()
    for {
        addrs, err := me.Interface.Addrs()
        if err != nil {
            return err
        }
        for _, addr := range addrs {
            ip := func() net.IP {
                switch val := addr.(type) {
                case *net.IPNet:
                    return val.IP
                case *net.IPAddr:
                    return val.IP
                }
                panic(fmt.Sprint("unexpected addr type:", addr))
            }()
            extraHdrs := [][2]string{
                {"CACHE-CONTROL", fmt.Sprintf("max-age=%d", 5*me.NotifyInterval/2/time.Second)},
                {"LOCATION", me.Location(ip)},
            }
            me.notifyAll(aliveNTS, extraHdrs)
        }
        time.Sleep(me.NotifyInterval)
    }
}

func (me *Server) usnFromTarget(target string) string {
    if target == me.UUID {
        return target
    }
    return me.UUID + "::" + target
}

func (me *Server) makeNotifyMessage(target, nts string, extraHdrs [][2]string) []byte {
    lines := [...][2]string{
        {"HOST", AddrString},
        {"NT", target},
        {"NTS", nts},
        {"SERVER", me.Server},
        {"USN", me.usnFromTarget(target)},
    }
    buf := &bytes.Buffer{}
    fmt.Fprint(buf, "NOTIFY * HTTP/1.1\r\n")
    writeHdr := func(keyValue [2]string) {
        fmt.Fprintf(buf, "%s: %s\r\n", keyValue[0], keyValue[1])
    }
    for _, pair := range lines {
        writeHdr(pair)
    }
    for _, pair := range extraHdrs {
        writeHdr(pair)
    }
    fmt.Fprint(buf, "\r\n")
    return buf.Bytes()
}

func (me *Server) send(buf []byte, addr *net.UDPAddr) {
    if n, err := me.conn.WriteToUDP(buf, addr); err != nil {
        log.Printf("error writing to UDP socket: %s", err)
    } else if n != len(buf) {
        log.Printf("short write: %d/%d bytes", n, len(buf))
    }
}

func (me *Server) delayedSend(delay time.Duration, buf []byte, addr *net.UDPAddr) {
    go func() {
        select {
        case <-time.After(delay):
            me.send(buf, addr)
        case <-me.closed:
        }
    }()
}

func (me *Server) log(args ...interface{}) {
    args = append([]interface{}{me.Interface.Name + ":"}, args...)
    log.Print(args...)
}

func (me *Server) sendByeBye() {
    for _, type_ := range me.allTypes() {
        buf := me.makeNotifyMessage(type_, byebyeNTS, nil)
        me.send(buf, NetAddr)
    }
}

func (me *Server) notifyAll(nts string, extraHdrs [][2]string) {
    for _, type_ := range me.allTypes() {
        buf := me.makeNotifyMessage(type_, nts, extraHdrs)
        delay := time.Duration(rand.Int63n(int64(100 * time.Millisecond)))
        me.delayedSend(delay, buf, NetAddr)
    }
}

func (me *Server) allTypes() (ret []string) {
    for _, a := range [][]string{
        {rootDevice, me.UUID},
        me.Devices,
        me.Services,
    } {
        ret = append(ret, a...)
    }
    return
}

func (me *Server) handle(buf []byte, sender *net.UDPAddr) {
    req, err := ReadRequest(bufio.NewReader(bytes.NewReader(buf)))
    if err != nil {
        log.Println(err)
        return
    }
    if req.Method != "M-SEARCH" || req.Header.Get("man") != `"ssdp:discover"` {
        return
    }
    var mx uint
    if req.Header.Get("Host") == AddrString {
        mxHeader := req.Header.Get("mx")
        i, err := strconv.ParseUint(mxHeader, 0, 0)
        if err != nil {
            log.Printf("Invalid mx header %q: %s", mxHeader, err)
            return
        }
        mx = uint(i)
    } else {
        mx = 1
    }
    types := func(st string) []string {
        if st == "ssdp:all" {
            return me.allTypes()
        }
        for _, t := range me.allTypes() {
            if t == st {
                return []string{t}
            }
        }
        return nil
    }(req.Header.Get("st"))
    for _, ip := range func() (ret []net.IP) {
        addrs, err := me.Interface.Addrs()
        if err != nil {
            panic(err)
        }
        for _, addr := range addrs {
            if ip, ok := func() (net.IP, bool) {
                switch data := addr.(type) {
                case *net.IPNet:
                    if data.Contains(sender.IP) {
                        return data.IP, true
                    }
                    return nil, false
                case *net.IPAddr:
                    return data.IP, true
                }
                panic(addr)
            }(); ok {
                ret = append(ret, ip)
            }
        }
        return
    }() {
        for _, type_ := range types {
            resp := me.makeResponse(ip, type_, req)
            delay := time.Duration(rand.Int63n(int64(time.Second) * int64(mx)))
            me.delayedSend(delay, resp, sender)
        }
    }
}

func (me *Server) makeResponse(ip net.IP, targ string, req *http.Request) (ret []byte) {
    resp := &http.Response{
        StatusCode: 200,
        ProtoMajor: 1,
        ProtoMinor: 1,
        Header:     make(http.Header),
        Request:    req,
    }
    for _, pair := range [...][2]string{
        {"CACHE-CONTROL", fmt.Sprintf("max-age=%d", 5*me.NotifyInterval/2/time.Second)},
        {"EXT", ""},
        {"LOCATION", me.Location(ip)},
        {"SERVER", me.Server},
        {"ST", targ},
        {"USN", me.usnFromTarget(targ)},
    } {
        resp.Header.Set(pair[0], pair[1])
    }
    buf := &bytes.Buffer{}
    if err := resp.Write(buf); err != nil {
        panic(err)
    }
    return buf.Bytes()
}
91 vendor/github.com/anacrolix/dms/upnp/eventing.go generated vendored Normal file
@@ -0,0 +1,91 @@
package upnp

import (
    "crypto/rand"
    "encoding/xml"
    "fmt"
    "io"
    "log"
    "net/url"
    "regexp"
    "time"
)

// TODO: Why use namespace prefixes in PropertySet et al? Because the spec
// uses them, and I believe the Golang standard library XML spec implementers
// incorrectly assume that you can get away with just xmlns="".

// propertyset is the root element sent in an event callback.
type PropertySet struct {
    XMLName    struct{} `xml:"e:propertyset"`
    Properties []Property
    // This should be set to `"urn:schemas-upnp-org:event-1-0"`.
    Space string `xml:"xmlns:e,attr"`
}

// propertys provide namespacing to the contained variables.
type Property struct {
    XMLName  struct{} `xml:"e:property"`
    Variable Variable
}

// Represents an evented state variable that has sendEvents="yes" in its
// service spec.
type Variable struct {
    XMLName xml.Name
    Value   string `xml:",chardata"`
}

type subscriber struct {
    sid     string
    nextSeq uint32 // 0 for initial event, wraps from Uint32Max to 1.
    urls    []*url.URL
    expiry  time.Time
}

// Intended to eventually be an embeddable implementation for managing
// eventing for a service. Not complete.
type Eventing struct {
    subscribers map[string]*subscriber
}

func (me *Eventing) Subscribe(callback []*url.URL, timeoutSeconds int) (sid string, actualTimeout int, err error) {
    var uuid [16]byte
    io.ReadFull(rand.Reader, uuid[:])
    sid = FormatUUID(uuid[:])
    if _, ok := me.subscribers[sid]; ok {
        err = fmt.Errorf("already subscribed: %s", sid)
        return
    }
    ssr := &subscriber{
        sid:    sid,
        urls:   callback,
        expiry: time.Now().Add(time.Duration(timeoutSeconds) * time.Second),
    }
    if me.subscribers == nil {
        me.subscribers = make(map[string]*subscriber)
    }
    me.subscribers[sid] = ssr
    actualTimeout = int(ssr.expiry.Sub(time.Now()) / time.Second)
    return
}

func (me *Eventing) Unsubscribe(sid string) error {
    return nil
}

var callbackURLRegexp = regexp.MustCompile("<(.*?)>")

// Parse the CALLBACK HTTP header in an event subscription request. See UPnP
// Device Architecture 4.1.2.
func ParseCallbackURLs(callback string) (ret []*url.URL) {
    for _, match := range callbackURLRegexp.FindAllStringSubmatch(callback, -1) {
        _url, err := url.Parse(match[1])
        if err != nil {
            log.Printf("bad callback url: %q", match[1])
            continue
        }
        ret = append(ret, _url)
    }
    return
}
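ParseCallbackURLs pulls every angle-bracket-wrapped URL out of a CALLBACK subscription header. A quick sketch (assuming the vendored import path; the addresses are placeholders):

```go
package main

import (
    "fmt"

    "github.com/anacrolix/dms/upnp"
)

func main() {
    // A CALLBACK header may carry several URLs, each wrapped in <>.
    header := "<http://10.0.0.2:5000/events><http://10.0.0.3:5000/events>"
    for _, u := range upnp.ParseCallbackURLs(header) {
        fmt.Println(u.String())
    }
}
```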
159 vendor/github.com/anacrolix/dms/upnp/upnp.go generated vendored Normal file
@@ -0,0 +1,159 @@
package upnp

import (
    "encoding/xml"
    "errors"
    "fmt"
    "log"
    "regexp"
    "strconv"
    "strings"
)

var serviceURNRegexp *regexp.Regexp = regexp.MustCompile(`^urn:schemas-upnp-org:service:(\w+):(\d+)$`)

type ServiceURN struct {
    Type    string
    Version uint64
}

func (me ServiceURN) String() string {
    return fmt.Sprintf("urn:schemas-upnp-org:service:%s:%d", me.Type, me.Version)
}

func ParseServiceType(s string) (ret ServiceURN, err error) {
    matches := serviceURNRegexp.FindStringSubmatch(s)
    if matches == nil {
        err = errors.New(s)
        return
    }
    if len(matches) != 3 {
        log.Panicf("Invalid serviceURNRegexp ?")
    }
    ret.Type = matches[1]
    ret.Version, err = strconv.ParseUint(matches[2], 0, 0)
    return
}

type SoapAction struct {
    ServiceURN
    Action string
}

func ParseActionHTTPHeader(s string) (ret SoapAction, err error) {
    if s[0] != '"' || s[len(s)-1] != '"' {
        return
    }
    s = s[1 : len(s)-1]
    hashIndex := strings.LastIndex(s, "#")
    if hashIndex == -1 {
        return
    }
    ret.Action = s[hashIndex+1:]
    ret.ServiceURN, err = ParseServiceType(s[:hashIndex])
    return
}

type SpecVersion struct {
    Major int `xml:"major"`
    Minor int `xml:"minor"`
}

type Icon struct {
    Mimetype string `xml:"mimetype"`
    Width    int    `xml:"width"`
    Height   int    `xml:"height"`
    Depth    int    `xml:"depth"`
    URL      string `xml:"url"`
}

type Service struct {
    XMLName     xml.Name `xml:"service"`
    ServiceType string   `xml:"serviceType"`
    ServiceId   string   `xml:"serviceId"`
    SCPDURL     string
    ControlURL  string `xml:"controlURL"`
    EventSubURL string `xml:"eventSubURL"`
}

type Device struct {
    DeviceType   string `xml:"deviceType"`
    FriendlyName string `xml:"friendlyName"`
    Manufacturer string `xml:"manufacturer"`
    ModelName    string `xml:"modelName"`
    UDN          string
    IconList     []Icon    `xml:"iconList>icon"`
    ServiceList  []Service `xml:"serviceList>service"`
}

type DeviceDesc struct {
    XMLName     xml.Name    `xml:"urn:schemas-upnp-org:device-1-0 root"`
    SpecVersion SpecVersion `xml:"specVersion"`
    Device      Device      `xml:"device"`
}

type Error struct {
    XMLName xml.Name `xml:"urn:schemas-upnp-org:control-1-0 UPnPError"`
    Code    uint     `xml:"errorCode"`
    Desc    string   `xml:"errorDescription"`
}

func (e *Error) Error() string {
    return fmt.Sprintf("%d %s", e.Code, e.Desc)
}

const (
    InvalidActionErrorCode        = 401
    ActionFailedErrorCode         = 501
    ArgumentValueInvalidErrorCode = 600
)

var (
    InvalidActionError        = Errorf(401, "Invalid Action")
    ArgumentValueInvalidError = Errorf(600, "The argument value is invalid")
)

// Errorf creates an UPNP error from the given code and description
func Errorf(code uint, tpl string, args ...interface{}) *Error {
    return &Error{Code: code, Desc: fmt.Sprintf(tpl, args...)}
}

// ConvertError converts any error to an UPNP error
func ConvertError(err error) *Error {
    if err == nil {
        return nil
    }
    if e, ok := err.(*Error); ok {
        return e
    }
    return Errorf(ActionFailedErrorCode, err.Error())
}

type Action struct {
    Name      string
    Arguments []Argument
}

type Argument struct {
    Name            string
    Direction       string
    RelatedStateVar string
}

type SCPD struct {
    XMLName           xml.Name        `xml:"urn:schemas-upnp-org:service-1-0 scpd"`
    SpecVersion       SpecVersion     `xml:"specVersion"`
    ActionList        []Action        `xml:"actionList>action"`
    ServiceStateTable []StateVariable `xml:"serviceStateTable>stateVariable"`
}

type StateVariable struct {
    SendEvents    string    `xml:"sendEvents,attr"`
    Name          string    `xml:"name"`
    DataType      string    `xml:"dataType"`
    AllowedValues *[]string `xml:"allowedValueList>allowedValue,omitempty"`
}

func FormatUUID(buf []byte) string {
    return fmt.Sprintf("uuid:%x-%x-%x-%x-%x", buf[:4], buf[4:6], buf[6:8], buf[8:10], buf[10:16])
}
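ParseActionHTTPHeader splits a quoted SOAPACTION header into its service URN and action name. A quick sketch (assuming the vendored import path):

```go
package main

import (
    "fmt"

    "github.com/anacrolix/dms/upnp"
)

func main() {
    // SOAPACTION headers are quoted "urn#Action" strings.
    sa, err := upnp.ParseActionHTTPHeader(`"urn:schemas-upnp-org:service:ContentDirectory:1#Browse"`)
    if err != nil {
        panic(err)
    }
    // SoapAction embeds ServiceURN, so Type and Version are promoted.
    fmt.Println(sa.Type, sa.Version, sa.Action) // ContentDirectory 1 Browse
}
```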
45 vendor/github.com/anacrolix/dms/upnpav/upnpav.go generated vendored Normal file
@@ -0,0 +1,45 @@
package upnpav

import (
    "encoding/xml"
)

const (
    NoSuchObjectErrorCode = 701
)

type Resource struct {
    XMLName      xml.Name `xml:"res"`
    ProtocolInfo string   `xml:"protocolInfo,attr"`
    URL          string   `xml:",chardata"`
    Size         uint64   `xml:"size,attr,omitempty"`
    Bitrate      uint     `xml:"bitrate,attr,omitempty"`
    Duration     string   `xml:"duration,attr,omitempty"`
    Resolution   string   `xml:"resolution,attr,omitempty"`
}

type Container struct {
    Object
    XMLName    xml.Name `xml:"container"`
    ChildCount int      `xml:"childCount,attr"`
}

type Item struct {
    Object
    XMLName xml.Name `xml:"item"`
    Res     []Resource
}

type Object struct {
    ID          string `xml:"id,attr"`
    ParentID    string `xml:"parentID,attr"`
    Restricted  int    `xml:"restricted,attr"` // indicates whether the object is modifiable
    Class       string `xml:"upnp:class"`
    Icon        string `xml:"upnp:icon,omitempty"`
    Title       string `xml:"dc:title"`
    Artist      string `xml:"upnp:artist,omitempty"`
    Album       string `xml:"upnp:album,omitempty"`
    Genre       string `xml:"upnp:genre,omitempty"`
    AlbumArtURI string `xml:"upnp:albumArtURI,omitempty"`
    Searchable  int    `xml:"searchable,attr"`
}
15 vendor/github.com/goftp/server/socket.go generated vendored
@@ -14,6 +14,7 @@ import (
    "strings"
    "sync"
    "syscall"
    "time"
)

// DataSocket describes a data socket is used to send non-control data between the client and
@@ -208,13 +209,23 @@ func (socket *ftpPassiveSocket) GoListenAndServe(sessionID string) (err error) {
        return
    }

    var listener net.Listener
    listener, err = net.ListenTCP("tcp", laddr)
    var tcplistener *net.TCPListener
    tcplistener, err = net.ListenTCP("tcp", laddr)
    if err != nil {
        socket.logger.Print(sessionID, err)
        return
    }

    // The timeout, for a remote client to establish connection
    // with a PASV style data connection.
    const acceptTimeout = 60 * time.Second
    err = tcplistener.SetDeadline(time.Now().Add(acceptTimeout))
    if err != nil {
        socket.logger.Print(sessionID, err)
        return
    }

    var listener net.Listener = tcplistener
    add := listener.Addr()
    parts := strings.Split(add.String(), ":")
    port, err := strconv.Atoi(parts[len(parts)-1])
3 vendor/github.com/ncw/swift/README.md generated vendored
@@ -139,7 +139,7 @@ Contributors
- Cezar Sa Espinola <cezarsa@gmail.com>
- Sam Gunaratne <samgzeit@gmail.com>
- Richard Scothern <richard.scothern@gmail.com>
- Michel Couillard <couillard.michel@voxlog.ca>
- Michel Couillard <!--<couillard.michel@voxlog.ca>--> <michel.couillard@gmail.com>
- Christopher Waldon <ckwaldon@us.ibm.com>
- dennis <dai.haojun@gmail.com>
- hag <hannes.georg@xing.com>
@@ -152,3 +152,4 @@ Contributors
- Charles Hsu <charles0126@gmail.com>
- Omar Ali <omarali@users.noreply.github.com>
- Andreas Andersen <andreas@softwaredesign.se>
- kayrus <kay.diam@gmail.com>
114 vendor/github.com/ncw/swift/auth_v3.go generated vendored
@@ -3,14 +3,16 @@ package swift
import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
    "strings"
)

const (
    v3AuthMethodToken        = "token"
    v3AuthMethodPassword     = "password"
    v3CatalogTypeObjectStore = "object-store"
    v3AuthMethodToken                 = "token"
    v3AuthMethodPassword              = "password"
    v3AuthMethodApplicationCredential = "application_credential"
    v3CatalogTypeObjectStore          = "object-store"
)

// V3 Authentication request
@@ -19,9 +21,10 @@ const (
type v3AuthRequest struct {
    Auth struct {
        Identity struct {
            Methods  []string        `json:"methods"`
            Password *v3AuthPassword `json:"password,omitempty"`
            Token    *v3AuthToken    `json:"token,omitempty"`
            Methods               []string                     `json:"methods"`
            Password              *v3AuthPassword              `json:"password,omitempty"`
            Token                 *v3AuthToken                 `json:"token,omitempty"`
            ApplicationCredential *v3AuthApplicationCredential `json:"application_credential,omitempty"`
        } `json:"identity"`
        Scope *v3Scope `json:"scope,omitempty"`
    } `json:"auth"`
@@ -63,6 +66,13 @@ type v3AuthPassword struct {
    User v3User `json:"user"`
}

type v3AuthApplicationCredential struct {
    Id     string  `json:"id,omitempty"`
    Name   string  `json:"name,omitempty"`
    Secret string  `json:"secret,omitempty"`
    User   *v3User `json:"user,omitempty"`
}

// V3 Authentication response
type v3AuthResponse struct {
    Token struct {
@@ -117,7 +127,57 @@ func (auth *v3Auth) Request(c *Connection) (*http.Request, error) {

    v3 := v3AuthRequest{}

    if c.UserName == "" && c.UserId == "" {
    if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret != "" {
        var user *v3User

        if c.ApplicationCredentialId != "" {
            c.ApplicationCredentialName = ""
            user = &v3User{}
        }

        if user == nil && c.UserId != "" {
            // UserID could be used without the domain information
            user = &v3User{
                Id: c.UserId,
            }
        }

        if user == nil && c.UserName == "" {
            // Make sure that Username or UserID are provided
            return nil, fmt.Errorf("UserID or Name should be provided")
        }

        if user == nil && c.DomainId != "" {
            user = &v3User{
                Name: c.UserName,
                Domain: &v3Domain{
                    Id: c.DomainId,
                },
            }
        }

        if user == nil && c.Domain != "" {
            user = &v3User{
                Name: c.UserName,
                Domain: &v3Domain{
                    Name: c.Domain,
                },
            }
        }

        // Make sure that DomainID or DomainName are provided among Username
        if user == nil {
            return nil, fmt.Errorf("DomainID or Domain should be provided")
        }

        v3.Auth.Identity.Methods = []string{v3AuthMethodApplicationCredential}
        v3.Auth.Identity.ApplicationCredential = &v3AuthApplicationCredential{
            Id:     c.ApplicationCredentialId,
            Name:   c.ApplicationCredentialName,
            Secret: c.ApplicationCredentialSecret,
            User:   user,
        }
    } else if c.UserName == "" && c.UserId == "" {
        v3.Auth.Identity.Methods = []string{v3AuthMethodToken}
        v3.Auth.Identity.Token = &v3AuthToken{Id: c.ApiKey}
    } else {
@@ -140,27 +200,29 @@ func (auth *v3Auth) Request(c *Connection) (*http.Request, error) {
        v3.Auth.Identity.Password.User.Domain = domain
    }

    if c.TrustId != "" {
        v3.Auth.Scope = &v3Scope{Trust: &v3Trust{Id: c.TrustId}}
    } else if c.TenantId != "" || c.Tenant != "" {
    if v3.Auth.Identity.Methods[0] != v3AuthMethodApplicationCredential {
        if c.TrustId != "" {
            v3.Auth.Scope = &v3Scope{Trust: &v3Trust{Id: c.TrustId}}
        } else if c.TenantId != "" || c.Tenant != "" {

        v3.Auth.Scope = &v3Scope{Project: &v3Project{}}
            v3.Auth.Scope = &v3Scope{Project: &v3Project{}}

        if c.TenantId != "" {
            v3.Auth.Scope.Project.Id = c.TenantId
        } else if c.Tenant != "" {
            v3.Auth.Scope.Project.Name = c.Tenant
            switch {
            case c.TenantDomain != "":
                v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.TenantDomain}
            case c.TenantDomainId != "":
                v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.TenantDomainId}
            case c.Domain != "":
                v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.Domain}
            case c.DomainId != "":
                v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.DomainId}
            default:
                v3.Auth.Scope.Project.Domain = &v3Domain{Name: "Default"}
            if c.TenantId != "" {
                v3.Auth.Scope.Project.Id = c.TenantId
            } else if c.Tenant != "" {
                v3.Auth.Scope.Project.Name = c.Tenant
                switch {
                case c.TenantDomain != "":
                    v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.TenantDomain}
                case c.TenantDomainId != "":
                    v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.TenantDomainId}
                case c.Domain != "":
                    v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.Domain}
                case c.DomainId != "":
                    v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.DomainId}
                default:
                    v3.Auth.Scope.Project.Domain = &v3Domain{Name: "Default"}
                }
            }
        }
    }
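With the new application_credential method, a Connection can authenticate against Keystone v3 without a password. A minimal sketch (the auth URL and credential values are placeholders):

```go
package main

import (
    "github.com/ncw/swift"
)

func main() {
    // Application credentials replace the usual UserName/ApiKey pair;
    // either the credential ID or its name plus owner may be given.
    c := swift.Connection{
        AuthUrl:                     "https://keystone.example.com/v3",
        ApplicationCredentialId:     "8ef4ba0aa5e94d67a5f4f4a3e2b4c2d7",
        ApplicationCredentialSecret: "secret",
        AuthVersion:                 3,
    }
    if err := c.Authenticate(); err != nil {
        panic(err)
    }
}
```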
49 vendor/github.com/ncw/swift/swift.go generated vendored
@@ -96,26 +96,29 @@ const (
type Connection struct {
    // Parameters - fill these in before calling Authenticate
    // They are all optional except UserName, ApiKey and AuthUrl
    Domain         string            // User's domain name
    DomainId       string            // User's domain Id
    UserName       string            // UserName for api
    UserId         string            // User Id
    ApiKey         string            // Key for api access
    AuthUrl        string            // Auth URL
    Retries        int               // Retries on error (default is 3)
    UserAgent      string            // Http User agent (default goswift/1.0)
    ConnectTimeout time.Duration     // Connect channel timeout (default 10s)
    Timeout        time.Duration     // Data channel timeout (default 60s)
    Region         string            // Region to use eg "LON", "ORD" - default is use first region (v2,v3 auth only)
    AuthVersion    int               // Set to 1, 2 or 3 or leave at 0 for autodetect
    Internal       bool              // Set this to true to use the the internal / service network
    Tenant         string            // Name of the tenant (v2,v3 auth only)
    TenantId       string            // Id of the tenant (v2,v3 auth only)
    EndpointType   EndpointType      // Endpoint type (v2,v3 auth only) (default is public URL unless Internal is set)
    TenantDomain   string            // Name of the tenant's domain (v3 auth only), only needed if it differs from the user domain
    TenantDomainId string            // Id of the tenant's domain (v3 auth only), only needed if it differs the from user domain
    TrustId        string            // Id of the trust (v3 auth only)
    Transport      http.RoundTripper `json:"-" xml:"-"` // Optional specialised http.Transport (eg. for Google Appengine)
    Domain                      string            // User's domain name
    DomainId                    string            // User's domain Id
    UserName                    string            // UserName for api
    UserId                      string            // User Id
    ApiKey                      string            // Key for api access
    ApplicationCredentialId     string            // Application Credential ID
    ApplicationCredentialName   string            // Application Credential Name
    ApplicationCredentialSecret string            // Application Credential Secret
    AuthUrl                     string            // Auth URL
    Retries                     int               // Retries on error (default is 3)
    UserAgent                   string            // Http User agent (default goswift/1.0)
    ConnectTimeout              time.Duration     // Connect channel timeout (default 10s)
    Timeout                     time.Duration     // Data channel timeout (default 60s)
    Region                      string            // Region to use eg "LON", "ORD" - default is use first region (v2,v3 auth only)
    AuthVersion                 int               // Set to 1, 2 or 3 or leave at 0 for autodetect
    Internal                    bool              // Set this to true to use the the internal / service network
    Tenant                      string            // Name of the tenant (v2,v3 auth only)
    TenantId                    string            // Id of the tenant (v2,v3 auth only)
    EndpointType                EndpointType      // Endpoint type (v2,v3 auth only) (default is public URL unless Internal is set)
    TenantDomain                string            // Name of the tenant's domain (v3 auth only), only needed if it differs from the user domain
    TenantDomainId              string            // Id of the tenant's domain (v3 auth only), only needed if it differs the from user domain
    TrustId                     string            // Id of the trust (v3 auth only)
    Transport                   http.RoundTripper `json:"-" xml:"-"` // Optional specialised http.Transport (eg. for Google Appengine)
    // These are filled in after Authenticate is called as are the defaults for above
    StorageUrl string
    AuthToken  string
@@ -194,6 +197,9 @@ func setFromEnv(param interface{}, name string) (err error) {
// OS_USERNAME - UserName for api
// OS_USER_ID - User Id
// OS_PASSWORD - Key for api access
// OS_APPLICATION_CREDENTIAL_ID - Application Credential ID
// OS_APPLICATION_CREDENTIAL_NAME - Application Credential Name
// OS_APPLICATION_CREDENTIAL_SECRET - Application Credential Secret
// OS_USER_DOMAIN_NAME - User's domain name
// OS_USER_DOMAIN_ID - User's domain Id
// OS_PROJECT_NAME - Name of the project
@@ -227,6 +233,9 @@ func (c *Connection) ApplyEnvironment() (err error) {
        {&c.UserName, "OS_USERNAME"},
        {&c.UserId, "OS_USER_ID"},
        {&c.ApiKey, "OS_PASSWORD"},
        {&c.ApplicationCredentialId, "OS_APPLICATION_CREDENTIAL_ID"},
        {&c.ApplicationCredentialName, "OS_APPLICATION_CREDENTIAL_NAME"},
        {&c.ApplicationCredentialSecret, "OS_APPLICATION_CREDENTIAL_SECRET"},
        {&c.AuthUrl, "OS_AUTH_URL"},
        {&c.Retries, "GOSWIFT_RETRIES"},
        {&c.UserAgent, "GOSWIFT_USER_AGENT"},
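The same credentials can also come from the environment via ApplyEnvironment, using the OS_APPLICATION_CREDENTIAL_* variables documented above. A minimal sketch (placeholder values):

```go
package main

import (
    "os"

    "github.com/ncw/swift"
)

func main() {
    // Standard OpenStack environment variables, as a client shell
    // or clouds.yaml tooling would normally export them.
    os.Setenv("OS_AUTH_URL", "https://keystone.example.com/v3")
    os.Setenv("OS_APPLICATION_CREDENTIAL_ID", "8ef4ba0aa5e94d67a5f4f4a3e2b4c2d7")
    os.Setenv("OS_APPLICATION_CREDENTIAL_SECRET", "secret")

    var c swift.Connection
    if err := c.ApplyEnvironment(); err != nil {
        panic(err)
    }
    if err := c.Authenticate(); err != nil {
        panic(err)
    }
}
```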
41
vendor/golang.org/x/net/bpf/asm.go
generated
vendored
Normal file
41
vendor/golang.org/x/net/bpf/asm.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bpf

import "fmt"

// Assemble converts insts into raw instructions suitable for loading
// into a BPF virtual machine.
//
// Currently, no optimization is attempted, the assembled program flow
// is exactly as provided.
func Assemble(insts []Instruction) ([]RawInstruction, error) {
    ret := make([]RawInstruction, len(insts))
    var err error
    for i, inst := range insts {
        ret[i], err = inst.Assemble()
        if err != nil {
            return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err)
        }
    }
    return ret, nil
}

// Disassemble attempts to parse raw back into
// Instructions. Unrecognized RawInstructions are assumed to be an
// extension not implemented by this package, and are passed through
// unchanged to the output. The allDecoded value reports whether insts
// contains no RawInstructions.
func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) {
    insts = make([]Instruction, len(raw))
    allDecoded = true
    for i, r := range raw {
        insts[i] = r.Disassemble()
        if _, ok := insts[i].(RawInstruction); ok {
            allDecoded = false
        }
    }
    return insts, allDecoded
}

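A short sketch of how the two entry points above compose: Assemble produces the kernel-ready form, and Disassemble recovers typed instructions from it (allDecoded is false only when some opcode is not recognized). The four-instruction filter here is an assumption for illustration:

package main

import (
    "fmt"
    "log"

    "golang.org/x/net/bpf"
)

func main() {
    // Accept IPv4 frames (EtherType 0x0800), ignore everything else.
    prog := []bpf.Instruction{
        bpf.LoadAbsolute{Off: 12, Size: 2}, // A = EtherType
        bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x0800, SkipFalse: 1},
        bpf.RetConstant{Val: 4096}, // send up to 4k to userspace
        bpf.RetConstant{Val: 0},    // ignore
    }
    raw, err := bpf.Assemble(prog)
    if err != nil {
        log.Fatal(err)
    }
    insts, allDecoded := bpf.Disassemble(raw)
    fmt.Println(len(insts), allDecoded) // 4 true for this program
}
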
vendor/golang.org/x/net/bpf/constants.go (generated, vendored, new file, 222 lines)
@@ -0,0 +1,222 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bpf

// A Register is a register of the BPF virtual machine.
type Register uint16

const (
    // RegA is the accumulator register. RegA is always the
    // destination register of ALU operations.
    RegA Register = iota
    // RegX is the indirection register, used by LoadIndirect
    // operations.
    RegX
)

// An ALUOp is an arithmetic or logic operation.
type ALUOp uint16

// ALU binary operation types.
const (
    ALUOpAdd ALUOp = iota << 4
    ALUOpSub
    ALUOpMul
    ALUOpDiv
    ALUOpOr
    ALUOpAnd
    ALUOpShiftLeft
    ALUOpShiftRight
    aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type.
    ALUOpMod
    ALUOpXor
)

// A JumpTest is a comparison operator used in conditional jumps.
type JumpTest uint16

// Supported operators for conditional jumps.
// K can be RegX for JumpIfX.
const (
    // K == A
    JumpEqual JumpTest = iota
    // K != A
    JumpNotEqual
    // K > A
    JumpGreaterThan
    // K < A
    JumpLessThan
    // K >= A
    JumpGreaterOrEqual
    // K <= A
    JumpLessOrEqual
    // K & A != 0
    JumpBitsSet
    // K & A == 0
    JumpBitsNotSet
)

// An Extension is a function call provided by the kernel that
// performs advanced operations that are expensive or impossible
// within the BPF virtual machine.
//
// Extensions are only implemented by the Linux kernel.
//
// TODO: should we prune this list? Some of these extensions seem
// either broken or near-impossible to use correctly, whereas others
// (len, random, ifindex) are quite useful.
type Extension int

// Extension functions available in the Linux kernel.
const (
    // extOffset is the negative maximum number of instructions used
    // to load instructions by overloading the K argument.
    extOffset = -0x1000
    // ExtLen returns the length of the packet.
    ExtLen Extension = 1
    // ExtProto returns the packet's L3 protocol type.
    ExtProto Extension = 0
    // ExtType returns the packet's type (skb->pkt_type in the kernel)
    //
    // TODO: better documentation. How nice an API do we want to
    // provide for these esoteric extensions?
    ExtType Extension = 4
    // ExtPayloadOffset returns the offset of the packet payload, or
    // the first protocol header that the kernel does not know how to
    // parse.
    ExtPayloadOffset Extension = 52
    // ExtInterfaceIndex returns the index of the interface on which
    // the packet was received.
    ExtInterfaceIndex Extension = 8
    // ExtNetlinkAttr returns the netlink attribute of type X at
    // offset A.
    ExtNetlinkAttr Extension = 12
    // ExtNetlinkAttrNested returns the nested netlink attribute of
    // type X at offset A.
    ExtNetlinkAttrNested Extension = 16
    // ExtMark returns the packet's mark value.
    ExtMark Extension = 20
    // ExtQueue returns the packet's assigned hardware queue.
    ExtQueue Extension = 24
    // ExtLinkLayerType returns the packet's hardware address type
    // (e.g. Ethernet, Infiniband).
    ExtLinkLayerType Extension = 28
    // ExtRXHash returns the packet's receive hash.
    //
    // TODO: figure out what this rxhash actually is.
    ExtRXHash Extension = 32
    // ExtCPUID returns the ID of the CPU processing the current
    // packet.
    ExtCPUID Extension = 36
    // ExtVLANTag returns the packet's VLAN tag.
    ExtVLANTag Extension = 44
    // ExtVLANTagPresent returns non-zero if the packet has a VLAN
    // tag.
    //
    // TODO: I think this might be a lie: it reads bit 0x1000 of the
    // VLAN header, which changed meaning in recent revisions of the
    // spec - this extension may now return meaningless information.
    ExtVLANTagPresent Extension = 48
    // ExtVLANProto returns 0x8100 if the frame has a VLAN header,
    // 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some
    // other value if no VLAN information is present.
    ExtVLANProto Extension = 60
    // ExtRand returns a uniformly random uint32.
    ExtRand Extension = 56
)

// The following gives names to various bit patterns used in opcode construction.

const (
    opMaskCls uint16 = 0x7
    // opClsLoad masks
    opMaskLoadDest  = 0x01
    opMaskLoadWidth = 0x18
    opMaskLoadMode  = 0xe0
    // opClsALU & opClsJump
    opMaskOperand  = 0x08
    opMaskOperator = 0xf0
)

const (
    // +---------------+-----------------+---+---+---+
    // | AddrMode (3b) | LoadWidth (2b)  | 0 | 0 | 0 |
    // +---------------+-----------------+---+---+---+
    opClsLoadA uint16 = iota
    // +---------------+-----------------+---+---+---+
    // | AddrMode (3b) | LoadWidth (2b)  | 0 | 0 | 1 |
    // +---------------+-----------------+---+---+---+
    opClsLoadX
    // +---+---+---+---+---+---+---+---+
    // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
    // +---+---+---+---+---+---+---+---+
    opClsStoreA
    // +---+---+---+---+---+---+---+---+
    // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
    // +---+---+---+---+---+---+---+---+
    opClsStoreX
    // +---------------+-----------------+---+---+---+
    // | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 |
    // +---------------+-----------------+---+---+---+
    opClsALU
    // +-----------------------------+---+---+---+---+
    // |      TestOperator (4b)      | 0 | 1 | 0 | 1 |
    // +-----------------------------+---+---+---+---+
    opClsJump
    // +---+-------------------------+---+---+---+---+
    // | 0 | 0 | 0 |   RetSrc (1b)   | 0 | 1 | 1 | 0 |
    // +---+-------------------------+---+---+---+---+
    opClsReturn
    // +---+-------------------------+---+---+---+---+
    // | 0 | 0 | 0 |  TXAorTAX (1b)  | 0 | 1 | 1 | 1 |
    // +---+-------------------------+---+---+---+---+
    opClsMisc
)

const (
    opAddrModeImmediate uint16 = iota << 5
    opAddrModeAbsolute
    opAddrModeIndirect
    opAddrModeScratch
    opAddrModePacketLen // actually an extension, not an addressing mode.
    opAddrModeMemShift
)

const (
    opLoadWidth4 uint16 = iota << 3
    opLoadWidth2
    opLoadWidth1
)

// Operand for ALU and Jump instructions
type opOperand uint16

// Supported operand sources.
const (
    opOperandConstant opOperand = iota << 3
    opOperandX
)

// A jumpOp is a conditional jump condition.
type jumpOp uint16

// Supported jump conditions.
const (
    opJumpAlways jumpOp = iota << 4
    opJumpEqual
    opJumpGT
    opJumpGE
    opJumpSet
)

const (
    opRetSrcConstant uint16 = iota << 4
    opRetSrcA
)

const (
    opMiscTAX = 0x00
    opMiscTXA = 0x80
)

vendor/golang.org/x/net/bpf/doc.go (generated, vendored, new file, 82 lines)
@@ -0,0 +1,82 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*

Package bpf implements marshaling and unmarshaling of programs for the
Berkeley Packet Filter virtual machine, and provides a Go implementation
of the virtual machine.

BPF's main use is to specify a packet filter for network taps, so that
the kernel doesn't have to expensively copy every packet it sees to
userspace. However, it's been repurposed to other areas where running
user code in-kernel is needed. For example, Linux's seccomp uses BPF
to apply security policies to system calls. For simplicity, this
documentation refers only to packets, but other uses of BPF have their
own data payloads.

BPF programs run in a restricted virtual machine. It has almost no
access to kernel functions, and while conditional branches are
allowed, they can only jump forwards, to guarantee that there are no
infinite loops.

The virtual machine

The BPF VM is an accumulator machine. Its main register, called
register A, is an implicit source and destination in all arithmetic
and logic operations. The machine also has 16 scratch registers for
temporary storage, and an indirection register (register X) for
indirect memory access. All registers are 32 bits wide.

Each run of a BPF program is given one packet, which is placed in the
VM's read-only "main memory". LoadAbsolute and LoadIndirect
instructions can fetch up to 32 bits at a time into register A for
examination.

The goal of a BPF program is to produce and return a verdict (uint32),
which tells the kernel what to do with the packet. In the context of
packet filtering, the returned value is the number of bytes of the
packet to forward to userspace, or 0 to ignore the packet. Other
contexts like seccomp define their own return values.

In order to simplify programs, attempts to read past the end of the
packet terminate the program execution with a verdict of 0 (ignore
packet). This means that the vast majority of BPF programs don't need
to do any explicit bounds checking.

In addition to the bytes of the packet, some BPF programs have access
to extensions, which are essentially calls to kernel utility
functions. Currently, the only extensions supported by this package
are the Linux packet filter extensions.

Examples

This packet filter selects all ARP packets.

    bpf.Assemble([]bpf.Instruction{
        // Load "EtherType" field from the ethernet header.
        bpf.LoadAbsolute{Off: 12, Size: 2},
        // Skip over the next instruction if EtherType is not ARP.
        bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
        // Verdict is "send up to 4k of the packet to userspace."
        bpf.RetConstant{Val: 4096},
        // Verdict is "ignore packet."
        bpf.RetConstant{Val: 0},
    })

This packet filter captures a random 1% sample of traffic.

    bpf.Assemble([]bpf.Instruction{
        // Get a 32-bit random number from the Linux kernel.
        bpf.LoadExtension{Num: bpf.ExtRand},
        // 1% dice roll? (1<<32/100 is one hundredth of the uint32 range;
        // note that 2^32/100 would be an XOR in Go, not an exponent.)
        bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 1 << 32 / 100, SkipFalse: 1},
        // Capture.
        bpf.RetConstant{Val: 4096},
        // Ignore.
        bpf.RetConstant{Val: 0},
    })

*/
package bpf // import "golang.org/x/net/bpf"

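To make the ARP example above concrete, a hypothetical standalone program can print what Assemble actually emits; the trailing comment shows the expected classic-BPF four-tuples, the same encoding tcpdump -dd prints for an equivalent filter:

package main

import (
    "fmt"
    "log"

    "golang.org/x/net/bpf"
)

func main() {
    raw, err := bpf.Assemble([]bpf.Instruction{
        bpf.LoadAbsolute{Off: 12, Size: 2},
        bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
        bpf.RetConstant{Val: 4096},
        bpf.RetConstant{Val: 0},
    })
    if err != nil {
        log.Fatal(err)
    }
    for _, ri := range raw {
        fmt.Printf("{0x%02x, %d, %d, 0x%08x}\n", ri.Op, ri.Jt, ri.Jf, ri.K)
    }
    // Expected output:
    // {0x28, 0, 0, 0x0000000c}
    // {0x15, 0, 1, 0x00000806}
    // {0x06, 0, 0, 0x00001000}
    // {0x06, 0, 0, 0x00000000}
}
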
vendor/golang.org/x/net/bpf/instructions.go (generated, vendored, new file, 726 lines)
@@ -0,0 +1,726 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bpf

import "fmt"

// An Instruction is one instruction executed by the BPF virtual
// machine.
type Instruction interface {
    // Assemble assembles the Instruction into a RawInstruction.
    Assemble() (RawInstruction, error)
}

// A RawInstruction is a raw BPF virtual machine instruction.
type RawInstruction struct {
    // Operation to execute.
    Op uint16
    // For conditional jump instructions, the number of instructions
    // to skip if the condition is true/false.
    Jt uint8
    Jf uint8
    // Constant parameter. The meaning depends on the Op.
    K uint32
}

// Assemble implements the Instruction Assemble method.
func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil }

// Disassemble parses ri into an Instruction and returns it. If ri is
// not recognized by this package, ri itself is returned.
func (ri RawInstruction) Disassemble() Instruction {
    switch ri.Op & opMaskCls {
    case opClsLoadA, opClsLoadX:
        reg := Register(ri.Op & opMaskLoadDest)
        sz := 0
        switch ri.Op & opMaskLoadWidth {
        case opLoadWidth4:
            sz = 4
        case opLoadWidth2:
            sz = 2
        case opLoadWidth1:
            sz = 1
        default:
            return ri
        }
        switch ri.Op & opMaskLoadMode {
        case opAddrModeImmediate:
            if sz != 4 {
                return ri
            }
            return LoadConstant{Dst: reg, Val: ri.K}
        case opAddrModeScratch:
            if sz != 4 || ri.K > 15 {
                return ri
            }
            return LoadScratch{Dst: reg, N: int(ri.K)}
        case opAddrModeAbsolute:
            if ri.K > extOffset+0xffffffff {
                return LoadExtension{Num: Extension(-extOffset + ri.K)}
            }
            return LoadAbsolute{Size: sz, Off: ri.K}
        case opAddrModeIndirect:
            return LoadIndirect{Size: sz, Off: ri.K}
        case opAddrModePacketLen:
            if sz != 4 {
                return ri
            }
            return LoadExtension{Num: ExtLen}
        case opAddrModeMemShift:
            return LoadMemShift{Off: ri.K}
        default:
            return ri
        }

    case opClsStoreA:
        if ri.Op != opClsStoreA || ri.K > 15 {
            return ri
        }
        return StoreScratch{Src: RegA, N: int(ri.K)}

    case opClsStoreX:
        if ri.Op != opClsStoreX || ri.K > 15 {
            return ri
        }
        return StoreScratch{Src: RegX, N: int(ri.K)}

    case opClsALU:
        switch op := ALUOp(ri.Op & opMaskOperator); op {
        case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor:
            switch operand := opOperand(ri.Op & opMaskOperand); operand {
            case opOperandX:
                return ALUOpX{Op: op}
            case opOperandConstant:
                return ALUOpConstant{Op: op, Val: ri.K}
            default:
                return ri
            }
        case aluOpNeg:
            return NegateA{}
        default:
            return ri
        }

    case opClsJump:
        switch op := jumpOp(ri.Op & opMaskOperator); op {
        case opJumpAlways:
            return Jump{Skip: ri.K}
        case opJumpEqual, opJumpGT, opJumpGE, opJumpSet:
            cond, skipTrue, skipFalse := jumpOpToTest(op, ri.Jt, ri.Jf)
            switch operand := opOperand(ri.Op & opMaskOperand); operand {
            case opOperandX:
                return JumpIfX{Cond: cond, SkipTrue: skipTrue, SkipFalse: skipFalse}
            case opOperandConstant:
                return JumpIf{Cond: cond, Val: ri.K, SkipTrue: skipTrue, SkipFalse: skipFalse}
            default:
                return ri
            }
        default:
            return ri
        }

    case opClsReturn:
        switch ri.Op {
        case opClsReturn | opRetSrcA:
            return RetA{}
        case opClsReturn | opRetSrcConstant:
            return RetConstant{Val: ri.K}
        default:
            return ri
        }

    case opClsMisc:
        switch ri.Op {
        case opClsMisc | opMiscTAX:
            return TAX{}
        case opClsMisc | opMiscTXA:
            return TXA{}
        default:
            return ri
        }

    default:
        panic("unreachable") // switch is exhaustive on the bit pattern
    }
}

func jumpOpToTest(op jumpOp, skipTrue uint8, skipFalse uint8) (JumpTest, uint8, uint8) {
    var test JumpTest

    // Decode "fake" jump conditions that don't appear in machine code
    // Ensures the Assemble -> Disassemble stage recreates the same instructions
    // See https://github.com/golang/go/issues/18470
    if skipTrue == 0 {
        switch op {
        case opJumpEqual:
            test = JumpNotEqual
        case opJumpGT:
            test = JumpLessOrEqual
        case opJumpGE:
            test = JumpLessThan
        case opJumpSet:
            test = JumpBitsNotSet
        }

        return test, skipFalse, 0
    }

    switch op {
    case opJumpEqual:
        test = JumpEqual
    case opJumpGT:
        test = JumpGreaterThan
    case opJumpGE:
        test = JumpGreaterOrEqual
    case opJumpSet:
        test = JumpBitsSet
    }

    return test, skipTrue, skipFalse
}

// LoadConstant loads Val into register Dst.
type LoadConstant struct {
    Dst Register
    Val uint32
}

// Assemble implements the Instruction Assemble method.
func (a LoadConstant) Assemble() (RawInstruction, error) {
    return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val)
}

// String returns the instruction in assembler notation.
func (a LoadConstant) String() string {
    switch a.Dst {
    case RegA:
        return fmt.Sprintf("ld #%d", a.Val)
    case RegX:
        return fmt.Sprintf("ldx #%d", a.Val)
    default:
        return fmt.Sprintf("unknown instruction: %#v", a)
    }
}

// LoadScratch loads scratch[N] into register Dst.
type LoadScratch struct {
    Dst Register
    N   int // 0-15
}

// Assemble implements the Instruction Assemble method.
func (a LoadScratch) Assemble() (RawInstruction, error) {
    if a.N < 0 || a.N > 15 {
        return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
    }
    return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N))
}

// String returns the instruction in assembler notation.
func (a LoadScratch) String() string {
    switch a.Dst {
    case RegA:
        return fmt.Sprintf("ld M[%d]", a.N)
    case RegX:
        return fmt.Sprintf("ldx M[%d]", a.N)
    default:
        return fmt.Sprintf("unknown instruction: %#v", a)
    }
}

// LoadAbsolute loads packet[Off:Off+Size] as an integer value into
// register A.
type LoadAbsolute struct {
    Off  uint32
    Size int // 1, 2 or 4
}

// Assemble implements the Instruction Assemble method.
func (a LoadAbsolute) Assemble() (RawInstruction, error) {
    return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off)
}

// String returns the instruction in assembler notation.
func (a LoadAbsolute) String() string {
    switch a.Size {
    case 1: // byte
        return fmt.Sprintf("ldb [%d]", a.Off)
    case 2: // half word
        return fmt.Sprintf("ldh [%d]", a.Off)
    case 4: // word
        if a.Off > extOffset+0xffffffff {
            return LoadExtension{Num: Extension(a.Off + 0x1000)}.String()
        }
        return fmt.Sprintf("ld [%d]", a.Off)
    default:
        return fmt.Sprintf("unknown instruction: %#v", a)
    }
}

// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value
// into register A.
type LoadIndirect struct {
    Off  uint32
    Size int // 1, 2 or 4
}

// Assemble implements the Instruction Assemble method.
func (a LoadIndirect) Assemble() (RawInstruction, error) {
    return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off)
}

// String returns the instruction in assembler notation.
func (a LoadIndirect) String() string {
    switch a.Size {
    case 1: // byte
        return fmt.Sprintf("ldb [x + %d]", a.Off)
    case 2: // half word
        return fmt.Sprintf("ldh [x + %d]", a.Off)
    case 4: // word
        return fmt.Sprintf("ld [x + %d]", a.Off)
    default:
        return fmt.Sprintf("unknown instruction: %#v", a)
    }
}

// LoadMemShift multiplies the low 4 bits of the byte at packet[Off]
// by 4 and stores the result in register X.
//
// This instruction is mainly useful to load into X the length of an
// IPv4 packet header in a single instruction, rather than have to do
// the arithmetic on the header's first byte by hand.
type LoadMemShift struct {
    Off uint32
}

// Assemble implements the Instruction Assemble method.
func (a LoadMemShift) Assemble() (RawInstruction, error) {
    return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off)
}

// String returns the instruction in assembler notation.
func (a LoadMemShift) String() string {
    return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off)
}

// LoadExtension invokes a linux-specific extension and stores the
// result in register A.
type LoadExtension struct {
    Num Extension
}

// Assemble implements the Instruction Assemble method.
func (a LoadExtension) Assemble() (RawInstruction, error) {
    if a.Num == ExtLen {
        return assembleLoad(RegA, 4, opAddrModePacketLen, 0)
    }
    return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num))
}

// String returns the instruction in assembler notation.
func (a LoadExtension) String() string {
    switch a.Num {
    case ExtLen:
        return "ld #len"
    case ExtProto:
        return "ld #proto"
    case ExtType:
        return "ld #type"
    case ExtPayloadOffset:
        return "ld #poff"
    case ExtInterfaceIndex:
        return "ld #ifidx"
    case ExtNetlinkAttr:
        return "ld #nla"
    case ExtNetlinkAttrNested:
        return "ld #nlan"
    case ExtMark:
        return "ld #mark"
    case ExtQueue:
        return "ld #queue"
    case ExtLinkLayerType:
        return "ld #hatype"
    case ExtRXHash:
        return "ld #rxhash"
    case ExtCPUID:
        return "ld #cpu"
    case ExtVLANTag:
        return "ld #vlan_tci"
    case ExtVLANTagPresent:
        return "ld #vlan_avail"
    case ExtVLANProto:
        return "ld #vlan_tpid"
    case ExtRand:
        return "ld #rand"
    default:
        return fmt.Sprintf("unknown instruction: %#v", a)
    }
}

// StoreScratch stores register Src into scratch[N].
type StoreScratch struct {
    Src Register
    N   int // 0-15
}

// Assemble implements the Instruction Assemble method.
func (a StoreScratch) Assemble() (RawInstruction, error) {
    if a.N < 0 || a.N > 15 {
        return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
    }
    var op uint16
    switch a.Src {
    case RegA:
        op = opClsStoreA
    case RegX:
        op = opClsStoreX
    default:
        return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src)
    }

    return RawInstruction{
        Op: op,
        K:  uint32(a.N),
    }, nil
}

// String returns the instruction in assembler notation.
func (a StoreScratch) String() string {
    switch a.Src {
    case RegA:
        return fmt.Sprintf("st M[%d]", a.N)
    case RegX:
        return fmt.Sprintf("stx M[%d]", a.N)
    default:
        return fmt.Sprintf("unknown instruction: %#v", a)
    }
}

// ALUOpConstant executes A = A <Op> Val.
type ALUOpConstant struct {
    Op  ALUOp
    Val uint32
}

// Assemble implements the Instruction Assemble method.
func (a ALUOpConstant) Assemble() (RawInstruction, error) {
    return RawInstruction{
        Op: opClsALU | uint16(opOperandConstant) | uint16(a.Op),
        K:  a.Val,
    }, nil
}

// String returns the instruction in assembler notation.
func (a ALUOpConstant) String() string {
    switch a.Op {
    case ALUOpAdd:
        return fmt.Sprintf("add #%d", a.Val)
    case ALUOpSub:
        return fmt.Sprintf("sub #%d", a.Val)
    case ALUOpMul:
        return fmt.Sprintf("mul #%d", a.Val)
    case ALUOpDiv:
        return fmt.Sprintf("div #%d", a.Val)
    case ALUOpMod:
        return fmt.Sprintf("mod #%d", a.Val)
    case ALUOpAnd:
        return fmt.Sprintf("and #%d", a.Val)
    case ALUOpOr:
        return fmt.Sprintf("or #%d", a.Val)
    case ALUOpXor:
        return fmt.Sprintf("xor #%d", a.Val)
    case ALUOpShiftLeft:
        return fmt.Sprintf("lsh #%d", a.Val)
    case ALUOpShiftRight:
        return fmt.Sprintf("rsh #%d", a.Val)
    default:
        return fmt.Sprintf("unknown instruction: %#v", a)
    }
}

// ALUOpX executes A = A <Op> X.
type ALUOpX struct {
    Op ALUOp
}

// Assemble implements the Instruction Assemble method.
func (a ALUOpX) Assemble() (RawInstruction, error) {
    return RawInstruction{
        Op: opClsALU | uint16(opOperandX) | uint16(a.Op),
    }, nil
}

// String returns the instruction in assembler notation.
func (a ALUOpX) String() string {
    switch a.Op {
    case ALUOpAdd:
        return "add x"
    case ALUOpSub:
        return "sub x"
    case ALUOpMul:
        return "mul x"
    case ALUOpDiv:
        return "div x"
    case ALUOpMod:
        return "mod x"
    case ALUOpAnd:
        return "and x"
    case ALUOpOr:
        return "or x"
    case ALUOpXor:
        return "xor x"
    case ALUOpShiftLeft:
        return "lsh x"
    case ALUOpShiftRight:
        return "rsh x"
    default:
        return fmt.Sprintf("unknown instruction: %#v", a)
    }
}

// NegateA executes A = -A.
type NegateA struct{}

// Assemble implements the Instruction Assemble method.
func (a NegateA) Assemble() (RawInstruction, error) {
    return RawInstruction{
        Op: opClsALU | uint16(aluOpNeg),
    }, nil
}

// String returns the instruction in assembler notation.
func (a NegateA) String() string {
    return "neg"
}

// Jump skips the following Skip instructions in the program.
type Jump struct {
    Skip uint32
}

// Assemble implements the Instruction Assemble method.
func (a Jump) Assemble() (RawInstruction, error) {
    return RawInstruction{
        Op: opClsJump | uint16(opJumpAlways),
        K:  a.Skip,
    }, nil
}

// String returns the instruction in assembler notation.
func (a Jump) String() string {
    return fmt.Sprintf("ja %d", a.Skip)
}

// JumpIf skips the following Skip instructions in the program if A
// <Cond> Val is true.
type JumpIf struct {
    Cond      JumpTest
    Val       uint32
    SkipTrue  uint8
    SkipFalse uint8
}

// Assemble implements the Instruction Assemble method.
func (a JumpIf) Assemble() (RawInstruction, error) {
    return jumpToRaw(a.Cond, opOperandConstant, a.Val, a.SkipTrue, a.SkipFalse)
}

// String returns the instruction in assembler notation.
func (a JumpIf) String() string {
    return jumpToString(a.Cond, fmt.Sprintf("#%d", a.Val), a.SkipTrue, a.SkipFalse)
}

// JumpIfX skips the following Skip instructions in the program if A
// <Cond> X is true.
type JumpIfX struct {
    Cond      JumpTest
    SkipTrue  uint8
    SkipFalse uint8
}

// Assemble implements the Instruction Assemble method.
func (a JumpIfX) Assemble() (RawInstruction, error) {
    return jumpToRaw(a.Cond, opOperandX, 0, a.SkipTrue, a.SkipFalse)
}

// String returns the instruction in assembler notation.
func (a JumpIfX) String() string {
    return jumpToString(a.Cond, "x", a.SkipTrue, a.SkipFalse)
}

// jumpToRaw assembles a jump instruction into a RawInstruction
func jumpToRaw(test JumpTest, operand opOperand, k uint32, skipTrue, skipFalse uint8) (RawInstruction, error) {
    var (
        cond jumpOp
        flip bool
    )
    switch test {
    case JumpEqual:
        cond = opJumpEqual
    case JumpNotEqual:
        cond, flip = opJumpEqual, true
    case JumpGreaterThan:
        cond = opJumpGT
    case JumpLessThan:
        cond, flip = opJumpGE, true
    case JumpGreaterOrEqual:
        cond = opJumpGE
    case JumpLessOrEqual:
        cond, flip = opJumpGT, true
    case JumpBitsSet:
        cond = opJumpSet
    case JumpBitsNotSet:
        cond, flip = opJumpSet, true
    default:
        return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", test)
    }
    jt, jf := skipTrue, skipFalse
    if flip {
        jt, jf = jf, jt
    }
    return RawInstruction{
        Op: opClsJump | uint16(cond) | uint16(operand),
        Jt: jt,
        Jf: jf,
        K:  k,
    }, nil
}

// jumpToString converts a jump instruction to assembler notation
func jumpToString(cond JumpTest, operand string, skipTrue, skipFalse uint8) string {
    switch cond {
    // K == A
    case JumpEqual:
        return conditionalJump(operand, skipTrue, skipFalse, "jeq", "jneq")
    // K != A
    case JumpNotEqual:
        return fmt.Sprintf("jneq %s,%d", operand, skipTrue)
    // K > A
    case JumpGreaterThan:
        return conditionalJump(operand, skipTrue, skipFalse, "jgt", "jle")
    // K < A
    case JumpLessThan:
        return fmt.Sprintf("jlt %s,%d", operand, skipTrue)
    // K >= A
    case JumpGreaterOrEqual:
        return conditionalJump(operand, skipTrue, skipFalse, "jge", "jlt")
    // K <= A
    case JumpLessOrEqual:
        return fmt.Sprintf("jle %s,%d", operand, skipTrue)
    // K & A != 0
    case JumpBitsSet:
        if skipFalse > 0 {
            return fmt.Sprintf("jset %s,%d,%d", operand, skipTrue, skipFalse)
        }
        return fmt.Sprintf("jset %s,%d", operand, skipTrue)
    // K & A == 0, there is no assembler instruction for JumpBitNotSet, use JumpBitSet and invert skips
    case JumpBitsNotSet:
        return jumpToString(JumpBitsSet, operand, skipFalse, skipTrue)
    default:
        return fmt.Sprintf("unknown JumpTest %#v", cond)
    }
}

func conditionalJump(operand string, skipTrue, skipFalse uint8, positiveJump, negativeJump string) string {
    if skipTrue > 0 {
        if skipFalse > 0 {
            return fmt.Sprintf("%s %s,%d,%d", positiveJump, operand, skipTrue, skipFalse)
        }
        return fmt.Sprintf("%s %s,%d", positiveJump, operand, skipTrue)
    }
    return fmt.Sprintf("%s %s,%d", negativeJump, operand, skipFalse)
}

// RetA exits the BPF program, returning the value of register A.
type RetA struct{}

// Assemble implements the Instruction Assemble method.
func (a RetA) Assemble() (RawInstruction, error) {
    return RawInstruction{
        Op: opClsReturn | opRetSrcA,
    }, nil
}

// String returns the instruction in assembler notation.
func (a RetA) String() string {
    return "ret a"
}

// RetConstant exits the BPF program, returning a constant value.
type RetConstant struct {
    Val uint32
}

// Assemble implements the Instruction Assemble method.
func (a RetConstant) Assemble() (RawInstruction, error) {
    return RawInstruction{
        Op: opClsReturn | opRetSrcConstant,
        K:  a.Val,
    }, nil
}

// String returns the instruction in assembler notation.
func (a RetConstant) String() string {
    return fmt.Sprintf("ret #%d", a.Val)
}

// TXA copies the value of register X to register A.
type TXA struct{}

// Assemble implements the Instruction Assemble method.
func (a TXA) Assemble() (RawInstruction, error) {
    return RawInstruction{
        Op: opClsMisc | opMiscTXA,
    }, nil
}

// String returns the instruction in assembler notation.
func (a TXA) String() string {
    return "txa"
}

// TAX copies the value of register A to register X.
type TAX struct{}

// Assemble implements the Instruction Assemble method.
func (a TAX) Assemble() (RawInstruction, error) {
    return RawInstruction{
        Op: opClsMisc | opMiscTAX,
    }, nil
}

// String returns the instruction in assembler notation.
func (a TAX) String() string {
    return "tax"
}

func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) {
    var (
        cls uint16
        sz  uint16
    )
    switch dst {
    case RegA:
        cls = opClsLoadA
    case RegX:
        cls = opClsLoadX
    default:
        return RawInstruction{}, fmt.Errorf("invalid target register %v", dst)
    }
    switch loadSize {
    case 1:
        sz = opLoadWidth1
    case 2:
        sz = opLoadWidth2
    case 4:
        sz = opLoadWidth4
    default:
        return RawInstruction{}, fmt.Errorf("invalid load byte length %d", loadSize)
    }
    return RawInstruction{
        Op: cls | sz | mode,
        K:  k,
    }, nil
}

vendor/golang.org/x/net/bpf/setter.go (generated, vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bpf

// A Setter is a type which can attach a compiled BPF filter to itself.
type Setter interface {
    SetBPF(filter []RawInstruction) error
}

vendor/golang.org/x/net/bpf/vm.go (generated, vendored, new file, 150 lines)
@@ -0,0 +1,150 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bpf

import (
    "errors"
    "fmt"
)

// A VM is an emulated BPF virtual machine.
type VM struct {
    filter []Instruction
}

// NewVM returns a new VM using the input BPF program.
func NewVM(filter []Instruction) (*VM, error) {
    if len(filter) == 0 {
        return nil, errors.New("one or more Instructions must be specified")
    }

    for i, ins := range filter {
        check := len(filter) - (i + 1)
        switch ins := ins.(type) {
        // Check for out-of-bounds jumps in instructions
        case Jump:
            if check <= int(ins.Skip) {
                return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip)
            }
        case JumpIf:
            if check <= int(ins.SkipTrue) {
                return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue)
            }
            if check <= int(ins.SkipFalse) {
                return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse)
            }
        case JumpIfX:
            if check <= int(ins.SkipTrue) {
                return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue)
            }
            if check <= int(ins.SkipFalse) {
                return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse)
            }
        // Check for division or modulus by zero
        case ALUOpConstant:
            if ins.Val != 0 {
                break
            }

            switch ins.Op {
            case ALUOpDiv, ALUOpMod:
                return nil, errors.New("cannot divide by zero using ALUOpConstant")
            }
        // Check for unknown extensions
        case LoadExtension:
            switch ins.Num {
            case ExtLen:
            default:
                return nil, fmt.Errorf("extension %d not implemented", ins.Num)
            }
        }
    }

    // Make sure last instruction is a return instruction
    switch filter[len(filter)-1].(type) {
    case RetA, RetConstant:
    default:
        return nil, errors.New("BPF program must end with RetA or RetConstant")
    }

    // Though our VM works using disassembled instructions, we
    // attempt to assemble the input filter anyway to ensure it is compatible
    // with an operating system VM.
    _, err := Assemble(filter)

    return &VM{
        filter: filter,
    }, err
}

// Run runs the VM's BPF program against the input bytes.
// Run returns the number of bytes accepted by the BPF program, and any errors
// which occurred while processing the program.
func (v *VM) Run(in []byte) (int, error) {
    var (
        // Registers of the virtual machine
        regA       uint32
        regX       uint32
        regScratch [16]uint32

        // OK is true if the program should continue processing the next
        // instruction, or false if not, causing the loop to break
        ok = true
    )

    // TODO(mdlayher): implement:
    // - NegateA:
    //   - would require a change from uint32 registers to int32
    //     registers

    // TODO(mdlayher): add interop tests that check signedness of ALU
    // operations against kernel implementation, and make sure Go
    // implementation matches behavior

    for i := 0; i < len(v.filter) && ok; i++ {
        ins := v.filter[i]

        switch ins := ins.(type) {
        case ALUOpConstant:
            regA = aluOpConstant(ins, regA)
        case ALUOpX:
            regA, ok = aluOpX(ins, regA, regX)
        case Jump:
            i += int(ins.Skip)
        case JumpIf:
            jump := jumpIf(ins, regA)
            i += jump
        case JumpIfX:
            jump := jumpIfX(ins, regA, regX)
            i += jump
        case LoadAbsolute:
            regA, ok = loadAbsolute(ins, in)
        case LoadConstant:
            regA, regX = loadConstant(ins, regA, regX)
        case LoadExtension:
            regA = loadExtension(ins, in)
        case LoadIndirect:
            regA, ok = loadIndirect(ins, in, regX)
        case LoadMemShift:
            regX, ok = loadMemShift(ins, in)
        case LoadScratch:
            regA, regX = loadScratch(ins, regScratch, regA, regX)
        case RetA:
            return int(regA), nil
        case RetConstant:
            return int(ins.Val), nil
        case StoreScratch:
            regScratch = storeScratch(ins, regScratch, regA, regX)
        case TAX:
            regX = regA
        case TXA:
            regA = regX
        default:
            return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins)
        }
    }

    return 0, nil
}

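A small usage sketch of NewVM and Run, reusing the ARP filter from the package documentation; the verdicts follow the semantics described there, bytes to accept or 0 to ignore. The 14-byte frames are synthetic test inputs:

package main

import (
    "fmt"
    "log"

    "golang.org/x/net/bpf"
)

func main() {
    vm, err := bpf.NewVM([]bpf.Instruction{
        bpf.LoadAbsolute{Off: 12, Size: 2},
        bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
        bpf.RetConstant{Val: 4096},
        bpf.RetConstant{Val: 0},
    })
    if err != nil {
        log.Fatal(err)
    }

    // Minimal Ethernet header with EtherType 0x0806 (ARP) at offset 12.
    arp := make([]byte, 14)
    arp[12], arp[13] = 0x08, 0x06
    out, err := vm.Run(arp)
    fmt.Println(out, err) // 4096 <nil>

    // The same frame with EtherType 0x0800 (IPv4) is ignored.
    ip := make([]byte, 14)
    ip[12], ip[13] = 0x08, 0x00
    out, err = vm.Run(ip)
    fmt.Println(out, err) // 0 <nil>
}
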
vendor/golang.org/x/net/bpf/vm_instructions.go (generated, vendored, new file, 181 lines)
@@ -0,0 +1,181 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bpf

import (
    "encoding/binary"
    "fmt"
)

func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 {
    return aluOpCommon(ins.Op, regA, ins.Val)
}

func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) {
    // Guard against division or modulus by zero by terminating
    // the program, as the OS BPF VM does
    if regX == 0 {
        switch ins.Op {
        case ALUOpDiv, ALUOpMod:
            return 0, false
        }
    }

    return aluOpCommon(ins.Op, regA, regX), true
}

func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 {
    switch op {
    case ALUOpAdd:
        return regA + value
    case ALUOpSub:
        return regA - value
    case ALUOpMul:
        return regA * value
    case ALUOpDiv:
        // Division by zero not permitted by NewVM and aluOpX checks
        return regA / value
    case ALUOpOr:
        return regA | value
    case ALUOpAnd:
        return regA & value
    case ALUOpShiftLeft:
        return regA << value
    case ALUOpShiftRight:
        return regA >> value
    case ALUOpMod:
        // Modulus by zero not permitted by NewVM and aluOpX checks
        return regA % value
    case ALUOpXor:
        return regA ^ value
    default:
        return regA
    }
}

func jumpIf(ins JumpIf, regA uint32) int {
    return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, ins.Val)
}

func jumpIfX(ins JumpIfX, regA uint32, regX uint32) int {
    return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, regX)
}

func jumpIfCommon(cond JumpTest, skipTrue, skipFalse uint8, regA uint32, value uint32) int {
    var ok bool

    switch cond {
    case JumpEqual:
        ok = regA == value
    case JumpNotEqual:
        ok = regA != value
    case JumpGreaterThan:
        ok = regA > value
    case JumpLessThan:
        ok = regA < value
    case JumpGreaterOrEqual:
        ok = regA >= value
    case JumpLessOrEqual:
        ok = regA <= value
    case JumpBitsSet:
        ok = (regA & value) != 0
    case JumpBitsNotSet:
        ok = (regA & value) == 0
    }

    if ok {
        return int(skipTrue)
    }

    return int(skipFalse)
}

func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) {
    offset := int(ins.Off)
    size := int(ins.Size)

    return loadCommon(in, offset, size)
}

func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) {
    switch ins.Dst {
    case RegA:
        regA = ins.Val
    case RegX:
        regX = ins.Val
    }

    return regA, regX
}

func loadExtension(ins LoadExtension, in []byte) uint32 {
    switch ins.Num {
    case ExtLen:
        return uint32(len(in))
    default:
        panic(fmt.Sprintf("unimplemented extension: %d", ins.Num))
    }
}

func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) {
    offset := int(ins.Off) + int(regX)
    size := int(ins.Size)

    return loadCommon(in, offset, size)
}

func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) {
    offset := int(ins.Off)

    // The instruction reads one byte at offset, so bounds-check for a
    // one-byte access rather than zero.
    if !inBounds(len(in), offset, 1) {
        return 0, false
    }

    // Mask off high 4 bits and multiply low 4 bits by 4
    return uint32(in[offset]&0x0f) * 4, true
}

func inBounds(inLen int, offset int, size int) bool {
    return offset+size <= inLen
}

func loadCommon(in []byte, offset int, size int) (uint32, bool) {
    if !inBounds(len(in), offset, size) {
        return 0, false
    }

    switch size {
    case 1:
        return uint32(in[offset]), true
    case 2:
        return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true
    case 4:
        return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true
    default:
        panic(fmt.Sprintf("invalid load size: %d", size))
    }
}

func loadScratch(ins LoadScratch, regScratch [16]uint32, regA uint32, regX uint32) (uint32, uint32) {
    switch ins.Dst {
    case RegA:
        regA = regScratch[ins.N]
    case RegX:
        regX = regScratch[ins.N]
    }

    return regA, regX
}

func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 {
    switch ins.Src {
    case RegA:
        regScratch[ins.N] = regA
    case RegX:
        regScratch[ins.N] = regX
    }

    return regScratch
}

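The loadMemShift helper above is easiest to see in terms of the IPv4 trick it exists for: the low nibble of the first IP header byte is the header length in 32-bit words. A minimal sketch, assuming a frame with a 14-byte Ethernet header in front of the IP header:

package main

import (
    "fmt"
    "log"

    "golang.org/x/net/bpf"
)

func main() {
    vm, err := bpf.NewVM([]bpf.Instruction{
        bpf.LoadMemShift{Off: 14}, // X = 4 * (packet[14] & 0x0f)
        bpf.TXA{},                 // A = X
        bpf.RetA{},                // return the header length as the verdict
    })
    if err != nil {
        log.Fatal(err)
    }

    pkt := make([]byte, 34)
    pkt[14] = 0x45 // IPv4, IHL = 5 words = 20 bytes
    out, err := vm.Run(pkt)
    fmt.Println(out, err) // 20 <nil>
}
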
vendor/golang.org/x/net/internal/iana/const.go (generated, vendored, new file, 223 lines)
@@ -0,0 +1,223 @@
|
||||
// go generate gen.go
|
||||
// Code generated by the command above; DO NOT EDIT.
|
||||
|
||||
// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).
|
||||
package iana // import "golang.org/x/net/internal/iana"
|
||||
|
||||
// Differentiated Services Field Codepoints (DSCP), Updated: 2018-05-04
|
||||
const (
|
||||
DiffServCS0 = 0x00 // CS0
|
||||
DiffServCS1 = 0x20 // CS1
|
||||
DiffServCS2 = 0x40 // CS2
|
||||
DiffServCS3 = 0x60 // CS3
|
||||
DiffServCS4 = 0x80 // CS4
|
||||
DiffServCS5 = 0xa0 // CS5
|
||||
DiffServCS6 = 0xc0 // CS6
|
||||
DiffServCS7 = 0xe0 // CS7
|
||||
DiffServAF11 = 0x28 // AF11
|
||||
DiffServAF12 = 0x30 // AF12
|
||||
DiffServAF13 = 0x38 // AF13
|
||||
DiffServAF21 = 0x48 // AF21
|
||||
DiffServAF22 = 0x50 // AF22
|
||||
DiffServAF23 = 0x58 // AF23
|
||||
DiffServAF31 = 0x68 // AF31
|
||||
DiffServAF32 = 0x70 // AF32
|
||||
DiffServAF33 = 0x78 // AF33
|
||||
DiffServAF41 = 0x88 // AF41
|
||||
DiffServAF42 = 0x90 // AF42
|
||||
DiffServAF43 = 0x98 // AF43
|
||||
DiffServEF = 0xb8 // EF
|
||||
DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT
|
||||
NotECNTransport = 0x00 // Not-ECT (Not ECN-Capable Transport)
|
||||
ECNTransport1 = 0x01 // ECT(1) (ECN-Capable Transport(1))
|
||||
ECNTransport0 = 0x02 // ECT(0) (ECN-Capable Transport(0))
|
||||
CongestionExperienced = 0x03 // CE (Congestion Experienced)
|
||||
)
|
||||
|
||||
// Protocol Numbers, Updated: 2017-10-13
|
||||
const (
|
||||
ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number
|
||||
ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option
|
||||
ProtocolICMP = 1 // Internet Control Message
|
||||
ProtocolIGMP = 2 // Internet Group Management
|
||||
ProtocolGGP = 3 // Gateway-to-Gateway
|
||||
ProtocolIPv4 = 4 // IPv4 encapsulation
|
||||
ProtocolST = 5 // Stream
|
||||
ProtocolTCP = 6 // Transmission Control
|
||||
ProtocolCBT = 7 // CBT
|
||||
ProtocolEGP = 8 // Exterior Gateway Protocol
|
||||
ProtocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP)
|
||||
ProtocolBBNRCCMON = 10 // BBN RCC Monitoring
|
||||
ProtocolNVPII = 11 // Network Voice Protocol
|
||||
ProtocolPUP = 12 // PUP
|
||||
ProtocolEMCON = 14 // EMCON
|
||||
ProtocolXNET = 15 // Cross Net Debugger
|
||||
ProtocolCHAOS = 16 // Chaos
|
||||
ProtocolUDP = 17 // User Datagram
|
||||
ProtocolMUX = 18 // Multiplexing
|
||||
ProtocolDCNMEAS = 19 // DCN Measurement Subsystems
|
||||
ProtocolHMP = 20 // Host Monitoring
|
||||
ProtocolPRM = 21 // Packet Radio Measurement
|
||||
ProtocolXNSIDP = 22 // XEROX NS IDP
|
||||
ProtocolTRUNK1 = 23 // Trunk-1
|
||||
ProtocolTRUNK2 = 24 // Trunk-2
|
||||
ProtocolLEAF1 = 25 // Leaf-1
|
||||
ProtocolLEAF2 = 26 // Leaf-2
|
||||
ProtocolRDP = 27 // Reliable Data Protocol
|
||||
ProtocolIRTP = 28 // Internet Reliable Transaction
|
||||
ProtocolISOTP4 = 29 // ISO Transport Protocol Class 4
|
||||
ProtocolNETBLT = 30 // Bulk Data Transfer Protocol
|
||||
ProtocolMFENSP = 31 // MFE Network Services Protocol
|
||||
ProtocolMERITINP = 32 // MERIT Internodal Protocol
|
||||
ProtocolDCCP = 33 // Datagram Congestion Control Protocol
|
||||
Protocol3PC = 34 // Third Party Connect Protocol
|
||||
ProtocolIDPR = 35 // Inter-Domain Policy Routing Protocol
|
||||
ProtocolXTP = 36 // XTP
|
||||
ProtocolDDP = 37 // Datagram Delivery Protocol
|
||||
ProtocolIDPRCMTP = 38 // IDPR Control Message Transport Proto
|
||||
ProtocolTPPP = 39 // TP++ Transport Protocol
|
||||
ProtocolIL = 40 // IL Transport Protocol
|
||||
ProtocolIPv6 = 41 // IPv6 encapsulation
|
||||
ProtocolSDRP = 42 // Source Demand Routing Protocol
|
||||
ProtocolIPv6Route = 43 // Routing Header for IPv6
|
||||
ProtocolIPv6Frag = 44 // Fragment Header for IPv6
|
||||
ProtocolIDRP = 45 // Inter-Domain Routing Protocol
|
||||
ProtocolRSVP = 46 // Reservation Protocol
|
||||
ProtocolGRE = 47 // Generic Routing Encapsulation
|
||||
ProtocolDSR = 48 // Dynamic Source Routing Protocol
|
||||
ProtocolBNA = 49 // BNA
|
||||
ProtocolESP = 50 // Encap Security Payload
|
||||
ProtocolAH = 51 // Authentication Header
|
||||
ProtocolINLSP = 52 // Integrated Net Layer Security TUBA
|
||||
ProtocolNARP = 54 // NBMA Address Resolution Protocol
|
||||
ProtocolMOBILE = 55 // IP Mobility
|
||||
ProtocolTLSP = 56 // Transport Layer Security Protocol using Kryptonet key management
|
||||
ProtocolSKIP = 57 // SKIP
|
||||
ProtocolIPv6ICMP = 58 // ICMP for IPv6
|
||||
ProtocolIPv6NoNxt = 59 // No Next Header for IPv6
|
||||
ProtocolIPv6Opts = 60 // Destination Options for IPv6
|
||||
ProtocolCFTP = 62 // CFTP
|
||||
ProtocolSATEXPAK = 64 // SATNET and Backroom EXPAK
|
||||
ProtocolKRYPTOLAN = 65 // Kryptolan
|
||||
ProtocolRVD = 66 // MIT Remote Virtual Disk Protocol
|
||||
ProtocolIPPC = 67 // Internet Pluribus Packet Core
|
||||
ProtocolSATMON = 69 // SATNET Monitoring
|
||||
ProtocolVISA = 70 // VISA Protocol
|
||||
ProtocolIPCV = 71 // Internet Packet Core Utility
|
||||
ProtocolCPNX = 72 // Computer Protocol Network Executive
|
||||
ProtocolCPHB = 73 // Computer Protocol Heart Beat
|
||||
ProtocolWSN = 74 // Wang Span Network
|
||||
ProtocolPVP = 75 // Packet Video Protocol
|
||||
ProtocolBRSATMON = 76 // Backroom SATNET Monitoring
|
||||
ProtocolSUNND = 77 // SUN ND PROTOCOL-Temporary
|
||||
ProtocolWBMON = 78 // WIDEBAND Monitoring
|
||||
ProtocolWBEXPAK = 79 // WIDEBAND EXPAK
|
||||
ProtocolISOIP = 80 // ISO Internet Protocol
|
||||
ProtocolVMTP = 81 // VMTP
|
||||
ProtocolSECUREVMTP = 82 // SECURE-VMTP
|
||||
ProtocolVINES = 83 // VINES
|
||||
ProtocolTTP = 84 // Transaction Transport Protocol
|
||||
ProtocolIPTM = 84 // Internet Protocol Traffic Manager
|
||||
ProtocolNSFNETIGP = 85 // NSFNET-IGP
|
||||
ProtocolDGP = 86 // Dissimilar Gateway Protocol
|
||||
ProtocolTCF = 87 // TCF
|
||||
ProtocolEIGRP = 88 // EIGRP
|
||||
ProtocolOSPFIGP = 89 // OSPFIGP
|
||||
ProtocolSpriteRPC = 90 // Sprite RPC Protocol
|
||||
ProtocolLARP = 91 // Locus Address Resolution Protocol
|
||||
ProtocolMTP = 92 // Multicast Transport Protocol
|
||||
ProtocolAX25 = 93 // AX.25 Frames
|
||||
ProtocolIPIP = 94 // IP-within-IP Encapsulation Protocol
|
||||
ProtocolSCCSP = 96 // Semaphore Communications Sec. Pro.
|
||||
ProtocolETHERIP = 97 // Ethernet-within-IP Encapsulation
|
||||
ProtocolENCAP = 98 // Encapsulation Header
|
||||
ProtocolGMTP = 100 // GMTP
|
||||
ProtocolIFMP = 101 // Ipsilon Flow Management Protocol
|
||||
ProtocolPNNI = 102 // PNNI over IP
|
||||
ProtocolPIM = 103 // Protocol Independent Multicast
|
||||
ProtocolARIS = 104 // ARIS
|
||||
ProtocolSCPS = 105 // SCPS
|
||||
ProtocolQNX = 106 // QNX
|
||||
ProtocolAN = 107 // Active Networks
|
||||
ProtocolIPComp = 108 // IP Payload Compression Protocol
|
||||
ProtocolSNP = 109 // Sitara Networks Protocol
|
||||
ProtocolCompaqPeer = 110 // Compaq Peer Protocol
|
||||
ProtocolIPXinIP = 111 // IPX in IP
|
||||
ProtocolVRRP = 112 // Virtual Router Redundancy Protocol
|
||||
ProtocolPGM = 113 // PGM Reliable Transport Protocol
|
||||
ProtocolL2TP = 115 // Layer Two Tunneling Protocol
|
||||
ProtocolDDX = 116 // D-II Data Exchange (DDX)
|
||||
ProtocolIATP = 117 // Interactive Agent Transfer Protocol
|
||||
ProtocolSTP = 118 // Schedule Transfer Protocol
|
||||
ProtocolSRP = 119 // SpectraLink Radio Protocol
|
||||
ProtocolUTI = 120 // UTI
|
||||
ProtocolSMP = 121 // Simple Message Protocol
|
||||
ProtocolPTP = 123 // Performance Transparency Protocol
|
||||
ProtocolISIS = 124 // ISIS over IPv4
|
||||
ProtocolFIRE = 125 // FIRE
|
||||
ProtocolCRTP = 126 // Combat Radio Transport Protocol
|
||||
ProtocolCRUDP = 127 // Combat Radio User Datagram
|
||||
ProtocolSSCOPMCE = 128 // SSCOPMCE
|
||||
ProtocolIPLT = 129 // IPLT
|
||||
ProtocolSPS = 130 // Secure Packet Shield
|
||||
ProtocolPIPE = 131 // Private IP Encapsulation within IP
|
||||
ProtocolSCTP = 132 // Stream Control Transmission Protocol
|
||||
ProtocolFC = 133 // Fibre Channel
|
||||
ProtocolRSVPE2EIGNORE = 134 // RSVP-E2E-IGNORE
|
||||
ProtocolMobilityHeader = 135 // Mobility Header
|
||||
ProtocolUDPLite = 136 // UDPLite
|
||||
ProtocolMPLSinIP = 137 // MPLS-in-IP
|
||||
ProtocolMANET = 138 // MANET Protocols
|
||||
ProtocolHIP = 139 // Host Identity Protocol
|
||||
ProtocolShim6 = 140 // Shim6 Protocol
|
||||
ProtocolWESP = 141 // Wrapped Encapsulating Security Payload
|
||||
ProtocolROHC = 142 // Robust Header Compression
|
||||
ProtocolReserved = 255 // Reserved
|
||||
)
|
||||
|
||||
// Address Family Numbers, Updated: 2018-04-02
const (
	AddrFamilyIPv4 = 1 // IP (IP version 4)
	AddrFamilyIPv6 = 2 // IP6 (IP version 6)
	AddrFamilyNSAP = 3 // NSAP
	AddrFamilyHDLC = 4 // HDLC (8-bit multidrop)
	AddrFamilyBBN1822 = 5 // BBN 1822
	AddrFamily802 = 6 // 802 (includes all 802 media plus Ethernet "canonical format")
	AddrFamilyE163 = 7 // E.163
	AddrFamilyE164 = 8 // E.164 (SMDS, Frame Relay, ATM)
	AddrFamilyF69 = 9 // F.69 (Telex)
	AddrFamilyX121 = 10 // X.121 (X.25, Frame Relay)
	AddrFamilyIPX = 11 // IPX
	AddrFamilyAppletalk = 12 // Appletalk
	AddrFamilyDecnetIV = 13 // Decnet IV
	AddrFamilyBanyanVines = 14 // Banyan Vines
	AddrFamilyE164withSubaddress = 15 // E.164 with NSAP format subaddress
	AddrFamilyDNS = 16 // DNS (Domain Name System)
	AddrFamilyDistinguishedName = 17 // Distinguished Name
	AddrFamilyASNumber = 18 // AS Number
	AddrFamilyXTPoverIPv4 = 19 // XTP over IP version 4
	AddrFamilyXTPoverIPv6 = 20 // XTP over IP version 6
	AddrFamilyXTPnativemodeXTP = 21 // XTP native mode XTP
	AddrFamilyFibreChannelWorldWidePortName = 22 // Fibre Channel World-Wide Port Name
	AddrFamilyFibreChannelWorldWideNodeName = 23 // Fibre Channel World-Wide Node Name
	AddrFamilyGWID = 24 // GWID
	AddrFamilyL2VPN = 25 // AFI for L2VPN information
	AddrFamilyMPLSTPSectionEndpointID = 26 // MPLS-TP Section Endpoint Identifier
	AddrFamilyMPLSTPLSPEndpointID = 27 // MPLS-TP LSP Endpoint Identifier
	AddrFamilyMPLSTPPseudowireEndpointID = 28 // MPLS-TP Pseudowire Endpoint Identifier
	AddrFamilyMTIPv4 = 29 // MT IP: Multi-Topology IP version 4
	AddrFamilyMTIPv6 = 30 // MT IPv6: Multi-Topology IP version 6
	AddrFamilyEIGRPCommonServiceFamily = 16384 // EIGRP Common Service Family
	AddrFamilyEIGRPIPv4ServiceFamily = 16385 // EIGRP IPv4 Service Family
	AddrFamilyEIGRPIPv6ServiceFamily = 16386 // EIGRP IPv6 Service Family
	AddrFamilyLISPCanonicalAddressFormat = 16387 // LISP Canonical Address Format (LCAF)
	AddrFamilyBGPLS = 16388 // BGP-LS
	AddrFamily48bitMAC = 16389 // 48-bit MAC
	AddrFamily64bitMAC = 16390 // 64-bit MAC
	AddrFamilyOUI = 16391 // OUI
	AddrFamilyMACFinal24bits = 16392 // MAC/24
	AddrFamilyMACFinal40bits = 16393 // MAC/40
	AddrFamilyIPv6Initial64bits = 16394 // IPv6/64
	AddrFamilyRBridgePortID = 16395 // RBridge Port ID
	AddrFamilyTRILLNickname = 16396 // TRILL Nickname
)
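
The two blocks above are the tail of the generated const.go: protocol numbers and address family numbers taken straight from the IANA registries. They are plain integer constants, so consuming them is just a matter of comparing numbers. The sketch below is illustrative only; because golang.org/x/net/internal/iana is an internal package it cannot be imported from outside x/net, so the constants are redeclared locally here with the values shown above, and protocolName is a hypothetical helper, not part of the vendored code.

package main

import "fmt"

// Values copied from the generated constants above; outside x/net a
// consumer would keep its own copy like this.
const (
	ProtocolSCTP    = 132 // Stream Control Transmission Protocol
	ProtocolUDPLite = 136 // UDPLite
	ProtocolShim6   = 140 // Shim6 Protocol
)

// protocolName maps an IP protocol number to a short name for log output.
func protocolName(p int) string {
	switch p {
	case ProtocolSCTP:
		return "SCTP"
	case ProtocolUDPLite:
		return "UDPLite"
	case ProtocolShim6:
		return "Shim6"
	default:
		return fmt.Sprintf("protocol %d", p)
	}
}

func main() {
	fmt.Println(protocolName(132)) // prints "SCTP"
}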
383 vendor/golang.org/x/net/internal/iana/gen.go generated vendored Normal file
@@ -0,0 +1,383 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

//go:generate go run gen.go

// This program generates internet protocol constants and tables by
// reading IANA protocol registries.
package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"go/format"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"strconv"
	"strings"
)

var registries = []struct {
	url   string
	parse func(io.Writer, io.Reader) error
}{
	{
		"https://www.iana.org/assignments/dscp-registry/dscp-registry.xml",
		parseDSCPRegistry,
	},
	{
		"https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml",
		parseProtocolNumbers,
	},
	{
		"https://www.iana.org/assignments/address-family-numbers/address-family-numbers.xml",
		parseAddrFamilyNumbers,
	},
}

func main() {
	var bb bytes.Buffer
	fmt.Fprintf(&bb, "// go generate gen.go\n")
	fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n")
	fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n")
	fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n")
	for _, r := range registries {
		resp, err := http.Get(r.url)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url)
			os.Exit(1)
		}
		if err := r.parse(&bb, resp.Body); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		fmt.Fprintf(&bb, "\n")
	}
	b, err := format.Source(bb.Bytes())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if err := ioutil.WriteFile("const.go", b, 0644); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

func parseDSCPRegistry(w io.Writer, r io.Reader) error {
	dec := xml.NewDecoder(r)
	var dr dscpRegistry
	if err := dec.Decode(&dr); err != nil {
		return err
	}
	fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated)
	fmt.Fprintf(w, "const (\n")
	for _, dr := range dr.escapeDSCP() {
		fmt.Fprintf(w, "DiffServ%s = %#02x", dr.Name, dr.Value)
		fmt.Fprintf(w, "// %s\n", dr.OrigName)
	}
	for _, er := range dr.escapeECN() {
		fmt.Fprintf(w, "%s = %#02x", er.Descr, er.Value)
		fmt.Fprintf(w, "// %s\n", er.OrigDescr)
	}
	fmt.Fprintf(w, ")\n")
	return nil
}

type dscpRegistry struct {
	XMLName    xml.Name `xml:"registry"`
	Title      string   `xml:"title"`
	Updated    string   `xml:"updated"`
	Note       string   `xml:"note"`
	Registries []struct {
		Title      string `xml:"title"`
		Registries []struct {
			Title   string `xml:"title"`
			Records []struct {
				Name  string `xml:"name"`
				Space string `xml:"space"`
			} `xml:"record"`
		} `xml:"registry"`
		Records []struct {
			Value string `xml:"value"`
			Descr string `xml:"description"`
		} `xml:"record"`
	} `xml:"registry"`
}

type canonDSCPRecord struct {
	OrigName string
	Name     string
	Value    int
}

func (drr *dscpRegistry) escapeDSCP() []canonDSCPRecord {
	var drs []canonDSCPRecord
	for _, preg := range drr.Registries {
		if !strings.Contains(preg.Title, "Differentiated Services Field Codepoints") {
			continue
		}
		for _, reg := range preg.Registries {
			if !strings.Contains(reg.Title, "Pool 1 Codepoints") {
				continue
			}
			drs = make([]canonDSCPRecord, len(reg.Records))
			sr := strings.NewReplacer(
				"+", "",
				"-", "",
				"/", "",
				".", "",
				" ", "",
			)
			for i, dr := range reg.Records {
				s := strings.TrimSpace(dr.Name)
				drs[i].OrigName = s
				drs[i].Name = sr.Replace(s)
				n, err := strconv.ParseUint(dr.Space, 2, 8)
				if err != nil {
					continue
				}
				drs[i].Value = int(n) << 2
			}
		}
	}
	return drs
}

type canonECNRecord struct {
	OrigDescr string
	Descr     string
	Value     int
}

func (drr *dscpRegistry) escapeECN() []canonECNRecord {
	var ers []canonECNRecord
	for _, reg := range drr.Registries {
		if !strings.Contains(reg.Title, "ECN Field") {
			continue
		}
		ers = make([]canonECNRecord, len(reg.Records))
		sr := strings.NewReplacer(
			"Capable", "",
			"Not-ECT", "",
			"ECT(1)", "",
			"ECT(0)", "",
			"CE", "",
			"(", "",
			")", "",
			"+", "",
			"-", "",
			"/", "",
			".", "",
			" ", "",
		)
		for i, er := range reg.Records {
			s := strings.TrimSpace(er.Descr)
			ers[i].OrigDescr = s
			ss := strings.Split(s, " ")
			if len(ss) > 1 {
				ers[i].Descr = strings.Join(ss[1:], " ")
			} else {
				ers[i].Descr = ss[0]
			}
			ers[i].Descr = sr.Replace(er.Descr)
			n, err := strconv.ParseUint(er.Value, 2, 8)
			if err != nil {
				continue
			}
			ers[i].Value = int(n)
		}
	}
	return ers
}

func parseProtocolNumbers(w io.Writer, r io.Reader) error {
	dec := xml.NewDecoder(r)
	var pn protocolNumbers
	if err := dec.Decode(&pn); err != nil {
		return err
	}
	prs := pn.escape()
	prs = append([]canonProtocolRecord{{
		Name:  "IP",
		Descr: "IPv4 encapsulation, pseudo protocol number",
		Value: 0,
	}}, prs...)
	fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated)
	fmt.Fprintf(w, "const (\n")
	for _, pr := range prs {
		if pr.Name == "" {
			continue
		}
		fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value)
		s := pr.Descr
		if s == "" {
			s = pr.OrigName
		}
		fmt.Fprintf(w, "// %s\n", s)
	}
	fmt.Fprintf(w, ")\n")
	return nil
}

type protocolNumbers struct {
	XMLName  xml.Name `xml:"registry"`
	Title    string   `xml:"title"`
	Updated  string   `xml:"updated"`
	RegTitle string   `xml:"registry>title"`
	Note     string   `xml:"registry>note"`
	Records  []struct {
		Value string `xml:"value"`
		Name  string `xml:"name"`
		Descr string `xml:"description"`
	} `xml:"registry>record"`
}

type canonProtocolRecord struct {
	OrigName string
	Name     string
	Descr    string
	Value    int
}

func (pn *protocolNumbers) escape() []canonProtocolRecord {
	prs := make([]canonProtocolRecord, len(pn.Records))
	sr := strings.NewReplacer(
		"-in-", "in",
		"-within-", "within",
		"-over-", "over",
		"+", "P",
		"-", "",
		"/", "",
		".", "",
		" ", "",
	)
	for i, pr := range pn.Records {
		if strings.Contains(pr.Name, "Deprecated") ||
			strings.Contains(pr.Name, "deprecated") {
			continue
		}
		prs[i].OrigName = pr.Name
		s := strings.TrimSpace(pr.Name)
		switch pr.Name {
		case "ISIS over IPv4":
			prs[i].Name = "ISIS"
		case "manet":
			prs[i].Name = "MANET"
		default:
			prs[i].Name = sr.Replace(s)
		}
		ss := strings.Split(pr.Descr, "\n")
		for i := range ss {
			ss[i] = strings.TrimSpace(ss[i])
		}
		if len(ss) > 1 {
			prs[i].Descr = strings.Join(ss, " ")
		} else {
			prs[i].Descr = ss[0]
		}
		prs[i].Value, _ = strconv.Atoi(pr.Value)
	}
	return prs
}

func parseAddrFamilyNumbers(w io.Writer, r io.Reader) error {
	dec := xml.NewDecoder(r)
	var afn addrFamilylNumbers
	if err := dec.Decode(&afn); err != nil {
		return err
	}
	afrs := afn.escape()
	fmt.Fprintf(w, "// %s, Updated: %s\n", afn.Title, afn.Updated)
	fmt.Fprintf(w, "const (\n")
	for _, afr := range afrs {
		if afr.Name == "" {
			continue
		}
		fmt.Fprintf(w, "AddrFamily%s = %d", afr.Name, afr.Value)
		fmt.Fprintf(w, "// %s\n", afr.Descr)
	}
	fmt.Fprintf(w, ")\n")
	return nil
}

type addrFamilylNumbers struct {
	XMLName  xml.Name `xml:"registry"`
	Title    string   `xml:"title"`
	Updated  string   `xml:"updated"`
	RegTitle string   `xml:"registry>title"`
	Note     string   `xml:"registry>note"`
	Records  []struct {
		Value string `xml:"value"`
		Descr string `xml:"description"`
	} `xml:"registry>record"`
}

type canonAddrFamilyRecord struct {
	Name  string
	Descr string
	Value int
}

func (afn *addrFamilylNumbers) escape() []canonAddrFamilyRecord {
	afrs := make([]canonAddrFamilyRecord, len(afn.Records))
	sr := strings.NewReplacer(
		"IP version 4", "IPv4",
		"IP version 6", "IPv6",
		"Identifier", "ID",
		"-", "",
		"-", "",
		"/", "",
		".", "",
		" ", "",
	)
	for i, afr := range afn.Records {
		if strings.Contains(afr.Descr, "Unassigned") ||
			strings.Contains(afr.Descr, "Reserved") {
			continue
		}
		afrs[i].Descr = afr.Descr
		s := strings.TrimSpace(afr.Descr)
		switch s {
		case "IP (IP version 4)":
			afrs[i].Name = "IPv4"
		case "IP6 (IP version 6)":
			afrs[i].Name = "IPv6"
		case "AFI for L2VPN information":
			afrs[i].Name = "L2VPN"
		case "E.164 with NSAP format subaddress":
			afrs[i].Name = "E164withSubaddress"
		case "MT IP: Multi-Topology IP version 4":
			afrs[i].Name = "MTIPv4"
		case "MAC/24":
			afrs[i].Name = "MACFinal24bits"
		case "MAC/40":
			afrs[i].Name = "MACFinal40bits"
		case "IPv6/64":
			afrs[i].Name = "IPv6Initial64bits"
		default:
			n := strings.Index(s, "(")
			if n > 0 {
				s = s[:n]
			}
			n = strings.Index(s, ":")
			if n > 0 {
				s = s[:n]
			}
			afrs[i].Name = sr.Replace(s)
		}
		afrs[i].Value, _ = strconv.Atoi(afr.Value)
	}
	return afrs
}
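
gen.go is excluded from normal builds by its // +build ignore constraint and is only run through the //go:generate go run gen.go directive: it fetches the three IANA XML registries listed in the registries slice, renders the constant blocks, gofmt-formats the result with format.Source, and overwrites const.go. Reconstructed from the Fprintf calls in main above, the preamble it emits at the top of const.go looks like this (the registry-derived const blocks, including the ones shown earlier, follow it):

// go generate gen.go
// Code generated by the command above; DO NOT EDIT.

// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).
package iana // import "golang.org/x/net/internal/iana"

So the generated file is regenerated upstream rather than edited by hand; in rclone's tree this copy only changes when the vendored x/net dependency is updated.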
11 vendor/golang.org/x/net/internal/socket/cmsghdr.go generated vendored Normal file
@@ -0,0 +1,11 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin dragonfly freebsd linux netbsd openbsd solaris

package socket

func (h *cmsghdr) len() int { return int(h.Len) }
func (h *cmsghdr) lvl() int { return int(h.Level) }
func (h *cmsghdr) typ() int { return int(h.Type) }
13 vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go generated vendored Normal file
@@ -0,0 +1,13 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin dragonfly freebsd netbsd openbsd

package socket

func (h *cmsghdr) set(l, lvl, typ int) {
	h.Len = uint32(l)
	h.Level = int32(lvl)
	h.Type = int32(typ)
}
14 vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go generated vendored Normal file
@@ -0,0 +1,14 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build arm mips mipsle 386
// +build linux

package socket

func (h *cmsghdr) set(l, lvl, typ int) {
	h.Len = uint32(l)
	h.Level = int32(lvl)
	h.Type = int32(typ)
}
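
The two set implementations above have identical bodies; they are split by build tags because the width of the underlying cmsghdr fields varies by platform, and only the uint32-sized variants happen to appear in this excerpt. The 64-bit Linux counterpart is not part of this diff, but presumably (this is an assumption, not vendored code shown here) it widens Len along these lines:

// Hypothetical sketch of the 64-bit Linux variant (not in this excerpt);
// its build tags would restrict it to 64-bit architectures plus linux.

package socket

func (h *cmsghdr) set(l, lvl, typ int) {
	h.Len = uint64(l) // Len is a 64-bit field on 64-bit Linux
	h.Level = int32(lvl)
	h.Type = int32(typ)
}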
Some files were not shown because too many files have changed in this diff.