mirror of https://github.com/rclone/rclone.git synced 2026-01-21 20:03:22 +00:00

Compare commits


1 commit

Author: Nick Craig-Wood
SHA1: 411f75aadf
Date: 2018-11-26 08:32:58 +00:00

azureblob: enable on freebsd, netbsd, openbsd

At some point the SDK was fixed on these architectures, so re-enable
building the azure blob backend for them.
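For context, enabling a backend on additional platforms in rclone normally comes down to relaxing the Go build constraint at the top of the backend's source file. The sketch below shows what such a change typically looks like; the file path and the exact tag lists are assumptions for illustration, not taken from this diff.

// backend/azureblob/azureblob.go (illustrative sketch, not the actual file)
//
// Before the change the constraint would have been something like
// "+build !plan9,!solaris,!freebsd,!netbsd,!openbsd", excluding the BSDs
// while the Azure SDK did not build there. Once the SDK is fixed, only the
// genuinely unsupported platforms need to stay excluded:

// +build !plan9,!solaris

package azureblob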
970 changed files with 34152 additions and 100435 deletions


@@ -1,4 +1,3 @@
---
version: 2
jobs:
@@ -14,10 +13,10 @@ jobs:
- run:
name: Cross-compile rclone
command: |
docker pull rclone/xgo-cgofuse
docker pull billziss/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
--image=rclone/xgo-cgofuse \
--image=billziss/xgo-cgofuse \
--targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
.
@@ -30,21 +29,6 @@ jobs:
command: |
mkdir -p /tmp/rclone.dist
cp -R rclone-* /tmp/rclone.dist
mkdir build
cp -R rclone-* build/
- run:
name: Build rclone
command: |
go version
go build
- run:
name: Upload artifacts
command: |
if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
make circleci_upload
fi
- store_artifacts:
path: /tmp/rclone.dist


@@ -1,30 +0,0 @@
# golangci-lint configuration options
run:
build-tags:
- cmount
linters:
enable:
- deadcode
- errcheck
- goimports
- golint
- ineffassign
- structcheck
- varcheck
- govet
- unconvert
#- prealloc
#- maligned
disable-all: true
issues:
# Enable some lints excluded by default
exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0

.gometalinter.json (new file, 14 lines)

@@ -0,0 +1,14 @@
{
"Enable": [
"deadcode",
"errcheck",
"goimports",
"golint",
"ineffassign",
"structcheck",
"varcheck",
"vet"
],
"EnableGC": true,
"Vendor": true
}


@@ -8,7 +8,6 @@ go:
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12rc1
- tip
go_import_path: github.com/ncw/rclone
before_install:
@@ -25,7 +24,6 @@ script:
env:
global:
- GOTAGS=cmount
- GO111MODULE=off
- secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
- secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
- secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
@@ -44,7 +42,7 @@ matrix:
- go: tip
include:
- os: osx
go: 1.12rc1
go: 1.11.x
env: GOTAGS=""
cache:
directories:
@@ -56,5 +54,5 @@ deploy:
on:
repo: ncw/rclone
all_branches: true
go: 1.12rc1
go: 1.11.x
condition: $TRAVIS_PULL_REQUEST == false


@@ -351,12 +351,6 @@ Unit tests
Integration tests
* Add your backend to `fstest/test_all/config.yaml`
* Once you've done that then you can use the integration test framework from the project root:
* go install ./...
* test_all -backend remote
Or if you want to run the integration tests manually:
* Make sure integration tests pass with
* `cd fs/operations`
* `go test -v -remote TestRemote:`
@@ -378,3 +372,4 @@ Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
* `cmd/cmd.go` - the main help for rclone


@@ -1,17 +1,14 @@
# Maintainers guide for rclone #
Current active maintainers of rclone are:
Current active maintainers of rclone are
| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
* Nick Craig-Wood @ncw
* Stefan Breunig @breunigs
* Ishuah Kariuki @ishuah
* Remus Bunduc @remusb - cache subsystem maintainer
* Fabian Möller @B4dM4n
* Alex Chen @Cnly
* Sandeep Ummadi @sandeepkru
**This is a work in progress Draft**

File diff suppressed because it is too large

MANUAL.md (1416 lines changed)

File diff suppressed because it is too large

MANUAL.txt (1525 lines changed)

File diff suppressed because it is too large


@@ -11,7 +11,7 @@ ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
BRANCH_PATH :=
endif
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
ifneq ($(TAG),$(LAST_TAG))
TAG := $(TAG)-beta
endif
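To make the version arithmetic in the NEW_TAG rules above concrete, here is an illustrative stand-alone sketch (not part of the Makefile) of what the two sprintf formats produce, assuming a hypothetical LAST_TAG of v1.45:

// Illustrative only: mirrors the Perl one-liners in the NEW_TAG rules above.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	lastTag := "v1.45" // hypothetical LAST_TAG value
	v, err := strconv.ParseFloat(strings.TrimPrefix(lastTag, "v"), 64)
	if err != nil {
		panic(err)
	}
	v += 0.01
	fmt.Printf("v%.2f.0\n", v) // the "v%.2f.0" format gives v1.46.0
	fmt.Printf("v%.2f\n", v)   // the "v%.2f" format gives v1.46
}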
@@ -64,20 +64,30 @@ endif
# Do source code quality checks
check: rclone
ifdef FULL_TESTS
@# we still run go vet for -printfuncs which golangci-lint doesn't do yet
@# see: https://github.com/golangci/golangci-lint/issues/204
@echo "-- START CODE QUALITY REPORT -------------------------------"
@go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
@golangci-lint run ./...
@echo "-- END CODE QUALITY REPORT ---------------------------------"
go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
errcheck $(BUILDTAGS) ./...
find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
else
@echo Skipping source quality tests as version of go too old
endif
gometalinter_install:
go get -u github.com/alecthomas/gometalinter
gometalinter --install --update
# We aren't using gometalinter as the default linter yet because
# 1. it doesn't support build tags: https://github.com/alecthomas/gometalinter/issues/275
# 2. can't get -printfuncs working with the vet linter
gometalinter:
gometalinter ./...
# Get the build dependencies
build_dep:
ifdef FULL_TESTS
go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
go get -u github.com/kisielk/errcheck
go get -u golang.org/x/tools/cmd/goimports
go get -u golang.org/x/lint/golint
endif
# Get the release dependencies
@@ -175,13 +185,6 @@ ifndef BRANCH_PATH
endif
@echo Beta release ready at $(BETA_URL)
circleci_upload:
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifndef BRANCH_PATH
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
@echo Beta release ready at $(BETA_URL)/testbuilds
BUILD_FLAGS := -exclude "^(windows|darwin)/"
ifeq ($(TRAVIS_OS_NAME),osx)
BUILD_FLAGS := -include "^darwin/" -cgo
@@ -189,7 +192,7 @@ endif
travis_beta:
ifeq ($(TRAVIS_OS_NAME),linux)
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)


@@ -20,7 +20,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
## Storage providers
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
@@ -44,14 +43,13 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Openstack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
@@ -62,13 +60,13 @@ Please see [the full list of all storage providers and their features](https://r
## Features
* MD5/SHA-1 hashes checked at all times for file integrity
* MD5/SHA1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts
* Can sync to and from network, eg two different cloud accounts
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
@@ -93,4 +91,4 @@ License
-------
This is free software under the terms of MIT the license (check the
[COPYING file](/COPYING) included in this package).
[COPYING file](/rclone/COPYING) included in this package).


@@ -1,821 +0,0 @@
package adb
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
adb "github.com/thinkhy/go-adb"
"github.com/thinkhy/go-adb/wire"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "adb",
Description: "Android Debug Bridge",
NewFs: NewFs,
Options: []fs.Option{{
Name: "serial",
Help: "The device serial to use. Leave empty for auto selection.",
Advanced: true,
}, {
Name: "host",
Default: "localhost",
Help: "The ADB server host.",
Advanced: true,
}, {
Name: "port",
Default: 5037,
Help: "The ADB server port.",
Advanced: true,
}, {
Name: "executable",
Help: "The ADB executable path.",
Advanced: true,
}, {
Name: "copy_links",
Help: "Follow symlinks and copy the pointed to item.",
Default: false,
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Serial string
Host string
Port uint16
Executable string
FollowSymlinks bool `config:"copy_links"`
}
// Fs represents a adb device
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
client *adb.Adb
device *execDevice
statFunc statFunc
statFuncMu sync.Mutex
touchFunc touchFunc
touchFuncMu sync.Mutex
}
// Object describes a adb file
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
size int64
mode os.FileMode
modTime time.Time
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("ADB root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if root == "" {
root = "/"
}
f := &Fs{
name: name,
root: root,
opt: *opt,
statFunc: (*Object).statTry,
touchFunc: (*Object).touchTry,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(f)
f.client, err = adb.NewWithConfig(adb.ServerConfig{
Host: opt.Host,
Port: int(opt.Port),
PathToAdb: opt.Executable,
})
if err != nil {
return nil, errors.Wrapf(err, "Could not configure ADB server")
}
err = f.client.StartServer()
if err != nil {
return nil, errors.Wrapf(err, "Could not start ADB server")
}
serverVersion, err := f.client.ServerVersion()
if err != nil {
return nil, errors.Wrapf(err, "Could not get ADB server version")
}
fs.Debugf(f, "ADB server version: 0x%X", serverVersion)
serials, err := f.client.ListDeviceSerials()
if err != nil {
return nil, errors.Wrapf(err, "Could not get ADB devices")
}
descriptor := adb.AnyDevice()
if opt.Serial != "" {
descriptor = adb.DeviceWithSerial(opt.Serial)
}
if len(serials) > 1 && opt.Serial == "" {
return nil, errors.New("Multiple ADB devices found. Use the serial config to select a specific device")
}
f.device = &execDevice{f.client.Device(descriptor)}
// follow symlinks for root pathes
entry, err := f.newEntryFollowSymlinks("")
switch err {
case nil:
case fs.ErrorObjectNotFound:
default:
return nil, err
}
switch entry.(type) {
case fs.Object:
f.root = path.Dir(f.root)
return f, fs.ErrorIsFile
case nil:
return f, nil
case fs.Directory:
return f, nil
default:
return nil, errors.Errorf("Invalid root entry type %t", entry)
}
}
// Precision of the object storage system
func (f *Fs) Precision() time.Duration {
return 1 * time.Second
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
p := path.Join(f.root, dir)
dirEntries, err := f.device.ListDirEntries(p)
if err != nil {
return nil, errors.Wrap(err, "ListDirEntries")
}
defer fs.CheckClose(dirEntries, &err)
found := false
for dirEntries.Next() {
found = true
dirEntry := dirEntries.Entry()
switch dirEntry.Name {
case ".", "..":
continue
}
fsEntry, err := f.entryForDirEntry(path.Join(dir, dirEntry.Name), dirEntry, f.opt.FollowSymlinks)
if err != nil {
fs.Errorf(p, "Listing error: %q: %v", dirEntry.Name, err)
return nil, err
} else if fsEntry != nil {
entries = append(entries, fsEntry)
} else {
fs.Debugf(f, "Skipping DirEntry %#v", dirEntry)
}
}
err = dirEntries.Err()
if err != nil {
return nil, errors.Wrap(err, "ListDirEntries")
}
if !found {
return nil, fs.ErrorDirNotFound
}
return
}
func (f *Fs) entryForDirEntry(remote string, e *adb.DirEntry, followSymlinks bool) (fs.DirEntry, error) {
o := f.newObjectWithInfo(remote, e)
// Follow symlinks if required
if followSymlinks && (e.Mode&os.ModeSymlink) != 0 {
err := f.statFunc(&o)
if err != nil {
return nil, err
}
}
if o.mode.IsDir() {
return fs.NewDir(remote, o.modTime), nil
}
return &o, nil
}
func (f *Fs) newEntry(remote string) (fs.DirEntry, error) {
return f.newEntryWithFollow(remote, f.opt.FollowSymlinks)
}
func (f *Fs) newEntryFollowSymlinks(remote string) (fs.DirEntry, error) {
return f.newEntryWithFollow(remote, true)
}
func (f *Fs) newEntryWithFollow(remote string, followSymlinks bool) (fs.DirEntry, error) {
entry, err := f.device.Stat(path.Join(f.root, remote))
if err != nil {
if adb.HasErrCode(err, adb.FileNoExistError) {
return nil, fs.ErrorObjectNotFound
}
return nil, errors.Wrapf(err, "Stat failed")
}
return f.entryForDirEntry(remote, entry, followSymlinks)
}
func (f *Fs) newObjectWithInfo(remote string, e *adb.DirEntry) Object {
return Object{
fs: f,
remote: remote,
size: int64(e.Size),
mode: e.Mode,
modTime: e.ModifiedAt,
}
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
entry, err := f.newEntry(remote)
if err != nil {
return nil, err
}
obj, ok := entry.(fs.Object)
if !ok {
return nil, fs.ErrorObjectNotFound
}
return obj, nil
}
// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
// Temporary Object under construction - info filled in by Update()
o := f.newObject(remote)
err := o.Update(in, src, options...)
if err != nil {
return nil, err
}
return o, nil
}
// newObject makes a half completed Object
func (f *Fs) newObject(remote string) *Object {
return &Object{
fs: f,
remote: remote,
}
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(dir string) error {
p := path.Join(f.root, dir)
output, code, err := f.device.execCommandWithExitCode("mkdir -p", p)
switch err := err.(type) {
case nil:
return nil
case adb.ShellExitError:
entry, _ := f.newEntry(p)
if _, ok := entry.(fs.Directory); ok {
return nil
}
return errors.Errorf("mkdir %q failed with %d: %q", dir, code, output)
default:
return errors.Wrap(err, "mkdir")
}
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(dir string) error {
p := path.Join(f.root, dir)
output, code, err := f.device.execCommandWithExitCode("rmdir", p)
switch err := err.(type) {
case nil:
return nil
case adb.ShellExitError:
return errors.Errorf("rmdir %q failed with %d: %q", dir, code, output)
default:
return errors.Wrap(err, "rmdir")
}
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime() time.Time {
return o.modTime
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return o.size
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(t time.Time) error {
return o.fs.touchFunc(o, t)
}
func (o *Object) stat() error {
return o.statStatArg(statArgC, path.Join(o.fs.root, o.remote))
}
func (o *Object) setMetadata(entry *adb.DirEntry) {
// Don't overwrite the values if we don't need to
// this avoids upsetting the race detector
if o.size != int64(entry.Size) {
o.size = int64(entry.Size)
}
if !o.modTime.Equal(entry.ModifiedAt) {
o.modTime = entry.ModifiedAt
}
if o.mode != entry.Mode {
o.mode = decodeEntryMode(uint32(entry.Mode))
}
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
const blockSize = 1 << 12
var offset, count int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
case *fs.SeekOption:
offset = x.Offset
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if offset > o.size {
offset = o.size
}
if count < 0 {
count = o.size - offset
} else if count+offset > o.size {
count = o.size - offset
}
fs.Debugf(o, "Open: remote: %q offset: %d count: %d", o.remote, offset, count)
if count == 0 {
return ioutil.NopCloser(bytes.NewReader(nil)), nil
}
offsetBlocks, offsetRest := offset/blockSize, offset%blockSize
countBlocks := (count-1)/blockSize + 1
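// Worked example with assumed values (not from the source): for blockSize=4096,
// offset=5000 and count=10000 the arithmetic above gives offsetBlocks=1 and
// offsetRest=904, so dd skips one whole 4 KiB block and the reader returned
// below discards the remaining 904 bytes; countBlocks=(10000-1)/4096+1=3, so
// three blocks (12288 bytes) are read, enough to cover the requested range.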
conn, err := o.fs.device.execCommand(fmt.Sprintf("sh -c 'dd \"if=$0\" bs=%d skip=%d count=%d 2>/dev/null'", blockSize, offsetBlocks, countBlocks), path.Join(o.fs.root, o.remote))
if err != nil {
return nil, err
}
return &adbReader{
ReadCloser: readers.NewLimitedReadCloser(conn, count+offsetRest),
skip: offsetRest,
expected: count,
}, nil
}
// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
for _, option := range options {
if option.Mandatory() {
fs.Logf(option, "Unsupported mandatory option: %v", option)
}
}
written, err := o.writeToFile(path.Join(o.fs.root, o.remote), in, 0666, src.ModTime())
if err != nil {
if removeErr := o.Remove(); removeErr != nil {
fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
}
return err
}
expected := src.Size()
if expected == -1 {
expected = written
}
for _, t := range []int64{100, 250, 500, 1000, 2500, 5000, 10000} {
err = o.stat()
if err != nil {
return err
}
if o.size == expected {
return nil
}
fs.Debugf(o, "Invalid size after update, expected: %d got: %d", expected, o.size)
time.Sleep(time.Duration(t) * time.Millisecond)
}
return o.stat()
}
// Remove this object
func (o *Object) Remove() error {
p := path.Join(o.fs.root, o.remote)
output, code, err := o.fs.device.execCommandWithExitCode("rm", p)
switch err := err.(type) {
case nil:
return nil
case adb.ShellExitError:
return errors.Errorf("rm %q failed with %d: %q", o.remote, code, output)
default:
return errors.Wrap(err, "rm")
}
}
func (o *Object) writeToFile(path string, rd io.Reader, perms os.FileMode, modeTime time.Time) (written int64, err error) {
dst, err := o.fs.device.OpenWrite(path, perms, modeTime)
if err != nil {
return
}
defer fs.CheckClose(dst, &err)
return io.Copy(dst, rd)
}
type statFunc func(*Object) error
func (o *Object) statTry() error {
o.fs.statFuncMu.Lock()
defer o.fs.statFuncMu.Unlock()
for _, f := range []statFunc{
(*Object).statStatL, (*Object).statRealPath, (*Object).statReadLink,
} {
err := f(o)
if err != nil {
fs.Debugf(o, "%s", err)
} else {
o.fs.statFunc = f
return nil
}
}
return errors.Errorf("unable to resolve link target")
}
const (
statArgLc = "-Lc"
statArgC = "-c"
)
func (o *Object) statStatL() error {
return o.statStatArg(statArgLc, path.Join(o.fs.root, o.remote))
}
func (o *Object) statStatArg(arg, path string) error {
output, code, err := o.fs.device.execCommandWithExitCode(fmt.Sprintf("stat %s %s", arg, "%f,%s,%Y"), path)
output = strings.TrimSpace(output)
switch err := err.(type) {
case nil:
case adb.ShellExitError:
return errors.Errorf("stat %q failed with %d: %q", o.remote, code, output)
default:
return errors.Wrap(err, "stat")
}
parts := strings.Split(output, ",")
if len(parts) != 3 {
return errors.Errorf("stat %q invalid output %q", o.remote, output)
}
mode, err := strconv.ParseUint(parts[0], 16, 32)
if err != nil {
return errors.Errorf("stat %q invalid output %q", o.remote, output)
}
size, err := strconv.ParseUint(parts[1], 10, 64)
if err != nil {
return errors.Errorf("stat %q invalid output %q", o.remote, output)
}
modTime, err := strconv.ParseInt(parts[2], 10, 64)
if err != nil {
return errors.Errorf("stat %q invalid output %q", o.remote, output)
}
o.size = int64(size)
o.modTime = time.Unix(modTime, 0)
o.mode = decodeEntryMode(uint32(mode))
return nil
}
func (o *Object) statReadLink() error {
p := path.Join(o.fs.root, o.remote)
output, code, err := o.fs.device.execCommandWithExitCode("readlink -f", p)
output = strings.TrimSuffix(output, "\n")
switch err := err.(type) {
case nil:
case adb.ShellExitError:
return errors.Errorf("readlink %q failed with %d: %q", o.remote, code, output)
default:
return errors.Wrap(err, "readlink")
}
return o.statStatArg(statArgC, output)
}
func (o *Object) statRealPath() error {
p := path.Join(o.fs.root, o.remote)
output, code, err := o.fs.device.execCommandWithExitCode("realpath", p)
output = strings.TrimSuffix(output, "\n")
switch err := err.(type) {
case nil:
case adb.ShellExitError:
return errors.Errorf("realpath %q failed with %d: %q", o.remote, code, output)
default:
return errors.Wrap(err, "realpath")
}
return o.statStatArg(statArgC, output)
}
type touchFunc func(*Object, time.Time) error
func (o *Object) touchTry(t time.Time) error {
o.fs.touchFuncMu.Lock()
defer o.fs.touchFuncMu.Unlock()
for _, f := range []touchFunc{
(*Object).touchCmd, (*Object).touchCd,
} {
err := f(o, t)
if err != nil {
fs.Debugf(o, "%s", err)
} else {
o.fs.touchFunc = f
return nil
}
}
return errors.Errorf("unable to resolve link target")
}
const (
touchArgCmd = "-cmd"
touchArgCd = "-cd"
)
func (o *Object) touchCmd(t time.Time) error {
return o.touchStatArg(touchArgCmd, path.Join(o.fs.root, o.remote), t)
}
func (o *Object) touchCd(t time.Time) error {
return o.touchStatArg(touchArgCd, path.Join(o.fs.root, o.remote), t)
}
func (o *Object) touchStatArg(arg, path string, t time.Time) error {
output, code, err := o.fs.device.execCommandWithExitCode(fmt.Sprintf("touch %s %s", arg, t.Format(time.RFC3339Nano)), path)
output = strings.TrimSpace(output)
switch err := err.(type) {
case nil:
case adb.ShellExitError:
return errors.Errorf("touch %q failed with %d: %q", o.remote, code, output)
default:
return errors.Wrap(err, "touch")
}
err = o.stat()
if err != nil {
return err
}
if diff, ok := checkTimeEqualWithPrecision(t, o.modTime, o.fs.Precision()); !ok {
return errors.Errorf("touch %q to %s was ineffective: %d", o.remote, t, diff)
}
return nil
}
func checkTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
dt := t0.Sub(t1)
if dt >= precision || dt <= -precision {
return dt, false
}
return dt, true
}
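// Example with assumed values: with this backend's 1 second precision,
// timestamps 400 ms apart are reported equal (dt is within precision), while
// timestamps a full second apart are not (dt >= precision), which is how
// touchStatArg above decides whether the touch actually took effect.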
func decodeEntryMode(entryMode uint32) os.FileMode {
const (
unixIFBLK = 0x6000
unixIFMT = 0xf000
unixIFCHR = 0x2000
unixIFDIR = 0x4000
unixIFIFO = 0x1000
unixIFLNK = 0xa000
unixIFREG = 0x8000
unixIFSOCK = 0xc000
unixISGID = 0x400
unixISUID = 0x800
unixISVTX = 0x200
)
mode := os.FileMode(entryMode & 0777)
switch entryMode & unixIFMT {
case unixIFBLK:
mode |= os.ModeDevice
case unixIFCHR:
mode |= os.ModeDevice | os.ModeCharDevice
case unixIFDIR:
mode |= os.ModeDir
case unixIFIFO:
mode |= os.ModeNamedPipe
case unixIFLNK:
mode |= os.ModeSymlink
case unixIFREG:
// nothing to do
case unixIFSOCK:
mode |= os.ModeSocket
}
if entryMode&unixISGID != 0 {
mode |= os.ModeSetgid
}
if entryMode&unixISUID != 0 {
mode |= os.ModeSetuid
}
if entryMode&unixISVTX != 0 {
mode |= os.ModeSticky
}
return mode
}
type execDevice struct {
*adb.Device
}
func (d *execDevice) execCommandWithExitCode(cmd string, arg string) (string, int, error) {
cmdLine := fmt.Sprintf("sh -c '%s \"$0\"; echo :$?' '%s'", cmd, strings.Replace(arg, "'", "'\\''", -1))
fs.Debugf("adb", "exec: %s", cmdLine)
conn, err := d.execCommand(cmdLine)
if err != nil {
return "", -1, err
}
resp, err := conn.ReadUntilEof()
if err != nil {
return "", -1, errors.Wrap(err, "ExecCommand")
}
outStr := string(resp)
idx := strings.LastIndexByte(outStr, ':')
if idx == -1 {
return outStr, -1, fmt.Errorf("adb shell aborted, can not parse exit code")
}
exitCode, _ := strconv.Atoi(strings.TrimSpace(outStr[idx+1:]))
if exitCode != 0 {
err = adb.ShellExitError{Command: cmdLine, ExitCode: exitCode}
}
return outStr[:idx], exitCode, err
}
func (d *execDevice) execCommand(cmd string, args ...string) (*wire.Conn, error) {
cmd = prepareCommandLineEscaped(cmd, args...)
conn, err := d.Dial()
if err != nil {
return nil, errors.Wrap(err, "ExecCommand")
}
defer func() {
if err != nil && conn != nil {
_ = conn.Close()
}
}()
req := fmt.Sprintf("exec:%s", cmd)
if err = conn.SendMessage([]byte(req)); err != nil {
return nil, errors.Wrap(err, "ExecCommand")
}
if _, err = conn.ReadStatus(req); err != nil {
return nil, errors.Wrap(err, "ExecCommand")
}
return conn, nil
}
func prepareCommandLineEscaped(cmd string, args ...string) string {
for i, arg := range args {
args[i] = fmt.Sprintf("'%s'", strings.Replace(arg, "'", "'\\''", -1))
}
// Prepend the command to the args array.
if len(args) > 0 {
cmd = fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))
}
return cmd
}
type adbReader struct {
io.ReadCloser
skip int64
read int64
expected int64
}
func (r *adbReader) Read(b []byte) (n int, err error) {
n, err = r.ReadCloser.Read(b)
if s := r.skip; n > 0 && s > 0 {
_n := int64(n)
if _n <= s {
r.skip -= _n
return r.Read(b)
}
r.skip = 0
copy(b, b[s:n])
n -= int(s)
}
r.read += int64(n)
if err == io.EOF && r.read < r.expected {
fs.Debugf("adb", "Read: read: %d expected: %d n: %d", r.read, r.expected, n)
return n, io.ErrUnexpectedEOF
}
return n, err
}


@@ -1,20 +0,0 @@
// Test ADB filesystem interface
package adb_test
import (
"testing"
"github.com/ncw/rclone/backend/adb"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAdb:/data/local/tmp",
NilObject: (*adb.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: "TestAdb", Key: "copy_links", Value: "true"},
},
})
}


@@ -30,7 +30,7 @@ type Options struct {
Remote string `config:"remote"`
}
// NewFs constructs an Fs from the path.
// NewFs contstructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {


@@ -80,7 +80,7 @@ func TestNewFS(t *testing.T) {
wantEntry := test.entries[i]
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
require.Equal(t, wantEntry.size, gotEntry.Size(), what)
require.Equal(t, wantEntry.size, int64(gotEntry.Size()), what)
_, isDir := gotEntry.(fs.Directory)
require.Equal(t, wantEntry.isDir, isDir, what)
}


@@ -2,7 +2,6 @@ package all
import (
// Active file systems
_ "github.com/ncw/rclone/backend/adb"
_ "github.com/ncw/rclone/backend/alias"
_ "github.com/ncw/rclone/backend/amazonclouddrive"
_ "github.com/ncw/rclone/backend/azureblob"


@@ -21,7 +21,7 @@ import (
"strings"
"time"
acd "github.com/ncw/go-acd"
"github.com/ncw/go-acd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"


@@ -22,14 +22,12 @@ import (
"sync"
"time"
"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"
@@ -77,7 +75,7 @@ func init() {
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to chunked upload (<= 256MB).",
Default: defaultUploadCutoff,
Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true,
}, {
Name: "chunk_size",
@@ -85,7 +83,7 @@ func init() {
Note that this is stored in memory and there may be up to
"--transfers" chunks stored at once in memory.`,
Default: defaultChunkSize,
Default: fs.SizeSuffix(defaultChunkSize),
Advanced: true,
}, {
Name: "list_chunk",
@@ -137,7 +135,6 @@ type Fs struct {
root string // the path we are working on if any
opt Options // parsed config options
features *fs.Features // optional features
client *http.Client // http client we are using
svcURL *azblob.ServiceURL // reference to serviceURL
cntURL *azblob.ContainerURL // reference to containerURL
container string // the container we are working on
@@ -275,39 +272,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return
}
// httpClientFactory creates a Factory object that sends HTTP requests
// to a rclone's http.Client.
//
// copied from azblob.newDefaultHTTPClientFactory
func httpClientFactory(client *http.Client) pipeline.Factory {
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
r, err := client.Do(request.WithContext(ctx))
if err != nil {
err = pipeline.NewError(err, "HTTP request failed")
}
return pipeline.NewHTTPResponse(r), err
}
})
}
// newPipeline creates a Pipeline using the specified credentials and options.
//
// this code was copied from azblob.NewPipeline
func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
// Closest to API goes first; closest to the wire goes last
factories := []pipeline.Factory{
azblob.NewTelemetryPolicyFactory(o.Telemetry),
azblob.NewUniqueRequestIDPolicyFactory(),
azblob.NewRetryPolicyFactory(o.Retry),
c,
pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
azblob.NewRequestLogPolicyFactory(o.RequestLog),
}
return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
}
// NewFs constructs an Fs from the path, container:path
// NewFs contstructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
@@ -342,23 +307,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
}
f := &Fs{
name: name,
opt: *opt,
container: container,
root: directory,
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant).SetPacer(pacer.S3Pacer),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
client: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
SetTier: true,
GetTier: true,
}).Fill(f)
var (
u *url.URL
serviceURL azblob.ServiceURL
@@ -375,7 +323,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
containerURL = serviceURL.NewContainerURL(container)
case opt.SASURL != "":
@@ -384,7 +332,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to parse SAS URL")
}
// use anonymous credentials in case of sas url
pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
pipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
// Check if we have container level SAS or account level sas
parts := azblob.NewBlobURLParts(*u)
if parts.ContainerName != "" {
@@ -392,7 +340,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.New("Container name in SAS URL and container provided in command do not match")
}
f.container = parts.ContainerName
container = parts.ContainerName
containerURL = azblob.NewContainerURL(*u, pipeline)
} else {
serviceURL = azblob.NewServiceURL(*u, pipeline)
@@ -401,9 +349,24 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
default:
return nil, errors.New("Need account+key or connectionString or sasURL")
}
f.svcURL = &serviceURL
f.cntURL = &containerURL
f := &Fs{
name: name,
opt: *opt,
container: container,
root: directory,
svcURL: &serviceURL,
cntURL: &containerURL,
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
SetTier: true,
GetTier: true,
}).Fill(f)
if f.root != "" {
f.root += "/"
// Check to see if the (container,directory) is actually an existing file
@@ -417,8 +380,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
_, err := f.NewObject(remote)
if err != nil {
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
// File doesn't exist or is a directory so return old f
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
f.root = oldRoot
return f, nil
}
@@ -474,21 +437,6 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
}
// Returns whether file is a directory marker or not
func isDirectoryMarker(size int64, metadata azblob.Metadata, remote string) bool {
// Directory markers are 0 length
if size == 0 {
// Note that metadata with hdi_isfolder = true seems to be a
// defacto standard for marking blobs as directories.
endsWithSlash := strings.HasSuffix(remote, "/")
if endsWithSlash || remote == "" || metadata["hdi_isfolder"] == "true" {
return true
}
}
return false
}
// listFn is called from list to handle an object
type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
@@ -524,7 +472,6 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
MaxResults: int32(maxResults),
}
ctx := context.Background()
directoryMarkers := map[string]struct{}{}
for marker := (azblob.Marker{}); marker.NotDone(); {
var response *azblob.ListBlobsHierarchySegmentResponse
err := f.pacer.Call(func() (bool, error) {
@@ -554,23 +501,13 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
continue
}
remote := file.Name[len(f.root):]
if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
err = fn(remote, file, true)
if err != nil {
return err
}
// Keep track of directory markers. If recursing then
// there will be no Prefixes so no need to keep track
if !recurse {
directoryMarkers[remote] = struct{}{}
}
continue // skip directory marker
// Check for directory
isDirectory := strings.HasSuffix(remote, "/")
if isDirectory {
remote = remote[:len(remote)-1]
}
// Send object
err = fn(remote, file, false)
err = fn(remote, file, isDirectory)
if err != nil {
return err
}
@@ -583,10 +520,6 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
continue
}
remote = remote[len(f.root):]
// Don't send if already sent as a directory marker
if _, found := directoryMarkers[remote]; found {
continue
}
// Send object
err = fn(remote, nil, true)
if err != nil {
@@ -754,35 +687,6 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
return fs, fs.Update(in, src, options...)
}
// Check if the container exists
//
// NB this can return incorrect results if called immediately after container deletion
func (f *Fs) dirExists() (bool, error) {
options := azblob.ListBlobsSegmentOptions{
Details: azblob.BlobListingDetails{
Copy: false,
Metadata: false,
Snapshots: false,
UncommittedBlobs: false,
Deleted: false,
},
MaxResults: 1,
}
err := f.pacer.Call(func() (bool, error) {
ctx := context.Background()
_, err := f.cntURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "", options)
return f.shouldRetry(err)
})
if err == nil {
return true, nil
}
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
return false, nil
}
return false, err
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
f.containerOKMu.Lock()
@@ -790,15 +694,6 @@ func (f *Fs) Mkdir(dir string) error {
if f.containerOK {
return nil
}
if !f.containerDeleted {
exists, err := f.dirExists()
if err == nil {
f.containerOK = exists
}
if err != nil || exists {
return err
}
}
// now try to create the container
err := f.pacer.Call(func() (bool, error) {
@@ -1028,37 +923,27 @@ func (o *Object) setMetadata(metadata azblob.Metadata) {
// o.md5
// o.meta
func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetPropertiesResponse) (err error) {
metadata := info.NewMetadata()
size := info.ContentLength()
if isDirectoryMarker(size, metadata, o.remote) {
return fs.ErrorNotAFile
}
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
// this as base64 encoded string.
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
o.mimeType = info.ContentType()
o.size = size
o.modTime = info.LastModified()
o.size = info.ContentLength()
o.modTime = time.Time(info.LastModified())
o.accessTier = azblob.AccessTierType(info.AccessTier())
o.setMetadata(metadata)
o.setMetadata(info.NewMetadata())
return nil
}
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
metadata := info.Metadata
size := *info.Properties.ContentLength
if isDirectoryMarker(size, metadata, o.remote) {
return fs.ErrorNotAFile
}
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
// this as base64 encoded string.
o.md5 = base64.StdEncoding.EncodeToString(info.Properties.ContentMD5)
o.mimeType = *info.Properties.ContentType
o.size = size
o.size = *info.Properties.ContentLength
o.modTime = info.Properties.LastModified
o.accessTier = info.Properties.AccessTier
o.setMetadata(metadata)
o.setMetadata(info.Metadata)
return nil
}
@@ -1104,6 +989,12 @@ func (o *Object) readMetaData() (err error) {
return o.decodeMetaDataFromPropertiesResponse(blobProperties)
}
// timeString returns modTime as the number of milliseconds
// elapsed since January 1, 1970 UTC as a decimal string.
func timeString(modTime time.Time) string {
return strconv.FormatInt(modTime.UnixNano()/1E6, 10)
}
// parseTimeString converts a decimal string number of milliseconds
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
// the modTime variable.
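As a stand-alone illustration of the millisecond round trip described above (the parse step here is an assumption sketching what parseTimeString must undo, not the backend's actual code):

// Sketch of the modTime <-> decimal millisecond string round trip.
package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	modTime := time.Date(2018, 11, 26, 8, 32, 58, 0, time.UTC)

	// Equivalent of timeString: milliseconds since the Unix epoch as a string.
	s := strconv.FormatInt(modTime.UnixNano()/1e6, 10)
	fmt.Println(s)

	// Assumed inverse of timeString.
	ms, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Unix(ms/1000, (ms%1000)*int64(time.Millisecond)).UTC()) // 2018-11-26 08:32:58 +0000 UTC
}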


@@ -17,12 +17,12 @@ type Error struct {
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
}
// Error satisfies the error interface
// Error statisfies the error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
}
// Fatal satisfies the Fatal interface
// Fatal statisfies the Fatal interface
//
// It indicates which errors should be treated as fatal
func (e *Error) Fatal() bool {
@@ -100,7 +100,7 @@ func RemoveVersion(remote string) (t Timestamp, newRemote string) {
return Timestamp(newT), base[:versionStart] + ext
}
// IsZero returns true if the timestamp is uninitialized
// IsZero returns true if the timestamp is unitialised
func (t Timestamp) IsZero() bool {
return time.Time(t).IsZero()
}
@@ -136,7 +136,6 @@ type AuthorizeAccountResponse struct {
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"`


@@ -108,7 +108,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
Files above this size will be uploaded in chunks of "--b2-chunk-size".
This value should be set no larger than 4.657GiB (== 5GB).`,
Default: defaultUploadCutoff,
Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true,
}, {
Name: "chunk_size",
@@ -117,21 +117,8 @@ This value should be set no larger than 4.657GiB (== 5GB).`,
When uploading large files, chunk the file into this size. Note that
these chunks are buffered in memory and there might a maximum of
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files`,
Default: false,
Advanced: true,
}, {
Name: "download_url",
Help: `Custom endpoint for downloads.
This is usually set to a Cloudflare CDN URL as Backblaze offers
free egress for data downloaded through the Cloudflare network.
Leave blank if you want to use the endpoint provided by Backblaze.`,
minimim size.`,
Default: fs.SizeSuffix(defaultChunkSize),
Advanced: true,
}},
})
@@ -139,16 +126,14 @@ Leave blank if you want to use the endpoint provided by Backblaze.`,
// Options defines the configuration for this backend
type Options struct {
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
TestMode string `config:"test_mode"`
Versions bool `config:"versions"`
HardDelete bool `config:"hard_delete"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"`
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
TestMode string `config:"test_mode"`
Versions bool `config:"versions"`
HardDelete bool `config:"hard_delete"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
}
// Fs represents a remote b2 server
@@ -328,7 +313,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return
}
// NewFs constructs an Fs from the path, bucket:path
// NewFs contstructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
@@ -383,13 +368,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
// If this is a key limited to a single bucket, it must exist already
if f.bucket != "" && f.info.Allowed.BucketID != "" {
allowedBucket := f.info.Allowed.BucketName
if allowedBucket == "" {
return nil, errors.New("bucket that application key is restricted to no longer exists")
}
if allowedBucket != f.bucket {
return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
}
f.markBucketOK()
f.setBucketID(f.info.Allowed.BucketID)
}
@@ -1002,12 +980,6 @@ func (f *Fs) purge(oldOnly bool) error {
errReturn = err
}
}
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
if time.Since(time.Time(timestamp)).Hours() > 24 {
return true
}
return false
}
// Delete Config.Transfers in parallel
toBeDeleted := make(chan *api.File, fs.Config.Transfers)
@@ -1031,9 +1003,6 @@ func (f *Fs) purge(oldOnly bool) error {
if object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
toBeDeleted <- object
} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
toBeDeleted <- object
} else {
fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
}
@@ -1305,17 +1274,9 @@ var _ io.ReadCloser = &openFile{}
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: o.fs.info.DownloadURL,
Options: options,
}
// Use downloadUrl from backblaze if downloadUrl is not set
// otherwise use the custom downloadUrl
if o.fs.opt.DownloadURL == "" {
opts.RootURL = o.fs.info.DownloadURL
} else {
opts.RootURL = o.fs.opt.DownloadURL
}
// Download by id if set otherwise by name
if o.id != "" {
opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
@@ -1476,7 +1437,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
// Content-Type b2/x-auto to automatically set the stored Content-Type
// post upload. In the case where a file extension is absent or the
// lookup fails, the Content-Type is set to application/octet-stream. The
// Content-Type mappings can be pursued here.
// Content-Type mappings can be purused here.
//
// X-Bz-Content-Sha1
// required
@@ -1523,6 +1484,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
},
ContentLength: &size,
}
// for go1.8 (see release notes) we must nil the Body if we want a
// "Content-Length: 0" header which b2 requires for all files.
if size == 0 {
opts.Body = nil
}
var response api.FileInfo
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {


@@ -116,10 +116,8 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
},
}
// Set the SHA1 if known
if !o.fs.opt.DisableCheckSum {
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {


@@ -45,7 +45,7 @@ type Error struct {
RequestID string `json:"request_id"`
}
// Error returns a string for the error and satisfies the error interface
// Error returns a string for the error and statistifes the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
if e.Message != "" {
@@ -57,7 +57,7 @@ func (e *Error) Error() string {
return out
}
// Check Error satisfies the error interface
// Check Error statisfies the error interface
var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo


@@ -171,13 +171,13 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
authRetry := false
authRety := false
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
authRetry = true
authRety = true
fs.Debugf(nil, "Should retry: %v", err)
}
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// substitute reserved characters for box
@@ -530,10 +530,10 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil)
switch err {
case nil:
return existingObj, existingObj.Update(in, src, options...)
return exisitingObj, exisitingObj.Update(in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(in, src)


@@ -211,8 +211,8 @@ outer:
}
reqSize := remaining
if reqSize >= chunkSize {
reqSize = chunkSize
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
}
// Make a block of memory


@@ -576,7 +576,7 @@ The slice indices are similar to Python slices: start[:end]
start is the 0 based chunk number from the beginning of the file
to fetch inclusive. end is 0 based chunk number from the beginning
of the file to fetch exclusive.
of the file to fetch exclisive.
Both values can be negative, in which case they count from the back
of the file. The value "-5:" represents the last 5 chunks of a file.
@@ -870,7 +870,7 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
}
}
// ChangeNotify can subscribe multiple callers
// ChangeNotify can subsribe multiple callers
// this is coupled with the wrapped fs ChangeNotify (if it supports it)
// and also notifies other caches (i.e VFS) to clear out whenever something changes
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
@@ -1549,7 +1549,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}
if srcObj.isTempFile() {
// we check if the feature is still active
// we check if the feature is stil active
if f.opt.TempWritePath == "" {
fs.Errorf(srcObj, "can't copy - this is a local cached file but this feature is turned off this run")
return nil, fs.ErrorCantCopy
@@ -1625,7 +1625,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// if this is a temp object then we perform the changes locally
if srcObj.isTempFile() {
// we check if the feature is still active
// we check if the feature is stil active
if f.opt.TempWritePath == "" {
fs.Errorf(srcObj, "can't move - this is a local cached file but this feature is turned off this run")
return nil, fs.ErrorCantMove


@@ -387,10 +387,10 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
// write the object
o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
require.Equal(t, o.Size(), testSize)
require.Equal(t, o.Size(), int64(testSize))
time.Sleep(time.Second * 3)
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(testSize), false)
require.NoError(t, err)
require.Equal(t, int64(len(checkSample)), o.Size())
@@ -726,7 +726,6 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
// Call the rc function
m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"})
require.NoError(t, err)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
@@ -736,21 +735,18 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
co, err = rootFs.NewObject("data.bin")
require.NoError(t, err)
require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
_, err = runInstance.list(t, rootFs, "")
require.NoError(t, err)
li1, err := runInstance.list(t, rootFs, "")
// create some rand test data
testData2 := randStringBytes(int(chunkSize))
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
// list should have 1 item only
li1, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
li1, err = runInstance.list(t, rootFs, "")
require.Len(t, li1, 1)
// Call the rc function
m, err = cacheExpire.Fn(rc.Params{"remote": "/"})
require.NoError(t, err)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
@@ -758,7 +754,6 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
// list should have 2 items now
li2, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, li2, 2)
}
@@ -1495,8 +1490,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
var err error
if r.useMount {
var f *os.File
f, err = os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
f, err := os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return err
}
@@ -1506,8 +1500,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
}()
_, err = f.WriteString(data + append)
} else {
var obj1 fs.Object
obj1, err = rootFs.NewObject(src)
obj1, err := rootFs.NewObject(src)
if err != nil {
return err
}
@@ -1639,13 +1632,15 @@ func (r *run) getCacheFs(f fs.Fs) (*cache.Fs, error) {
cfs, ok := f.(*cache.Fs)
if ok {
return cfs, nil
}
if f.Features().UnWrap != nil {
cfs, ok := f.Features().UnWrap().(*cache.Fs)
if ok {
return cfs, nil
} else {
if f.Features().UnWrap != nil {
cfs, ok := f.Features().UnWrap().(*cache.Fs)
if ok {
return cfs, nil
}
}
}
return nil, errors.New("didn't found a cache fs")
}

View File

@@ -15,7 +15,7 @@ import (
"time"
"github.com/ncw/rclone/fs"
cache "github.com/patrickmn/go-cache"
"github.com/patrickmn/go-cache"
"golang.org/x/net/websocket"
)

View File

@@ -8,7 +8,7 @@ import (
"time"
"github.com/ncw/rclone/fs"
cache "github.com/patrickmn/go-cache"
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
)

View File

@@ -398,7 +398,7 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
}
err = bucket.Put([]byte(cachedObject.Name), encoded)
err = bucket.Put([]byte(cachedObject.Name), []byte(encoded))
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
}
@@ -809,7 +809,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
}
err = bucket.Put([]byte(destPath), encoded)
err = bucket.Put([]byte(destPath), []byte(encoded))
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}
@@ -1049,7 +1049,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
}
err = bucket.Put([]byte(destPath), encoded)
err = bucket.Put([]byte(destPath), []byte(encoded))
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}

View File

@@ -41,7 +41,6 @@ var (
ErrorBadDecryptControlChar = errors.New("bad decryption - contains control chars")
ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize")
ErrorTooShortAfterDecode = errors.New("too short after base32 decode")
ErrorTooLongAfterDecode = errors.New("too long after base32 decode")
ErrorEncryptedFileTooShort = errors.New("file is too short to be encrypted")
ErrorEncryptedFileBadHeader = errors.New("file has truncated block header")
ErrorEncryptedBadMagic = errors.New("not an encrypted file - bad magic string")
@@ -285,9 +284,6 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
// not possible if decodeFilename() working correctly
return "", ErrorTooShortAfterDecode
}
if len(rawCiphertext) > 2048 {
return "", ErrorTooLongAfterDecode
}
paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt)
plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext)
if err != nil {
@@ -463,7 +459,7 @@ func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
if int(newRune) < base {
newRune += 256
}
_, _ = result.WriteRune(newRune)
_, _ = result.WriteRune(rune(newRune))
default:
_, _ = result.WriteRune(runeValue)
@@ -748,7 +744,7 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
if !bytes.Equal(readBuf[:fileMagicSize], fileMagicBytes) {
return nil, fh.finishAndClose(ErrorEncryptedBadMagic)
}
// retrieve the nonce
// retreive the nonce
fh.nonce.fromBuf(readBuf[fileMagicSize:])
fh.initialNonce = fh.nonce
return fh, nil

View File

@@ -194,10 +194,6 @@ func TestEncryptSegment(t *testing.T) {
func TestDecryptSegment(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := make([]byte, 3328)
for i := range longName {
longName[i] = 'a'
}
c, _ := newCipher(NameEncryptionStandard, "", "", true)
for _, test := range []struct {
in string
@@ -205,7 +201,6 @@ func TestDecryptSegment(t *testing.T) {
}{
{"64=", ErrorBadBase32Encoding},
{"!", base32.CorruptInputError(0)},
{string(longName), ErrorTooLongAfterDecode},
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},

View File

@@ -122,7 +122,7 @@ func NewCipher(m configmap.Mapper) (Cipher, error) {
return newCipherForConfig(opt)
}
// NewFs constructs an Fs from the path, container:path
// NewFs contstructs an Fs from the path, container:path
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
@@ -555,7 +555,7 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
}
// ComputeHash takes the nonce from o, and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
// src with it, and calcuates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {

View File

@@ -1,7 +1,4 @@
// Package drive interfaces with the Google Drive object storage system
// +build go1.9
package drive
// FIXME need to deal with some corner cases
@@ -21,7 +18,6 @@ import (
"net/url"
"os"
"path"
"sort"
"strconv"
"strings"
"sync"
@@ -40,7 +36,6 @@ import (
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
@@ -56,8 +51,7 @@ const (
driveFolderType = "application/vnd.google-apps.folder"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
defaultMinSleep = fs.Duration(100 * time.Millisecond)
defaultBurst = 100
minSleep = 10 * time.Millisecond
defaultExportExtensions = "docx,xlsx,pptx,svg"
scopePrefix = "https://www.googleapis.com/auth/"
defaultScope = "drive"
@@ -128,29 +122,6 @@ var (
_linkTemplates map[string]*template.Template // available link types
)
// Parse the scopes option returning a slice of scopes
func driveScopes(scopesString string) (scopes []string) {
if scopesString == "" {
scopesString = defaultScope
}
for _, scope := range strings.Split(scopesString, ",") {
scope = strings.TrimSpace(scope)
scopes = append(scopes, scopePrefix+scope)
}
return scopes
}
// Returns true if one of the scopes was "drive.appfolder"
func driveScopesContainsAppFolder(scopes []string) bool {
for _, scope := range scopes {
if scope == scopePrefix+"drive.appfolder" {
return true
}
}
return false
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -165,14 +136,18 @@ func init() {
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
return
}
// Fill in the scopes
driveConfig.Scopes = driveScopes(opt.Scope)
// Set the root_folder_id if using drive.appfolder
if driveScopesContainsAppFolder(driveConfig.Scopes) {
m.Set("root_folder_id", "appDataFolder")
if opt.Scope == "" {
opt.Scope = defaultScope
}
driveConfig.Scopes = nil
for _, scope := range strings.Split(opt.Scope, ",") {
driveConfig.Scopes = append(driveConfig.Scopes, scopePrefix+strings.TrimSpace(scope))
// Set the root_folder_id if using drive.appfolder
if scope == "drive.appfolder" {
m.Set("root_folder_id", "appDataFolder")
}
}
if opt.ServiceAccountFile == "" {
err = oauthutil.Config("drive", name, m, driveConfig)
if err != nil {
@@ -359,16 +334,6 @@ will download it anyway.`,
Default: fs.SizeSuffix(-1),
Help: "If Object's are greater, use drive v2 API to download.",
Advanced: true,
}, {
Name: "pacer_min_sleep",
Default: defaultMinSleep,
Help: "Minimum time to sleep between API calls.",
Advanced: true,
}, {
Name: "pacer_burst",
Default: defaultBurst,
Help: "Number of API calls to allow without sleeping.",
Advanced: true,
}},
})
@@ -411,8 +376,6 @@ type Options struct {
AcknowledgeAbuse bool `config:"acknowledge_abuse"`
KeepRevisionForever bool `config:"keep_revision_forever"`
V2DownloadMinSize fs.SizeSuffix `config:"v2_download_min_size"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
PacerBurst int `config:"pacer_burst"`
}
// Fs represents a remote drive server
@@ -482,7 +445,7 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// shouldRetry determines whether a given err rates being retried
// shouldRetry determines whehter a given err rates being retried
func shouldRetry(err error) (bool, error) {
if err == nil {
return false, nil
@@ -733,16 +696,12 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
// Figure out if the user wants to use a team drive
func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
// Stop if we are running non-interactive config
if fs.Config.AutoConfirm {
return nil
}
if opt.TeamDriveID == "" {
fmt.Printf("Configure this as a team drive?\n")
} else {
fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
}
if !config.Confirm() {
if !config.ConfirmWithDefault(false) {
return nil
}
client, err := createOAuthClient(opt, name, m)
@@ -759,7 +718,7 @@ func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
listFailed := false
for {
var teamDrives *drive.TeamDriveList
err = newPacer(opt).Call(func() (bool, error) {
err = newPacer().Call(func() (bool, error) {
teamDrives, err = listTeamDrives.Do()
return shouldRetry(err)
})
@@ -789,13 +748,12 @@ func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
}
// newPacer makes a pacer configured for drive
func newPacer(opt *Options) *pacer.Pacer {
return pacer.New().SetMinSleep(time.Duration(opt.PacerMinSleep)).SetBurst(opt.PacerBurst).SetPacer(pacer.GoogleDrivePacer)
func newPacer() *pacer.Pacer {
return pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer)
}
func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
scopes := driveScopes(opt.Scope)
conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
conf, err := google.JWTConfigFromJSON(credentialsData, driveConfig.Scopes...)
if err != nil {
return nil, errors.Wrap(err, "error processing credentials")
}
@@ -863,7 +821,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return
}
// NewFs constructs an Fs from the path, container:path
// NewFs contstructs an Fs from the path, container:path
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
@@ -894,7 +852,7 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
name: name,
root: root,
opt: *opt,
pacer: newPacer(opt),
pacer: newPacer(),
}
f.isTeamDrive = opt.TeamDriveID != ""
f.features = (&fs.Features{
@@ -1340,46 +1298,17 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
return entries, nil
}
// listREntry is a task to be executed by a listRRunner
type listREntry struct {
id, path string
}
// listRSlices is a helper struct to sort two slices at once
type listRSlices struct {
dirs []string
paths []string
}
func (s listRSlices) Sort() {
sort.Sort(s)
}
func (s listRSlices) Len() int {
return len(s.dirs)
}
func (s listRSlices) Swap(i, j int) {
s.dirs[i], s.dirs[j] = s.dirs[j], s.dirs[i]
s.paths[i], s.paths[j] = s.paths[j], s.paths[i]
}
func (s listRSlices) Less(i, j int) bool {
return s.dirs[i] < s.dirs[j]
}
// listRRunner will read dirIDs from the in channel, perform the file listing and call cb with each DirEntry.
//
// In each cycle it will read up to grouping entries from the in channel without blocking.
// In each cycle, will wait up to 10ms to read up to grouping entries from the in channel.
// If an error occurs it will be sent to the out channel and then return. Once the in channel is closed,
// nil is sent to the out channel and the function returns.
func (f *Fs) listRRunner(wg *sync.WaitGroup, in <-chan listREntry, out chan<- error, cb func(fs.DirEntry) error, grouping int) {
func (f *Fs) listRRunner(wg *sync.WaitGroup, in <-chan string, out chan<- error, cb func(fs.DirEntry) error, grouping int) {
var dirs []string
var paths []string
for dir := range in {
dirs = append(dirs[:0], dir.id)
paths = append(paths[:0], dir.path)
dirs = append(dirs[:0], dir)
wait := time.After(10 * time.Millisecond)
waitloop:
for i := 1; i < grouping; i++ {
select {
@@ -1387,32 +1316,31 @@ func (f *Fs) listRRunner(wg *sync.WaitGroup, in <-chan listREntry, out chan<- er
if !ok {
break waitloop
}
dirs = append(dirs, d.id)
paths = append(paths, d.path)
default:
dirs = append(dirs, d)
case <-wait:
break waitloop
}
}
listRSlices{dirs, paths}.Sort()
var iErr error
_, err := f.list(dirs, "", false, false, false, func(item *drive.File) bool {
for _, parent := range item.Parents {
// only handle parents that are in the requested dirs list
i := sort.SearchStrings(dirs, parent)
if i == len(dirs) || dirs[i] != parent {
continue
}
remote := path.Join(paths[i], item.Name)
entry, err := f.itemToDirEntry(remote, item)
if err != nil {
iErr = err
return true
parentPath := ""
if len(item.Parents) > 0 {
p, ok := f.dirCache.GetInv(item.Parents[0])
if ok {
parentPath = p
}
}
remote := path.Join(parentPath, item.Name)
entry, err := f.itemToDirEntry(remote, item)
if err != nil {
iErr = err
return true
}
err = cb(entry)
if err != nil {
iErr = err
return true
}
err = cb(entry)
if err != nil {
iErr = err
return true
}
return false
})
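(Aside: the batching in the new listRRunner is easier to see in isolation. Below is a minimal, hypothetical Go sketch of the pattern — take one directory ID from the channel, then opportunistically drain whatever else is already queued, without waiting — so a single Drive API call can list several directories at once. The names and the main function are illustrative only, not rclone code.)

```go
package main

import "fmt"

// drainBatch reads one value from in (blocking), then collects up to
// max-1 further values that are already queued, without waiting.
// It returns ok=false once in has been closed and fully drained.
func drainBatch(in <-chan string, max int) (batch []string, ok bool) {
	first, ok := <-in
	if !ok {
		return nil, false
	}
	batch = []string{first}
	for len(batch) < max {
		select {
		case v, ok := <-in:
			if !ok {
				return batch, true
			}
			batch = append(batch, v)
		default:
			// nothing queued right now - send what we have
			return batch, true
		}
	}
	return batch, true
}

func main() {
	in := make(chan string, 8)
	for _, id := range []string{"dirA", "dirB", "dirC"} {
		in <- id
	}
	close(in)
	for {
		batch, ok := drainBatch(in, 2)
		if !ok {
			break
		}
		fmt.Println(batch) // prints [dirA dirB] then [dirC]
	}
}
```

The older code instead waited up to 10ms for more IDs to arrive; dropping the wait keeps the runner from stalling when the channel is quiet.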
@@ -1463,44 +1391,30 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if err != nil {
return err
}
if directoryID == "root" {
var info *drive.File
err = f.pacer.CallNoRetry(func() (bool, error) {
info, err = f.svc.Files.Get("root").
Fields("id").
SupportsTeamDrives(f.isTeamDrive).
Do()
return shouldRetry(err)
})
if err != nil {
return err
}
directoryID = info.Id
}
mu := sync.Mutex{} // protects in and overflow
wg := sync.WaitGroup{}
in := make(chan listREntry, inputBuffer)
in := make(chan string, inputBuffer)
out := make(chan error, fs.Config.Checkers)
list := walk.NewListRHelper(callback)
overflow := []listREntry{}
overfflow := []string{}
cb := func(entry fs.DirEntry) error {
mu.Lock()
defer mu.Unlock()
if d, isDir := entry.(*fs.Dir); isDir && in != nil {
select {
case in <- listREntry{d.ID(), d.Remote()}:
case in <- d.ID():
wg.Add(1)
default:
overflow = append(overflow, listREntry{d.ID(), d.Remote()})
overfflow = append(overfflow, d.ID())
}
}
return list.Add(entry)
}
wg.Add(1)
in <- listREntry{directoryID, dir}
in <- directoryID
for i := 0; i < fs.Config.Checkers; i++ {
go f.listRRunner(&wg, in, out, cb, grouping)
@@ -1509,18 +1423,18 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
// wait until the all directories are processed
wg.Wait()
// if the input channel overflowed add the collected entries to the channel now
for len(overflow) > 0 {
for len(overfflow) > 0 {
mu.Lock()
l := len(overflow)
// only fill half of the channel to prevent entries beeing put into overflow again
l := len(overfflow)
// only fill half of the channel to prevent entries beeing put into overfflow again
if l > inputBuffer/2 {
l = inputBuffer / 2
}
wg.Add(l)
for _, d := range overflow[:l] {
for _, d := range overfflow[:l] {
in <- d
}
overflow = overflow[l:]
overfflow = overfflow[l:]
mu.Unlock()
// wait again for the completion of all directories
@@ -1711,14 +1625,14 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error {
return shouldRetry(err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
return errors.Wrapf(err, "MergDirs move failed on %q in %v", info.Name, srcDir)
}
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
err = f.rmdir(srcDir.ID(), true)
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
return errors.Wrapf(err, "MergDirs move failed to rmdir %q", srcDir)
}
}
return nil
@@ -2137,7 +2051,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
// Automatically restarts itself in case of unexpected behaviour of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
@@ -2244,13 +2158,11 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), startPage
// translate the parent dir of this object
if len(change.File.Parents) > 0 {
for _, parent := range change.File.Parents {
if parentPath, ok := f.dirCache.GetInv(parent); ok {
// and append the drive file name to compute the full file name
newPath := path.Join(parentPath, change.File.Name)
// this will now clear the actual file too
pathsToClear = append(pathsToClear, entryType{path: newPath, entryType: changeType})
}
if parentPath, ok := f.dirCache.GetInv(change.File.Parents[0]); ok {
// and append the drive file name to compute the full file name
newPath := path.Join(parentPath, change.File.Name)
// this will now clear the actual file too
pathsToClear = append(pathsToClear, entryType{path: newPath, entryType: changeType})
}
} else { // a true root object that is changed
pathsToClear = append(pathsToClear, entryType{path: change.File.Name, entryType: changeType})
@@ -2542,32 +2454,16 @@ func (o *documentObject) Open(options ...fs.OpenOption) (in io.ReadCloser, err e
// Update the size with what we are reading as it can change from
// the HEAD in the listing to this GET. This stops rclone marking
// the transfer as corrupted.
var offset, end int64 = 0, -1
var newOptions = options[:0]
for _, o := range options {
// Note that Range requests don't work on Google docs:
// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
// So do a subset of them manually
switch x := o.(type) {
case *fs.RangeOption:
offset, end = x.Start, x.End
case *fs.SeekOption:
offset, end = x.Offset, -1
default:
newOptions = append(newOptions, o)
if _, ok := o.(*fs.RangeOption); ok {
return nil, errors.New("partial downloads are not supported while exporting Google Documents")
}
}
options = newOptions
if offset != 0 {
return nil, errors.New("partial downloads are not supported while exporting Google Documents")
}
in, err = o.baseObject.open(o.url, options...)
if in != nil {
in = &openDocumentFile{o: o, in: in}
}
if end >= 0 {
in = readers.NewLimitedReadCloser(in, end-offset+1)
}
return
}
func (o *linkObject) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
@@ -2633,9 +2529,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return err
}
newO, err := o.fs.newObjectWithInfo(src.Remote(), info)
if err != nil {
return err
}
switch newO := newO.(type) {
case *Object:
*o = *newO
@@ -2674,9 +2567,6 @@ func (o *documentObject) Update(in io.Reader, src fs.ObjectInfo, options ...fs.O
remote = remote[:len(remote)-o.extLen]
newO, err := o.fs.newObjectWithInfo(remote, info)
if err != nil {
return err
}
switch newO := newO.(type) {
case *documentObject:
*o = *newO

View File

@@ -1,5 +1,3 @@
// +build go1.9
package drive
import (
@@ -22,31 +20,6 @@ import (
"google.golang.org/api/drive/v3"
)
func TestDriveScopes(t *testing.T) {
for _, test := range []struct {
in string
want []string
wantFlag bool
}{
{"", []string{
"https://www.googleapis.com/auth/drive",
}, false},
{" drive.file , drive.readonly", []string{
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive.readonly",
}, false},
{" drive.file , drive.appfolder", []string{
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive.appfolder",
}, true},
} {
got := driveScopes(test.in)
assert.Equal(t, test.want, got, test.in)
gotFlag := driveScopesContainsAppFolder(got)
assert.Equal(t, test.wantFlag, gotFlag, test.in)
}
}
/*
var additionalMimeTypes = map[string]string{
"application/vnd.ms-excel.sheet.macroenabled.12": ".xlsm",

View File

@@ -1,7 +1,4 @@
// Test Drive filesystem interface
// +build go1.9
package drive
import (

View File

@@ -1,6 +0,0 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build !go1.9
package drive

View File

@@ -8,8 +8,6 @@
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS
// +build go1.9
package drive
import (
@@ -185,7 +183,7 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk
// been 200 OK.
//
// So parse the response out of the body. We aren't expecting
// any other 2xx codes, so we parse it unconditionally on
// any other 2xx codes, so we parse it unconditionaly on
// StatusCode
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
return 598, err

View File

@@ -31,7 +31,6 @@ import (
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
@@ -130,8 +129,8 @@ Any files larger than this will be uploaded in chunks of this size.
Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries. Setting this larger will increase the speed
slightly (at most 10%% for 128MB in tests) at the cost of using more
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Default: defaultChunkSize,
memory. It can be set smaller if you are tight on memory.`, fs.SizeSuffix(maxChunkSize)),
Default: fs.SizeSuffix(defaultChunkSize),
Advanced: true,
}, {
Name: "impersonate",
@@ -204,17 +203,8 @@ func shouldRetry(err error) (bool, error) {
return false, err
}
baseErrString := errors.Cause(err).Error()
// handle any official Retry-After header from Dropbox's SDK first
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
time.Sleep(time.Duration(e.RateLimitError.RetryAfter) * time.Second)
}
return true, err
}
// Keep old behavior for backward compatibility
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
// FIXME there is probably a better way of doing this!
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
return true, err
}
return fserrors.ShouldRetry(err), err
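(Aside: the lines above show the newer Dropbox behaviour of honouring the SDK's Retry-After information before retrying. A rough standalone sketch of the same idea follows; the auth.RateLimitAPIError type and its RateLimitError.RetryAfter field are taken from this hunk and should be treated as assumptions rather than a verified API reference.)

```go
package dropboxsketch

import (
	"time"

	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
)

// retryAfterAware sleeps for the server-advertised delay when the SDK
// reports a rate limit error, then signals that the call may be retried.
// The field layout mirrors the hunk above and is not independently verified.
func retryAfterAware(err error) (bool, error) {
	if e, ok := err.(auth.RateLimitAPIError); ok {
		if e.RateLimitError.RetryAfter > 0 {
			time.Sleep(time.Duration(e.RateLimitError.RetryAfter) * time.Second)
		}
		return true, err
	}
	return false, err
}
```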
@@ -239,7 +229,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
return
}
// NewFs constructs an Fs from the path, container:path
// NewFs contstructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)

View File

@@ -166,7 +166,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
f.poolMu.Unlock()
}
// NewFs constructs an Fs from the path, container:path
// NewFs contstructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
// Parse config into Options struct
@@ -646,21 +646,7 @@ func (f *ftpReadCloser) Read(p []byte) (n int, err error) {
// Close the FTP reader and return the connection to the pool
func (f *ftpReadCloser) Close() error {
var err error
errchan := make(chan error, 1)
go func() {
errchan <- f.rc.Close()
}()
// Wait for Close for up to 60 seconds
timer := time.NewTimer(60 * time.Second)
select {
case err = <-errchan:
timer.Stop()
case <-timer.C:
// if timer fired assume no error but connection dead
fs.Errorf(f.f, "Timeout when waiting for connection Close")
return nil
}
err := f.rc.Close()
// if errors while reading or closing, dump the connection
if err != nil || f.err != nil {
_ = f.c.Quit()
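(Aside: the goroutine-plus-timer construction above is a general Go pattern for bounding a call that may block forever. A stripped-down, hypothetical sketch with a fake slow closer standing in for the FTP connection:)

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// closeWithTimeout runs closeFn in a goroutine and waits at most d for it
// to return. If the deadline passes first, the connection is assumed dead
// and the eventual result of closeFn is abandoned.
func closeWithTimeout(closeFn func() error, d time.Duration) error {
	errc := make(chan error, 1) // buffered so the goroutine can't leak on timeout
	go func() { errc <- closeFn() }()
	select {
	case err := <-errc:
		return err
	case <-time.After(d):
		return errors.New("timeout waiting for Close")
	}
}

func main() {
	slowClose := func() error {
		time.Sleep(2 * time.Second) // pretend the server never answers
		return nil
	}
	fmt.Println(closeWithTimeout(slowClose, 500*time.Millisecond))
}
```

The backend variant returns nil on timeout and simply dumps the connection from the pool; the sketch surfaces an error instead to keep the example self-contained.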

View File

@@ -1,7 +1,4 @@
// Package googlecloudstorage provides an interface to Google Cloud Storage
// +build go1.9
package googlecloudstorage
/*
@@ -162,36 +159,21 @@ func init() {
}, {
Value: "asia-east1",
Help: "Taiwan.",
}, {
Value: "asia-east2",
Help: "Hong Kong.",
}, {
Value: "asia-northeast1",
Help: "Tokyo.",
}, {
Value: "asia-south1",
Help: "Mumbai.",
}, {
Value: "asia-southeast1",
Help: "Singapore.",
}, {
Value: "australia-southeast1",
Help: "Sydney.",
}, {
Value: "europe-north1",
Help: "Finland.",
}, {
Value: "europe-west1",
Help: "Belgium.",
}, {
Value: "europe-west2",
Help: "London.",
}, {
Value: "europe-west3",
Help: "Frankfurt.",
}, {
Value: "europe-west4",
Help: "Netherlands.",
}, {
Value: "us-central1",
Help: "Iowa.",
@@ -204,9 +186,6 @@ func init() {
}, {
Value: "us-west1",
Help: "Oregon.",
}, {
Value: "us-west2",
Help: "California.",
}},
}, {
Name: "storage_class",
@@ -300,7 +279,7 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// shouldRetry determines whether a given err rates being retried
// shouldRetry determines whehter a given err rates being retried
func shouldRetry(err error) (again bool, errOut error) {
again = false
if err != nil {
@@ -348,7 +327,7 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
// NewFs constructs an Fs from the path, bucket:path
// NewFs contstructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
var oAuthClient *http.Client

View File

@@ -1,7 +1,4 @@
// Test GoogleCloudStorage filesystem interface
// +build go1.9
package googlecloudstorage_test
import (

View File

@@ -1,6 +0,0 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build !go1.9
package googlecloudstorage

View File

@@ -40,9 +40,6 @@ func init() {
Examples: []fs.OptionExample{{
Value: "https://example.com",
Help: "Connect to example.com",
}, {
Value: "https://user:pass@example.com",
Help: "Connect to example.com using a username and password",
}},
}},
}
@@ -196,7 +193,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
}
err := o.stat()
if err != nil {
return nil, err
return nil, errors.Wrap(err, "Stat failed")
}
return o, nil
}
@@ -251,7 +248,7 @@ func parseName(base *url.URL, name string) (string, error) {
}
// calculate the name relative to the base
name = u.Path[len(base.Path):]
// mustn't be empty
// musn't be empty
if name == "" {
return "", errNameIsEmpty
}
@@ -419,9 +416,6 @@ func (o *Object) url() string {
func (o *Object) stat() error {
url := o.url()
res, err := o.fs.httpClient.Head(url)
if err == nil && res.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
err = statusError(res, err)
if err != nil {
return errors.Wrap(err, "failed to stat")

View File

@@ -144,11 +144,6 @@ func TestNewObject(t *testing.T) {
dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
// check object not found
o, err = f.NewObject("not found.txt")
assert.Nil(t, o)
assert.Equal(t, fs.ErrorObjectNotFound, err)
}
func TestOpen(t *testing.T) {

View File

@@ -9,10 +9,8 @@ package hubic
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strings"
"time"
"github.com/ncw/rclone/backend/swift"
@@ -126,9 +124,7 @@ func (f *Fs) getCredentials() (err error) {
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode < 200 || resp.StatusCode > 299 {
body, _ := ioutil.ReadAll(resp.Body)
bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
return errors.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
return errors.Errorf("failed to get credentials: %s", resp.Status)
}
decoder := json.NewDecoder(resp.Body)
var result credentials

View File

@@ -9,10 +9,7 @@ import (
)
const (
// default time format for almost all request and responses
timeFormat = "2006-01-02-T15:04:05Z0700"
// the API server seems to use a different format
apiTimeFormat = "2006-01-02T15:04:05Z07:00"
)
// Time represents time values in the Jottacloud API. It uses a custom RFC3339 like format.
@@ -43,9 +40,6 @@ func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
// Return Time string in Jottacloud format
func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
// Flag is a hacky type for checking if an attribute is present
type Flag bool
@@ -64,15 +58,6 @@ func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
return attr, errors.New("unimplemented")
}
// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
}
/*
GET http://www.jottacloud.com/JFS/<account>
@@ -280,37 +265,3 @@ func (e *Error) Error() string {
}
return out
}
// AllocateFileRequest to prepare an upload to Jottacloud
type AllocateFileRequest struct {
Bytes int64 `json:"bytes"`
Created string `json:"created"`
Md5 string `json:"md5"`
Modified string `json:"modified"`
Path string `json:"path"`
}
// AllocateFileResponse for upload requests
type AllocateFileResponse struct {
Name string `json:"name"`
Path string `json:"path"`
State string `json:"state"`
UploadID string `json:"upload_id"`
UploadURL string `json:"upload_url"`
Bytes int64 `json:"bytes"`
ResumePos int64 `json:"resume_pos"`
}
// UploadResponse after an upload
type UploadResponse struct {
Name string `json:"name"`
Path string `json:"path"`
Kind string `json:"kind"`
ContentID string `json:"content_id"`
Bytes int64 `json:"bytes"`
Md5 string `json:"md5"`
Created int64 `json:"created"`
Modified int64 `json:"modified"`
Deleted interface{} `json:"deleted"`
Mime string `json:"mime"`
}

View File

@@ -7,7 +7,6 @@ import (
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
@@ -27,41 +26,22 @@ import (
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"golang.org/x/oauth2"
)
// Globals
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Sync" // nolint
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/files/v1/"
baseURL = "https://www.jottacloud.com/"
tokenURL = "https://api.jottacloud.com/auth/v1/token"
cachePrefix = "rclone-jcmd5-"
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
configUsername = "user"
)
var (
// Description of how to auth for this app for a personal account
oauthConfig = &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: tokenURL,
TokenURL: tokenURL,
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Sync"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com"
shareURL = "https://www.jottacloud.com/"
cachePrefix = "rclone-jcmd5-"
)
// Register with Fs
@@ -70,71 +50,13 @@ func init() {
Name: "jottacloud",
Description: "JottaCloud",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm() {
return
}
}
username, ok := m.Get(configUsername)
if !ok {
log.Fatalf("No username defined")
}
password := config.GetPassword("Your Jottacloud password is only required during config and will not be stored.")
// prepare out token request with username and password
srv := rest.NewClient(fshttp.NewClient(fs.Config))
values := url.Values{}
values.Set("grant_type", "PASSWORD")
values.Set("password", password)
values.Set("username", username)
values.Set("client_id", oauthConfig.ClientID)
values.Set("client_secret", oauthConfig.ClientSecret)
opts := rest.Opts{
Method: "POST",
RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded",
Parameters: values,
}
var jsonToken api.TokenJSON
resp, err := srv.CallJSON(&opts, nil, &jsonToken)
if err != nil {
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
if resp != nil {
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
fmt.Printf("This account has 2 factor authentication enabled you will receive a verification code via SMS.\n")
fmt.Printf("Enter verification code> ")
authCode := config.ReadLine()
authCode = strings.Replace(authCode, "-", "", -1) // the SMS received contains a pair of 3 digit numbers separated by '-' but the API wants a single 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
resp, err = srv.CallJSON(&opts, nil, &jsonToken)
}
}
if err != nil {
log.Fatalf("Failed to get resource token: %v", err)
}
}
var token oauth2.Token
token.AccessToken = jsonToken.AccessToken
token.RefreshToken = jsonToken.RefreshToken
token.TokenType = jsonToken.TokenType
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
// finally save them in the config
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
log.Fatalf("Error while setting token: %s", err)
}
},
Options: []fs.Option{{
Name: configUsername,
Help: "User Name:",
Name: "user",
Help: "User Name",
}, {
Name: "pass",
Help: "Password.",
IsPassword: true,
}, {
Name: "mountpoint",
Help: "The mountpoint to use.",
@@ -161,11 +83,6 @@ func init() {
Help: "Remove existing public link to file/folder with link command rather than creating.\nDefault is false, meaning link command will create or retrieve public link.",
Default: false,
Advanced: true,
}, {
Name: "upload_resume_limit",
Help: "Files bigger than this can be resumed if the upload fail's.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
Advanced: true,
}},
})
}
@@ -173,25 +90,23 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
User string `config:"user"`
Pass string `config:"pass"`
Mountpoint string `config:"mountpoint"`
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
HardDelete bool `config:"hard_delete"`
Unlink bool `config:"unlink"`
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
}
// Fs represents a remote jottacloud
type Fs struct {
name string
root string
user string
opt Options
features *fs.Features
endpointURL string
srv *rest.Client
apiSrv *rest.Client
pacer *pacer.Pacer
tokenRenewer *oauthutil.Renew // renew the token on expiry
name string
root string
user string
opt Options
features *fs.Features
endpointURL string
srv *rest.Client
pacer *pacer.Pacer
}
// Object describes a jottacloud object
@@ -346,29 +261,6 @@ func (o *Object) filePath() string {
return o.fs.filePath(o.remote)
}
// Jottacloud requires the grant_type 'refresh_token' string
// to be uppercase and throws a 400 Bad Request if we use the
// lower case used by the oauth2 module
//
// This filter catches all refresh requests, reads the body,
// changes the case and then sends it on
func grantTypeFilter(req *http.Request) {
if tokenURL == req.URL.String() {
// read the entire body
refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil {
return
}
_ = req.Body.Close()
// make the refresh token upper case
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
// set the new ReadCloser (with a dummy Close())
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
}
}
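(Aside: outside rclone's fshttp plumbing, the same body-rewriting trick can be expressed as a plain http.RoundTripper. The sketch below is illustrative only — rclone actually installs grantTypeFilter via the transport's SetRequestFilter hook, as the NewFs code below shows — and the URL check is an assumption made for the example.)

```go
package main

import (
	"bytes"
	"io/ioutil"
	"net/http"
	"strings"
)

// upperGrantType rewrites grant_type=refresh_token into the upper-case form
// that some token endpoints (like Jottacloud's) insist on, and forwards the
// request unchanged otherwise.
type upperGrantType struct{ next http.RoundTripper }

func (t upperGrantType) RoundTrip(req *http.Request) (*http.Response, error) {
	if req.Body != nil && strings.Contains(req.URL.Path, "/auth/v1/token") {
		body, err := ioutil.ReadAll(req.Body)
		if err != nil {
			return nil, err
		}
		_ = req.Body.Close()
		body = bytes.Replace(body, []byte("grant_type=refresh_token"), []byte("grant_type=REFRESH_TOKEN"), 1)
		req.Body = ioutil.NopCloser(bytes.NewReader(body))
		req.ContentLength = int64(len(body))
	}
	return t.next.RoundTrip(req)
}

func main() {
	client := &http.Client{Transport: upperGrantType{next: http.DefaultTransport}}
	_ = client // use client for token refreshes
}
```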
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -381,29 +273,25 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)
// the oauth client for the api servers needs
// a filter to fix the grant_type issues (see above)
baseClient := fshttp.NewClient(fs.Config)
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
do.SetRequestFilter(grantTypeFilter)
} else {
fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
user := config.FileGet(name, "user")
pass := config.FileGet(name, "pass")
if opt.Pass != "" {
var err error
opt.Pass, err = obscure.Reveal(opt.Pass)
if err != nil {
return nil, errors.Wrap(err, "couldn't decrypt password")
}
}
f := &Fs{
name: name,
root: root,
user: opt.User,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
name: name,
root: root,
user: opt.User,
opt: *opt,
//endpointURL: rest.URLPathEscape(path.Join(user, defaultDevice, opt.Mountpoint)),
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(rootURL),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.features = (&fs.Features{
CaseInsensitive: true,
@@ -411,13 +299,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ReadMimeType: true,
WriteMimeType: true,
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath("")
return err
})
if user == "" || pass == "" {
return nil, errors.New("jottacloud needs user and password")
}
f.srv.SetUserPass(opt.User, opt.Pass)
f.srv.SetErrorHandler(errorHandler)
err = f.setEndpointURL(opt.Mountpoint)
if err != nil {
@@ -443,6 +331,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
@@ -459,7 +348,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, e
// Set info
err = o.setMetaData(info)
} else {
err = o.readMetaData(false) // reads info and meta, returning an error
err = o.readMetaData() // reads info and meta, returning an error
}
if err != nil {
return nil, err
@@ -507,7 +396,7 @@ func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
//fmt.Printf("List: %s\n", f.filePath(dir))
//fmt.Printf("List: %s\n", dir)
opts := rest.Opts{
Method: "GET",
Path: f.filePath(dir),
@@ -769,7 +658,7 @@ func (f *Fs) Purge() error {
return f.purgeCheck("", false)
}
// copyOrMoves copies or moves directories or files depending on the method parameter
// copyOrMoves copys or moves directories or files depending on the mthod parameter
func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err error) {
opts := rest.Opts{
Method: "POST",
@@ -787,6 +676,7 @@ func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err erro
if err != nil {
return nil, err
}
return info, nil
}
@@ -934,7 +824,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
if result.PublicSharePath == "" {
return "", errors.New("couldn't create public link - no link path received")
}
link = path.Join(baseURL, result.PublicSharePath)
link = path.Join(shareURL, result.PublicSharePath)
return link, nil
}
@@ -990,7 +880,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
err := o.readMetaData(false)
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return 0
@@ -1006,24 +896,21 @@ func (o *Object) MimeType() string {
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.JottaFile) (err error) {
o.hasMetaData = true
o.size = info.Size
o.size = int64(info.Size)
o.md5 = info.MD5
o.mimeType = info.MimeType
o.modTime = time.Time(info.ModifiedAt)
return nil
}
func (o *Object) readMetaData(force bool) (err error) {
if o.hasMetaData && !force {
func (o *Object) readMetaData() (err error) {
if o.hasMetaData {
return nil
}
info, err := o.fs.readMetaDataForPath(o.remote)
if err != nil {
return err
}
if info.Deleted {
return fs.ErrorObjectNotFound
}
return o.setMetaData(info)
}
@@ -1032,7 +919,7 @@ func (o *Object) readMetaData(force bool) (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
err := o.readMetaData(false)
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -1080,7 +967,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader, cleanup func(), err error) {
// we need a MD5
md5Hasher := md5.New()
// use the teeReader to write to the local file AND calculate the MD5 while doing so
// use the teeReader to write to the local file AND caclulate the MD5 while doing so
teeReader := io.TeeReader(in, md5Hasher)
// nothing to clean up by default
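(Aside: the io.TeeReader trick — computing a hash as a side effect of streaming the data — is worth seeing on its own. A tiny standalone example, not rclone code:)

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	src := strings.NewReader("hello jottacloud")
	hasher := md5.New()
	// Everything read from tee is also written into hasher, so by the
	// time the upload (here: a discard) finishes, the MD5 of the
	// streamed data is ready without a second pass over the file.
	tee := io.TeeReader(src, hasher)
	if _, err := io.Copy(ioutil.Discard, tee); err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(hasher.Sum(nil)))
}
```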
@@ -1153,74 +1040,43 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
in = wrap(in)
}
// use the api to allocate the file first and get resume / deduplication info
var resp *http.Response
var result api.JottaFile
opts := rest.Opts{
Method: "POST",
Path: "allocate",
ExtraHeaders: make(map[string]string),
}
fileDate := api.Time(src.ModTime()).APIString()
// the allocate request
var request = api.AllocateFileRequest{
Bytes: size,
Created: fileDate,
Modified: fileDate,
Md5: md5String,
Path: path.Join(o.fs.opt.Mountpoint, replaceReservedChars(path.Join(o.fs.root, o.remote))),
Method: "POST",
Path: o.filePath(),
Body: in,
ContentType: fs.MimeType(src),
ContentLength: &size,
ExtraHeaders: make(map[string]string),
Parameters: url.Values{},
}
// send it
var response api.AllocateFileResponse
opts.ExtraHeaders["JMd5"] = md5String
opts.Parameters.Set("cphash", md5String)
opts.ExtraHeaders["JSize"] = strconv.FormatInt(size, 10)
// opts.ExtraHeaders["JCreated"] = api.Time(src.ModTime()).String()
opts.ExtraHeaders["JModified"] = api.Time(src.ModTime()).String()
// Parameters observed in other implementations
//opts.ExtraHeaders["X-Jfs-DeviceName"] = "Jotta"
//opts.ExtraHeaders["X-Jfs-Devicename-Base64"] = ""
//opts.ExtraHeaders["X-Jftp-Version"] = "2.4" this appears to be the current version
//opts.ExtraHeaders["jx_csid"] = ""
//opts.ExtraHeaders["jx_lisence"] = ""
opts.Parameters.Set("umode", "nomultipart")
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.apiSrv.CallJSON(&opts, &request, &response)
resp, err = o.fs.srv.CallXML(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
return err
}
// If the file state is INCOMPLETE or CORRUPT, try to upload it now
if response.State != "COMPLETED" {
// how much do we still have to upload?
remainingBytes := size - response.ResumePos
opts = rest.Opts{
Method: "POST",
RootURL: response.UploadURL,
ContentLength: &remainingBytes,
ContentType: "application/octet-stream",
Body: in,
ExtraHeaders: make(map[string]string),
}
if response.ResumePos != 0 {
opts.ExtraHeaders["Range"] = "bytes=" + strconv.FormatInt(response.ResumePos, 10) + "-" + strconv.FormatInt(size-1, 10)
}
// copy the already uploaded bytes into the trash :)
var result api.UploadResponse
_, err = io.CopyN(ioutil.Discard, in, response.ResumePos)
if err != nil {
return err
}
// send the remaining bytes
resp, err = o.fs.apiSrv.CallJSON(&opts, nil, &result)
if err != nil {
return err
}
// finally update the meta data
o.hasMetaData = true
o.size = result.Bytes
o.md5 = result.Md5
o.modTime = time.Unix(result.Modified/1000, 0)
} else {
// If the file state is COMPLETE we don't need to upload it because the file was already found, but we still need to update our metadata
return o.readMetaData(true)
}
return nil
// TODO: Check returned Metadata? Timeout on big uploads?
return o.setMetaData(&result)
}
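(Aside: the resume path above discards the bytes the server already holds, then streams the rest under a Range header. A compact sketch of just that skipping step, with made-up sizes:)

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	const size, resumePos = 16, 6 // pretend the server already holds 6 of 16 bytes
	in := strings.NewReader("0123456789abcdef")

	// Throw away what the server already stored ...
	if _, err := io.CopyN(ioutil.Discard, in, resumePos); err != nil {
		panic(err)
	}
	// ... and upload the remainder, labelled with a Range header.
	rangeHeader := fmt.Sprintf("bytes=%d-%d", resumePos, size-1)
	rest, _ := ioutil.ReadAll(in)
	fmt.Println(rangeHeader, string(rest)) // bytes=6-15 6789abcdef
}
```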
// Remove an object

View File

@@ -2,7 +2,7 @@
Translate file names for JottaCloud adapted from OneDrive
The following characters are JottaCloud reserved characters, and can't
The following characters are JottaClous reserved characters, and can't
be used in JottaCloud folder and file names.
jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"

View File

@@ -16,7 +16,7 @@ func (f *Fs) About() (*fs.Usage, error) {
if err != nil {
return nil, errors.Wrap(err, "failed to read disk usage")
}
bs := int64(s.Bsize) // nolint: unconvert
bs := int64(s.Bsize)
usage := &fs.Usage{
Total: fs.NewUsageValue(bs * int64(s.Blocks)), // quota of bytes that can be used
Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use

View File

@@ -1,20 +0,0 @@
// +build windows plan9
package local
import (
"time"
)
const haveLChtimes = false
// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
// Does nothing
return nil
}

View File

@@ -1,28 +0,0 @@
// +build !windows,!plan9
package local
import (
"os"
"time"
"golang.org/x/sys/unix"
)
const haveLChtimes = true
// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
var utimes [2]unix.Timespec
utimes[0] = unix.NsecToTimespec(atime.UnixNano())
utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
if e := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); e != nil {
return &os.PathError{Op: "lchtimes", Path: name, Err: e}
}
return nil
}

View File

@@ -2,7 +2,6 @@
package local
import (
"bytes"
"fmt"
"io"
"io/ioutil"
@@ -22,14 +21,12 @@ import (
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/file"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
)
// Constants
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
// Register with Fs
func init() {
@@ -51,13 +48,6 @@ func init() {
NoPrefix: true,
ShortOpt: "L",
Advanced: true,
}, {
Name: "links",
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension",
Default: false,
NoPrefix: true,
ShortOpt: "l",
Advanced: true,
}, {
Name: "skip_links",
Help: `Don't warn about skipped symlinks.
@@ -102,13 +92,12 @@ check can be disabled with this flag.`,
// Options defines the configuration for this backend
type Options struct {
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
FollowSymlinks bool `config:"copy_links"`
SkipSymlinks bool `config:"skip_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
}
// Fs represents a local filesystem rooted at root
@@ -130,20 +119,17 @@ type Fs struct {
// Object represents a local filesystem object
type Object struct {
fs *Fs // The Fs this object is part of
remote string // The remote path - properly UTF-8 encoded - for rclone
path string // The local path - may not be properly UTF-8 encoded - for OS
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
hashes map[hash.Type]string // Hashes
translatedLink bool // Is this object a translated link
fs *Fs // The Fs this object is part of
remote string // The remote path - properly UTF-8 encoded - for rclone
path string // The local path - may not be properly UTF-8 encoded - for OS
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
hashes map[hash.Type]string // Hashes
}
// ------------------------------------------------------------
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
// NewFs constructs an Fs from the path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -152,9 +138,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
if opt.TranslateSymlinks && opt.FollowSymlinks {
return nil, errLinksAndCopyLinks
}
if opt.NoUTFNorm {
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
@@ -182,7 +165,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err == nil {
f.dev = readDevice(fi, f.opt.OneFileSystem)
}
if err == nil && f.isRegular(fi.Mode()) {
if err == nil && fi.Mode().IsRegular() {
// It is a file, so use the parent as the root
f.root = filepath.Dir(f.root)
// return an error with an fs which points to the parent
@@ -191,20 +174,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return f, nil
}
// Determine whether a file is a 'regular' file,
// Symlinks are regular files, only if the TranslateSymlink
// option is in-effect
func (f *Fs) isRegular(mode os.FileMode) bool {
if !f.opt.TranslateSymlinks {
return mode.IsRegular()
}
// fi.Mode().IsRegular() tests that all mode bits are zero
// Since symlinks are accepted, test that all other bits are zero,
// except the symlink bit
return mode&os.ModeType&^os.ModeSymlink == 0
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
@@ -225,48 +194,28 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// caseInsensitive returns whether the remote is case insensitive or not
// caseInsenstive returns whether the remote is case insensitive or not
func (f *Fs) caseInsensitive() bool {
// FIXME not entirely accurate since you can have case
// sensitive Fses on darwin and case insensitive Fses on linux.
// sensitive Fses on darwin and case insenstive Fses on linux.
// Should probably check but that would involve creating a
// file in the remote to be most accurate which probably isn't
// desirable.
return runtime.GOOS == "windows" || runtime.GOOS == "darwin"
}
// translateLink checks whether the remote is a translated link
// and returns a new path, removing the suffix as needed,
// It also returns whether this is a translated link at all
//
// for regular files, dstPath is returned unchanged
func translateLink(remote, dstPath string) (newDstPath string, isTranslatedLink bool) {
isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
newDstPath = strings.TrimSuffix(dstPath, linkSuffix)
return newDstPath, isTranslatedLink
}
// newObject makes a half completed Object
//
// if dstPath is empty then it is made from remote
func (f *Fs) newObject(remote, dstPath string) *Object {
translatedLink := false
if dstPath == "" {
dstPath = f.cleanPath(filepath.Join(f.root, remote))
}
remote = f.cleanRemote(remote)
if f.opt.TranslateSymlinks {
// Possibly receive a new name for dstPath
dstPath, translatedLink = translateLink(remote, dstPath)
}
return &Object{
fs: f,
remote: remote,
path: dstPath,
translatedLink: translatedLink,
fs: f,
remote: remote,
path: dstPath,
}
}
@@ -288,11 +237,6 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj
}
return nil, err
}
// Handle the odd case where a symlink was specified by name without the link suffix
if o.fs.opt.TranslateSymlinks && o.mode&os.ModeSymlink != 0 && !o.translatedLink {
return nil, fs.ErrorObjectNotFound
}
}
if o.mode.IsDir() {
return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
@@ -316,7 +260,6 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
dir = f.dirNames.Load(dir)
fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
remote := f.cleanRemote(dir)
@@ -373,10 +316,6 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
entries = append(entries, d)
}
} else {
// Check whether this link should be translated
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += linkSuffix
}
fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
if err != nil {
return nil, err
@@ -590,7 +529,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// OK
} else if err != nil {
return nil, err
} else if !dstObj.fs.isRegular(dstObj.mode) {
} else if !dstObj.mode.IsRegular() {
// It isn't a file
return nil, errors.New("can't move file onto non-file")
}
@@ -712,13 +651,7 @@ func (o *Object) Hash(r hash.Type) (string, error) {
o.fs.objectHashesMu.Unlock()
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
var in io.ReadCloser
if !o.translatedLink {
in, err = file.Open(o.path)
} else {
in, err = o.openTranslatedLink(0, -1)
}
in, err := os.Open(o.path)
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")
}
@@ -749,12 +682,7 @@ func (o *Object) ModTime() time.Time {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
var err error
if o.translatedLink {
err = lChtimes(o.path, modTime, modTime)
} else {
err = os.Chtimes(o.path, modTime, modTime)
}
err := os.Chtimes(o.path, modTime, modTime)
if err != nil {
return err
}
@@ -772,7 +700,7 @@ func (o *Object) Storable() bool {
}
}
mode := o.mode
if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
if mode&os.ModeSymlink != 0 {
if !o.fs.opt.SkipSymlinks {
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
}
@@ -833,16 +761,6 @@ func (file *localOpenFile) Close() (err error) {
return err
}
// Returns a ReadCloser() object that contains the contents of a symbolic link
func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err error) {
// Read the link and return its destination as the contents of the object
linkdst, err := os.Readlink(o.path)
if err != nil {
return nil, err
}
return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
}
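(Aside: to make the -l/--links behaviour concrete — a symlink named symlink.txt is surfaced as symlink.txt.rclonelink whose content is the link target, and the suffix is stripped again on the way back. A hypothetical round-trip sketch, Unix-only since it creates a real symlink:)

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
)

const linkSuffix = ".rclonelink"

func main() {
	dir, err := ioutil.TempDir("", "linkdemo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// A symlink pointing at file.txt ...
	link := filepath.Join(dir, "symlink.txt")
	if err := os.Symlink("file.txt", link); err != nil {
		panic(err)
	}

	// ... is presented as symlink.txt.rclonelink whose body is the target.
	remote := filepath.Base(link) + linkSuffix
	target, err := os.Readlink(link)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s -> %q\n", remote, target) // symlink.txt.rclonelink -> "file.txt"

	// Going the other way, the suffix is stripped and a symlink recreated.
	localName := strings.TrimSuffix(remote, linkSuffix)
	fmt.Println("would recreate symlink", localName, "pointing at", target)
}
```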
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
@@ -862,12 +780,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
}
}
// Handle a translated link
if o.translatedLink {
return o.openTranslatedLink(offset, limit)
}
fd, err := file.Open(o.path)
fd, err := os.Open(o.path)
if err != nil {
return
}
@@ -898,19 +811,8 @@ func (o *Object) mkdirAll() error {
return os.MkdirAll(dir, 0777)
}
type nopWriterCloser struct {
*bytes.Buffer
}
func (nwc nopWriterCloser) Close() error {
// noop
return nil
}
// Update the object from in with modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
var out io.WriteCloser
hashes := hash.Supported
for _, option := range options {
switch x := option.(type) {
@@ -924,23 +826,15 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return err
}
var symlinkData bytes.Buffer
// If the object is a regular file, create it.
// If it is a translated link, just read in the contents, and
// then create a symlink
if !o.translatedLink {
f, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
// Pre-allocate the file for performance reasons
err = preAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
out = f
} else {
out = nopWriterCloser{&symlinkData}
out, err := os.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
// Pre-allocate the file for performance reasons
err = preAllocate(src.Size(), out)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
// Calculate the hash of the object we are reading as we go along
@@ -955,26 +849,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
if err == nil {
err = closeErr
}
if o.translatedLink {
if err == nil {
// Remove any current symlink or file, if one exists
if _, err := os.Lstat(o.path); err == nil {
if removeErr := os.Remove(o.path); removeErr != nil {
fs.Errorf(o, "Failed to remove previous file: %v", removeErr)
return removeErr
}
}
// Use the contents for the copied object to create a symlink
err = os.Symlink(symlinkData.String(), o.path)
}
// only continue if symlink creation succeeded
if err != nil {
return err
}
}
if err != nil {
fs.Logf(o, "Removing partially written file on error: %v", err)
if removeErr := os.Remove(o.path); removeErr != nil {

View File

@@ -1,19 +1,13 @@
package local
import (
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/lib/file"
"github.com/ncw/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -44,13 +38,10 @@ func TestUpdatingCheck(t *testing.T) {
filePath := "sub dir/local test"
r.WriteFile(filePath, "content", time.Now())
fd, err := file.Open(path.Join(r.LocalName, filePath))
fd, err := os.Open(path.Join(r.LocalName, filePath))
if err != nil {
t.Fatalf("failed opening file %q: %v", filePath, err)
}
defer func() {
require.NoError(t, fd.Close())
}()
fi, err := fd.Stat()
require.NoError(t, err)
@@ -81,108 +72,3 @@ func TestUpdatingCheck(t *testing.T) {
require.NoError(t, err)
}
func TestSymlink(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
f := r.Flocal.(*Fs)
dir := f.root
// Write a file
modTime1 := fstest.Time("2001-02-03T04:05:10.123123123Z")
file1 := r.WriteFile("file.txt", "hello", modTime1)
// Write a symlink
modTime2 := fstest.Time("2002-02-03T04:05:10.123123123Z")
symlinkPath := filepath.Join(dir, "symlink.txt")
require.NoError(t, os.Symlink("file.txt", symlinkPath))
require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))
// Object viewed as symlink
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
if runtime.GOOS == "windows" {
file2.Size = 0 // symlinks are 0 length under Windows
}
// Object viewed as destination
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
// Check with no symlink flags
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote)
// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
fstest.CheckItems(t, r.Flocal, file1, file2d)
fstest.CheckItems(t, r.Fremote)
// Set fs into "-l" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2)
}
// Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
file3 := r.WriteObjectTo(r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
if runtime.GOOS == "windows" {
file3.Size = 0 // symlinks are 0 length under Windows
}
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
}
// Check it got the correct contents
symlinkPath = filepath.Join(dir, "symlink2.txt")
fi, err := os.Lstat(symlinkPath)
require.NoError(t, err)
assert.False(t, fi.Mode().IsRegular())
linkText, err := os.Readlink(symlinkPath)
require.NoError(t, err)
assert.Equal(t, "file.txt", linkText)
// Check that NewObject gets the correct object
o, err := r.Flocal.NewObject("symlink2.txt" + linkSuffix)
require.NoError(t, err)
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
if runtime.GOOS != "windows" {
assert.Equal(t, int64(8), o.Size())
}
// Check that NewObject doesn't see the non suffixed version
_, err = r.Flocal.NewObject("symlink2.txt")
require.Equal(t, fs.ErrorObjectNotFound, err)
// Check reading the object
in, err := o.Open()
require.NoError(t, err)
contents, err := ioutil.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt", string(contents))
require.NoError(t, in.Close())
// Check reading the object with range
in, err = o.Open(&fs.RangeOption{Start: 2, End: 5})
require.NoError(t, err)
contents, err = ioutil.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt"[2:5+1], string(contents))
require.NoError(t, in.Close())
}
func TestSymlinkError(t *testing.T) {
m := configmap.Simple{
"links": "true",
"copy_links": "true",
}
_, err := NewFs("local", "/", m)
assert.Equal(t, errLinksAndCopyLinks, err)
}

View File

@@ -22,5 +22,5 @@ func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
fs.Debugf(fi.Name(), "Type assertion fi.Sys().(*syscall.Stat_t) failed from: %#v", fi.Sys())
return devUnset
}
return uint64(statT.Dev) // nolint: unconvert
return uint64(statT.Dev)
}

View File

@@ -497,7 +497,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the dirNode, object, leaf and error
// Returns the dirNode, obect, leaf and error
//
// Used to create new objects
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
@@ -523,10 +523,10 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil)
switch err {
case nil:
return existingObj, existingObj.Update(in, src, options...)
return exisitingObj, exisitingObj.Update(in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(in, src)
@@ -847,14 +847,14 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error {
return shouldRetry(err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.GetName(), srcDir)
return errors.Wrapf(err, "MergDirs move failed on %q in %v", info.GetName(), srcDir)
}
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
err = f.deleteNode(srcDirNode)
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
return errors.Wrapf(err, "MergDirs move failed to rmdir %q", srcDir)
}
}
return nil
@@ -1076,9 +1076,6 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
size := src.Size()
if size < 0 {
return errors.New("mega backend can't upload a file of unknown length")
}
//modTime := src.ModTime()
remote := o.Remote()
@@ -1129,7 +1126,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return errors.Wrap(err, "failed to finish upload")
}
// If the upload succeeded and the original object existed, then delete it
// If the upload succeded and the original object existed, then delete it
if o.info != nil {
err = o.fs.deleteNode(o.info)
if err != nil {

View File

@@ -25,7 +25,7 @@ type Error struct {
} `json:"error"`
}
// Error returns a string for the error and satisfies the error interface
// Error returns a string for the error and statistifes the error interface
func (e *Error) Error() string {
out := e.ErrorInfo.Code
if e.ErrorInfo.InnerError.Code != "" {
@@ -35,7 +35,7 @@ func (e *Error) Error() string {
return out
}
// Check Error satisfies the error interface
// Check Error statisfies the error interface
var _ error = (*Error)(nil)
// Identity represents an identity of an actor. For example, an actor
@@ -285,7 +285,6 @@ type AsyncOperationStatus struct {
// GetID returns a normalized ID of the item
// If DriveID is known it will be prefixed to the ID with # separator
// Can be parsed using onedrive.parseNormalizedID(normalizedID)
func (i *Item) GetID() string {
if i.IsRemote() && i.RemoteItem.ID != "" {
return i.RemoteItem.ParentReference.DriveID + "#" + i.RemoteItem.ID
@@ -295,9 +294,9 @@ func (i *Item) GetID() string {
return i.ID
}
// GetDriveID returns a normalized ParentReference of the item
// GetDriveID returns a normalized ParentReferance of the item
func (i *Item) GetDriveID() string {
return i.GetParentReference().DriveID
return i.GetParentReferance().DriveID
}
// GetName returns a normalized Name of the item
@@ -398,8 +397,8 @@ func (i *Item) GetLastModifiedDateTime() Timestamp {
return i.LastModifiedDateTime
}
// GetParentReference returns a normalized ParentReference of the item
func (i *Item) GetParentReference() *ItemReference {
// GetParentReferance returns a normalized ParentReferance of the item
func (i *Item) GetParentReferance() *ItemReference {
if i.IsRemote() && i.ParentReference == nil {
return i.RemoteItem.ParentReference
}
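
For orientation, the normalized ID convention used by GetID and GetDriveID is just driveID and itemID joined with "#" when the item lives on another drive. A small sketch of splitting such an ID (illustrative values, not the backend's parser):

package main

import (
	"fmt"
	"strings"
)

// splitNormalizedID mirrors the driveID#itemID convention described above.
func splitNormalizedID(id string) (itemID, driveID string) {
	if i := strings.Index(id, "#"); i >= 0 {
		return id[i+1:], id[:i]
	}
	return id, ""
}

func main() {
	itemID, driveID := splitNormalizedID("b!AbCd#01XYZ") // hypothetical IDs
	fmt.Println(itemID, driveID)                         // 01XYZ b!AbCd
}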

View File

@@ -75,8 +75,9 @@ func init() {
return
}
// Stop if we are running non-interactive config
if fs.Config.AutoConfirm {
// Are we running headless?
if automatic, _ := m.Get(config.ConfigAutomatic); automatic != "" {
// Yes, okay we are done
return
}
@@ -198,7 +199,7 @@ func init() {
fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
// This does not work, YET :)
if !config.ConfirmWithConfig(m, "config_drive_ok", true) {
if !config.Confirm() {
log.Fatalf("Cancelled by user")
}
@@ -227,7 +228,7 @@ that the chunks will be buffered into memory.`,
Advanced: true,
}, {
Name: "drive_type",
Help: "The type of the drive ( " + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + " )",
Help: "The type of the drive ( personal | business | documentLibrary )",
Default: "",
Advanced: true,
}, {
@@ -324,19 +325,29 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
authRetry := false
authRety := false
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
authRetry = true
authRety = true
fs.Debugf(nil, "Should retry: %v", err)
}
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
// if `relPath` == "", it reads the metadata for the item with that ID.
func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
opts := newOptsCall(normalizedID, "GET", ":/"+rest.URLPathEscape(replaceReservedChars(relPath)))
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
var opts rest.Opts
if len(path) == 0 {
opts = rest.Opts{
Method: "GET",
Path: "/root",
}
} else {
opts = rest.Opts{
Method: "GET",
Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
}
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
@@ -345,72 +356,6 @@ func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string
return info, resp, err
}
// readMetaDataForPath reads the metadata from the path (relative to the absolute root)
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
firstSlashIndex := strings.IndexRune(path, '/')
if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
var opts rest.Opts
if len(path) == 0 {
opts = rest.Opts{
Method: "GET",
Path: "/root",
}
} else {
opts = rest.Opts{
Method: "GET",
Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
}
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
})
return info, resp, err
}
// The following branch handles the case when we're using OneDrive Personal and the path is in a folder.
// For OneDrive Personal, we need to consider the "shared with me" folders.
// An item in such a folder can only be addressed by its ID relative to the sharer's driveID or
// by its path relative to the folder's ID relative to the sharer's driveID.
// Note: A "shared with me" folder can only be placed in the sharee's absolute root.
// So we read metadata relative to a suitable folder's normalized ID.
var dirCacheFoundRoot bool
var rootNormalizedID string
if f.dirCache != nil {
var ok bool
if rootNormalizedID, ok = f.dirCache.Get(""); ok {
dirCacheFoundRoot = true
}
}
relPath, insideRoot := getRelativePathInsideBase(f.root, path)
var firstDir, baseNormalizedID string
if !insideRoot || !dirCacheFoundRoot {
// We do not have the normalized ID in dirCache for our query to base on. Query it manually.
firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
info, resp, err := f.readMetaDataForPath(firstDir)
if err != nil {
return info, resp, err
}
baseNormalizedID = info.GetID()
} else {
if f.root != "" {
// Read metadata based on root
baseNormalizedID = rootNormalizedID
} else {
// Read metadata based on firstDir
firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
baseNormalizedID, err = f.dirCache.FindDir(firstDir, false)
if err != nil {
return nil, nil, err
}
}
}
return f.readMetaDataForPathRelativeToID(baseNormalizedID, relPath)
}
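
To make the relative-to-ID addressing concrete, here is a rough sketch of the two request shapes involved (the exact default-drive prefix and escaping are assumptions for illustration; the real code builds them via newOptsCall and rest.URLPathEscape):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// escapeSegments escapes each path segment but keeps the slashes.
func escapeSegments(p string) string {
	parts := strings.Split(p, "/")
	for i, s := range parts {
		parts[i] = url.PathEscape(s)
	}
	return strings.Join(parts, "/")
}

// requestPath sketches addressing an item inside a specific drive
// (driveID#itemID) versus one in the default drive.
func requestPath(normalizedID, relPath string) string {
	suffix := ""
	if relPath != "" {
		suffix = ":/" + escapeSegments(relPath)
	}
	if i := strings.Index(normalizedID, "#"); i >= 0 {
		driveID, itemID := normalizedID[:i], normalizedID[i+1:]
		return "/drives/" + driveID + "/items/" + itemID + suffix
	}
	return "/drive/items/" + normalizedID + suffix
}

func main() {
	fmt.Println(requestPath("driveA#item1", "sub dir/file.txt")) // shared-with-me case
	fmt.Println(requestPath("item2", ""))                        // metadata of the item itself
}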
// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
// Decode error response
@@ -492,11 +437,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Get rootID
rootInfo, _, err := f.readMetaDataForPath("")
if err != nil || rootInfo.GetID() == "" {
if err != nil || rootInfo.ID == "" {
return nil, errors.Wrap(err, "failed to get root")
}
f.dirCache = dircache.New(root, rootInfo.GetID(), f)
f.dirCache = dircache.New(root, rootInfo.ID, f)
// Find the current root
err = f.dirCache.FindRoot(false)
@@ -569,11 +514,18 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
_, ok := f.dirCache.GetInv(pathID)
parent, ok := f.dirCache.GetInv(pathID)
if !ok {
return "", false, errors.New("couldn't find parent ID")
}
info, resp, err := f.readMetaDataForPathRelativeToID(pathID, leaf)
path := leaf
if parent != "" {
path = parent + "/" + path
}
if f.dirCache.FoundRoot() {
path = f.rootSlash() + path
}
info, resp, err := f.readMetaDataForPath(path)
if err != nil {
if resp != nil && resp.StatusCode == http.StatusNotFound {
return "", false, nil
@@ -915,13 +867,13 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
opts.NoResponse = true
id, dstDriveID, _ := parseNormalizedID(directoryID)
id, _, _ := parseDirID(directoryID)
replacedLeaf := replaceReservedChars(leaf)
copyReq := api.CopyItemRequest{
Name: &replacedLeaf,
ParentReference: api.ItemReference{
DriveID: dstDriveID,
DriveID: f.driveID,
ID: id,
},
}
@@ -988,23 +940,15 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
return nil, err
}
id, dstDriveID, _ := parseNormalizedID(directoryID)
_, srcObjDriveID, _ := parseNormalizedID(srcObj.id)
if dstDriveID != srcObjDriveID {
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
// "Items cannot be moved between Drives using this request."
return nil, fs.ErrorCantMove
}
// Move the object
opts := newOptsCall(srcObj.id, "PATCH", "")
id, _, _ := parseDirID(directoryID)
move := api.MoveItemRequest{
Name: replaceReservedChars(leaf),
ParentReference: &api.ItemReference{
DriveID: dstDriveID,
ID: id,
ID: id,
},
// We set the mod time too as it gets reset otherwise
FileSystemInfo: &api.FileSystemInfoFacet{
@@ -1080,20 +1024,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
if err != nil {
return err
}
parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
if err != nil {
return err
}
_, srcDriveID, _ := parseNormalizedID(srcID)
if dstDriveID != srcDriveID {
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
// "Items cannot be moved between Drives using this request."
return fs.ErrorCantDirMove
}
parsedDstDirID, _, _ := parseDirID(dstDirectoryID)
// Check destination does not exist
if dstRemote != "" {
@@ -1107,8 +1038,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}
}
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
if err != nil {
return err
}
// Get timestamps of src so they can be preserved
srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(srcID, "")
srcInfo, _, err := srcFs.readMetaDataForPath(srcPath)
if err != nil {
return err
}
@@ -1118,8 +1055,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
move := api.MoveItemRequest{
Name: replaceReservedChars(leaf),
ParentReference: &api.ItemReference{
DriveID: dstDriveID,
ID: parsedDstDirID,
ID: parsedDstDirID,
},
// We set the mod time too as it gets reset otherwise
FileSystemInfo: &api.FileSystemInfoFacet{
@@ -1186,7 +1122,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
if err != nil {
return "", err
}
opts := newOptsCall(info.GetID(), "POST", "/createLink")
opts := newOptsCall(info.ID, "POST", "/createLink")
share := api.CreateShareLinkRequest{
Type: "view",
@@ -1334,13 +1270,13 @@ func (o *Object) ModTime() time.Time {
// setModTime sets the modification time of the local fs object
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
var opts rest.Opts
leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
trueDirID, drive, rootURL := parseNormalizedID(directoryID)
_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
_, drive, rootURL := parseDirID(directoryID)
if drive != "" {
opts = rest.Opts{
Method: "PATCH",
RootURL: rootURL,
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf),
Path: "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()),
}
} else {
opts = rest.Opts{
@@ -1408,7 +1344,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUploadResponse, err error) {
leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
id, drive, rootURL := parseNormalizedID(directoryID)
id, drive, rootURL := parseDirID(directoryID)
var opts rest.Opts
if drive != "" {
opts = rest.Opts{
@@ -1488,7 +1424,7 @@ func (o *Object) cancelUploadSession(url string) (err error) {
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
if size <= 0 {
return nil, errors.New("unknown-sized upload not supported")
panic("size passed into uploadMultipart must be > 0")
}
// Create upload session
@@ -1535,19 +1471,19 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
// This function will set modtime after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
panic("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
}
fs.Debugf(o, "Starting singlepart upload")
var resp *http.Response
var opts rest.Opts
leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
trueDirID, drive, rootURL := parseNormalizedID(directoryID)
_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
_, drive, rootURL := parseDirID(directoryID)
if drive != "" {
opts = rest.Opts{
Method: "PUT",
RootURL: rootURL,
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf) + ":/content",
Path: "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
ContentLength: &size,
Body: in,
}
@@ -1560,6 +1496,10 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (
}
}
if size == 0 {
opts.Body = nil
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
if apiErr, ok := err.(*api.Error); ok {
@@ -1602,7 +1542,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
} else if size == 0 {
info, err = o.uploadSinglepart(in, size, modTime)
} else {
return errors.New("unknown-sized upload not supported")
panic("src file size must be >= 0")
}
if err != nil {
return err
@@ -1626,8 +1566,8 @@ func (o *Object) ID() string {
return o.id
}
func newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
id, drive, rootURL := parseNormalizedID(normalizedID)
func newOptsCall(id string, method string, route string) (opts rest.Opts) {
id, drive, rootURL := parseDirID(id)
if drive != "" {
return rest.Opts{
@@ -1642,10 +1582,7 @@ func newOptsCall(normalizedID string, method string, route string) (opts rest.Op
}
}
// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
// and returns itemID, driveID, rootURL.
// Such a normalized ID can come from (*Item).GetID()
func parseNormalizedID(ID string) (string, string, string) {
func parseDirID(ID string) (string, string, string) {
if strings.Index(ID, "#") >= 0 {
s := strings.Split(ID, "#")
return s[1], s[0], graphURL + "/drives"
@@ -1653,21 +1590,6 @@ func parseNormalizedID(ID string) (string, string, string) {
return ID, "", ""
}
// getRelativePathInsideBase checks if `target` is inside `base`. If so, it
// returns a relative path for `target` based on `base` and a boolean `true`.
// Otherwise returns "", false.
func getRelativePathInsideBase(base, target string) (string, bool) {
if base == "" {
return target, true
}
baseSlash := base + "/"
if strings.HasPrefix(target+"/", baseSlash) {
return target[len(baseSlash):], true
}
return "", false
}
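
A quick usage sketch of the helper above (illustrative values): the trailing slash in the comparison is what stops "documents" from being treated as inside "doc".

package main

import (
	"fmt"
	"strings"
)

// relativeTo mirrors getRelativePathInsideBase above.
func relativeTo(base, target string) (string, bool) {
	if base == "" {
		return target, true
	}
	baseSlash := base + "/"
	if strings.HasPrefix(target+"/", baseSlash) {
		return target[len(baseSlash):], true
	}
	return "", false
}

func main() {
	fmt.Println(relativeTo("doc", "doc/readme.txt")) // readme.txt true
	fmt.Println(relativeTo("doc", "documents"))      // "" false - not inside "doc"
}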
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)

View File

@@ -119,7 +119,7 @@ func (f *Fs) DirCacheFlush() {
f.dirCache.ResetRoot()
}
// NewFs constructs an Fs from the path, bucket:path
// NewFs contstructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
@@ -785,7 +785,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
remote := path.Join(dir, folder.Name)
// cache the directory ID for later lookups
f.dirCache.Put(remote, folder.FolderID)
d := fs.NewDir(remote, time.Unix(folder.DateModified, 0)).SetID(folder.FolderID)
d := fs.NewDir(remote, time.Unix(int64(folder.DateModified), 0)).SetID(folder.FolderID)
d.SetItems(int64(folder.ChildFolders))
entries = append(entries, d)
}

View File

@@ -13,7 +13,7 @@ type Error struct {
} `json:"error"`
}
// Error satisfies the error interface
// Error statisfies the error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (Error %d)", e.Info.Message, e.Info.Code)
}

View File

@@ -41,7 +41,7 @@ type Error struct {
ErrorString string `json:"error"`
}
// Error returns a string for the error and satisfies the error interface
// Error returns a string for the error and statistifes the error interface
func (e *Error) Error() string {
return fmt.Sprintf("pcloud error: %s (%d)", e.ErrorString, e.Result)
}
@@ -58,7 +58,7 @@ func (e *Error) Update(err error) error {
return e
}
// Check Error satisfies the error interface
// Check Error statisfies the error interface
var _ error = (*Error)(nil)
// Item describes a folder or a file as returned by Get Folder Items and others
@@ -161,6 +161,7 @@ type UserInfo struct {
PublicLinkQuota int64 `json:"publiclinkquota"`
Email string `json:"email"`
UserID int `json:"userid"`
Result int `json:"result"`
Quota int64 `json:"quota"`
TrashRevretentionDays int `json:"trashrevretentiondays"`
Premium bool `json:"premium"`

View File

@@ -385,7 +385,7 @@ func fileIDtoNumber(fileID string) string {
if len(fileID) > 0 && fileID[0] == 'f' {
return fileID[1:]
}
fs.Debugf(nil, "Invalid file id %q", fileID)
fs.Debugf(nil, "Invalid filee id %q", fileID)
return fileID
}

View File

@@ -72,54 +72,14 @@ func init() {
Help: "Number of connection retries.",
Default: 3,
Advanced: true,
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.
Note that "--qingstor-upload-concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
NB if you set this to > 1 then the checksums of multpart uploads
become corrupted (the uploads themselves are not corrupted though).
If you are uploading small numbers of large file over high speed link
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 1,
Advanced: true,
}},
})
}
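
For orientation, a hedged sketch of how these options fit together (the cutoff value is the default quoted above; nothing here is new behaviour): files of unknown length or at/above the cutoff take the multipart path in chunk_size pieces, smaller ones are sent as a single PUT.

package main

import "fmt"

const uploadCutoff = 200 * 1024 * 1024 // default upload_cutoff quoted above (200 MiB)

// useMultipart mirrors the upload decision: unknown length (-1) or anything
// at or above the cutoff is uploaded in chunks; smaller files use one PUT.
func useMultipart(size int64) bool {
	return size < 0 || size >= uploadCutoff
}

func main() {
	fmt.Println(useMultipart(5 * 1024 * 1024))   // false - single PUT
	fmt.Println(useMultipart(-1))                // true  - unknown length
	fmt.Println(useMultipart(300 * 1024 * 1024)) // true  - chunked
}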
// Constants
const (
listLimitSize = 1000 // Number of items to read at once
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
minChunkSize = fs.SizeSuffix(minMultiPartSize)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
listLimitSize = 1000 // Number of items to read at once
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
)
// Globals
@@ -132,15 +92,12 @@ func timestampToTime(tp int64) time.Time {
// Options defines the configuration for this backend
type Options struct {
EnvAuth bool `config:"env_auth"`
AccessKeyID string `config:"access_key_id"`
SecretAccessKey string `config:"secret_access_key"`
Endpoint string `config:"endpoint"`
Zone string `config:"zone"`
ConnectionRetries int `config:"connection_retries"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
EnvAuth bool `config:"env_auth"`
AccessKeyID string `config:"access_key_id"`
SecretAccessKey string `config:"secret_access_key"`
Endpoint string `config:"endpoint"`
Zone string `config:"zone"`
ConnectionRetries int `config:"connection_retries"`
}
// Fs represents a remote qingstor server
@@ -270,36 +227,6 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
return qs.Init(cf)
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -308,14 +235,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "qingstor: chunk size")
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "qingstor: upload cutoff")
}
bucket, key, err := qsParsePath(root)
if err != nil {
return nil, err
@@ -449,7 +368,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}
_, err = bucketInit.PutObject(key, &req)
if err != nil {
fs.Debugf(f, "Copy Failed, API Error: %v", err)
fs.Debugf(f, "Copied Faild, API Error: %v", err)
return nil, err
}
return f.NewObject(remote)
@@ -756,7 +675,7 @@ func (f *Fs) Mkdir(dir string) error {
}
switch *statistics.Status {
case "deleted":
fs.Debugf(f, "Wait for qingstor sync bucket status, retries: %d", retries)
fs.Debugf(f, "Wiat for qingstor sync bucket status, retries: %d", retries)
time.Sleep(time.Second * 1)
retries++
continue
@@ -875,7 +794,7 @@ func (o *Object) readMetaData() (err error) {
fs.Debugf(o, "Read metadata of key: %s", key)
resp, err := bucketInit.HeadObject(key, &qs.HeadObjectInput{})
if err != nil {
fs.Debugf(o, "Read metadata failed, API Error: %v", err)
fs.Debugf(o, "Read metadata faild, API Error: %v", err)
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
@@ -994,24 +913,16 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
mimeType := fs.MimeType(src)
req := uploadInput{
body: in,
qsSvc: o.fs.svc,
bucket: o.fs.bucket,
zone: o.fs.zone,
key: key,
mimeType: mimeType,
partSize: int64(o.fs.opt.ChunkSize),
concurrency: o.fs.opt.UploadConcurrency,
body: in,
qsSvc: o.fs.svc,
bucket: o.fs.bucket,
zone: o.fs.zone,
key: key,
mimeType: mimeType,
}
uploader := newUploader(&req)
size := src.Size()
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
if multipart {
err = uploader.upload()
} else {
err = uploader.singlePartUpload(in, size)
}
err = uploader.upload()
if err != nil {
return err
}

View File

@@ -2,12 +2,12 @@
// +build !plan9
package qingstor
package qingstor_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/backend/qingstor"
"github.com/ncw/rclone/fstest/fstests"
)
@@ -15,19 +15,6 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestQingStor:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
NilObject: (*qingstor.Object)(nil),
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

View File

@@ -143,7 +143,7 @@ func (u *uploader) init() {
// Try to adjust partSize if it is too small and account for
// integer division truncation.
if u.totalSize/u.cfg.partSize >= u.cfg.partSize {
if u.totalSize/u.cfg.partSize >= int64(u.cfg.partSize) {
// Add one to the part size to account for remainders
// during the size calculation. e.g odd number of bytes.
u.cfg.partSize = (u.totalSize / int64(u.cfg.maxUploadParts)) + 1
@@ -152,18 +152,18 @@ func (u *uploader) init() {
}
// singlePartUpload upload a single object that contentLength less than "defaultUploadPartSize"
func (u *uploader) singlePartUpload(buf io.Reader, size int64) error {
func (u *uploader) singlePartUpload(buf io.ReadSeeker) error {
bucketInit, _ := u.bucketInit()
req := qs.PutObjectInput{
ContentLength: &size,
ContentLength: &u.readerPos,
ContentType: &u.cfg.mimeType,
Body: buf,
}
_, err := bucketInit.PutObject(u.cfg.key, &req)
if err == nil {
fs.Debugf(u, "Upload single object finished")
fs.Debugf(u, "Upload single objcet finished")
}
return err
}
@@ -179,13 +179,13 @@ func (u *uploader) upload() error {
// Do one read to determine if we have more than one part
reader, _, err := u.nextReader()
if err == io.EOF { // single part
fs.Debugf(u, "Uploading as single part object to QingStor")
return u.singlePartUpload(reader, u.readerPos)
fs.Debugf(u, "Tried to upload a singile object to QingStor")
return u.singlePartUpload(reader)
} else if err != nil {
return errors.Errorf("read upload data failed: %s", err)
}
fs.Debugf(u, "Uploading as multi-part object to QingStor")
fs.Debugf(u, "Treied to upload a multi-part object to QingStor")
mu := multiUploader{uploader: u}
return mu.multiPartUpload(reader)
}
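
The single/multi decision above hinges on a one-chunk look-ahead: read one part and see whether the source is already exhausted. A stripped-down sketch of that pattern (generic reader, not the backend's uploader types):

package main

import (
	"bytes"
	"fmt"
	"io"
)

// firstChunk reads up to partSize bytes; running out of data on the first
// read means the whole object fits in a single part.
func firstChunk(r io.Reader, partSize int64) (chunk []byte, single bool, err error) {
	buf := make([]byte, partSize)
	n, err := io.ReadFull(r, buf)
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		return buf[:n], true, nil // single part upload
	}
	return buf[:n], false, err // more data may follow: multipart upload
}

func main() {
	chunk, single, err := firstChunk(bytes.NewBufferString("small object"), 4*1024*1024)
	fmt.Println(len(chunk), single, err) // 12 true <nil>
}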
@@ -261,7 +261,7 @@ func (mu *multiUploader) initiate() error {
req := qs.InitiateMultipartUploadInput{
ContentType: &mu.cfg.mimeType,
}
fs.Debugf(mu, "Initiating a multi-part upload")
fs.Debugf(mu, "Tried to initiate a multi-part upload")
rsp, err := bucketInit.InitiateMultipartUpload(mu.cfg.key, &req)
if err == nil {
mu.uploadID = rsp.UploadID
@@ -279,12 +279,12 @@ func (mu *multiUploader) send(c chunk) error {
ContentLength: &c.size,
Body: c.buffer,
}
fs.Debugf(mu, "Uploading a part to QingStor with partNumber %d and partSize %d", c.partNumber, c.size)
fs.Debugf(mu, "Tried to upload a part to QingStor that partNumber %d and partSize %d", c.partNumber, c.size)
_, err := bucketInit.UploadMultipart(mu.cfg.key, &req)
if err != nil {
return err
}
fs.Debugf(mu, "Done uploading part partNumber %d and partSize %d", c.partNumber, c.size)
fs.Debugf(mu, "Upload part finished that partNumber %d and partSize %d", c.partNumber, c.size)
mu.mtx.Lock()
defer mu.mtx.Unlock()
@@ -304,7 +304,7 @@ func (mu *multiUploader) list() error {
req := qs.ListMultipartInput{
UploadID: mu.uploadID,
}
fs.Debugf(mu, "Reading multi-part details")
fs.Debugf(mu, "Tried to list a multi-part")
rsp, err := bucketInit.ListMultipart(mu.cfg.key, &req)
if err == nil {
mu.objectParts = rsp.ObjectParts
@@ -331,7 +331,7 @@ func (mu *multiUploader) complete() error {
ObjectParts: mu.objectParts,
ETag: &md5String,
}
fs.Debugf(mu, "Completing multi-part object")
fs.Debugf(mu, "Tried to complete a multi-part")
_, err = bucketInit.CompleteMultipartUpload(mu.cfg.key, &req)
if err == nil {
fs.Debugf(mu, "Complete multi-part finished")
@@ -348,7 +348,7 @@ func (mu *multiUploader) abort() error {
req := qs.AbortMultipartUploadInput{
UploadID: uploadID,
}
fs.Debugf(mu, "Aborting multi-part object %q", *uploadID)
fs.Debugf(mu, "Tried to abort a multi-part")
_, err = bucketInit.AbortMultipartUpload(mu.cfg.key, &req)
}
@@ -392,14 +392,6 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) error {
var nextChunkLen int
reader, nextChunkLen, err = mu.nextReader()
if err != nil && err != io.EOF {
// empty ch
go func() {
for range ch {
}
}()
// Wait for all goroutines finish
close(ch)
mu.wg.Wait()
return err
}
if nextChunkLen == 0 && partNumber > 0 {

View File

@@ -53,7 +53,7 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
Description: "Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)",
NewFs: NewFs,
Options: []fs.Option{{
Name: fs.ConfigProvider,
@@ -61,9 +61,6 @@ func init() {
Examples: []fs.OptionExample{{
Value: "AWS",
Help: "Amazon Web Services (AWS) S3",
}, {
Value: "Alibaba",
Help: "Alibaba Cloud Object Storage System (OSS) formerly Aliyun",
}, {
Value: "Ceph",
Help: "Ceph Object Storage",
@@ -79,9 +76,6 @@ func init() {
}, {
Value: "Minio",
Help: "Minio Object Storage",
}, {
Value: "Netease",
Help: "Netease Object Storage (NOS)",
}, {
Value: "Wasabi",
Help: "Wasabi Object Storage",
@@ -131,9 +125,6 @@ func init() {
}, {
Value: "eu-west-2",
Help: "EU (London) Region\nNeeds location constraint eu-west-2.",
}, {
Value: "eu-north-1",
Help: "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
}, {
Value: "eu-central-1",
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
@@ -159,7 +150,7 @@ func init() {
}, {
Name: "region",
Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba",
Provider: "!AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure. Will use v4 signatures and an empty region.",
@@ -237,10 +228,10 @@ func init() {
Help: "EU Cross Region Amsterdam Private Endpoint",
}, {
Value: "s3.eu-gb.objectstorage.softlayer.net",
Help: "Great Britain Endpoint",
Help: "Great Britan Endpoint",
}, {
Value: "s3.eu-gb.objectstorage.service.networklayer.com",
Help: "Great Britain Private Endpoint",
Help: "Great Britan Private Endpoint",
}, {
Value: "s3.ap-geo.objectstorage.softlayer.net",
Help: "APAC Cross Regional Endpoint",
@@ -278,75 +269,12 @@ func init() {
Value: "s3.tor01.objectstorage.service.networklayer.com",
Help: "Toronto Single Site Private Endpoint",
}},
}, {
// oss endpoints: https://help.aliyun.com/document_detail/31837.html
Name: "endpoint",
Help: "Endpoint for OSS API.",
Provider: "Alibaba",
Examples: []fs.OptionExample{{
Value: "oss-cn-hangzhou.aliyuncs.com",
Help: "East China 1 (Hangzhou)",
}, {
Value: "oss-cn-shanghai.aliyuncs.com",
Help: "East China 2 (Shanghai)",
}, {
Value: "oss-cn-qingdao.aliyuncs.com",
Help: "North China 1 (Qingdao)",
}, {
Value: "oss-cn-beijing.aliyuncs.com",
Help: "North China 2 (Beijing)",
}, {
Value: "oss-cn-zhangjiakou.aliyuncs.com",
Help: "North China 3 (Zhangjiakou)",
}, {
Value: "oss-cn-huhehaote.aliyuncs.com",
Help: "North China 5 (Huhehaote)",
}, {
Value: "oss-cn-shenzhen.aliyuncs.com",
Help: "South China 1 (Shenzhen)",
}, {
Value: "oss-cn-hongkong.aliyuncs.com",
Help: "Hong Kong (Hong Kong)",
}, {
Value: "oss-us-west-1.aliyuncs.com",
Help: "US West 1 (Silicon Valley)",
}, {
Value: "oss-us-east-1.aliyuncs.com",
Help: "US East 1 (Virginia)",
}, {
Value: "oss-ap-southeast-1.aliyuncs.com",
Help: "Southeast Asia Southeast 1 (Singapore)",
}, {
Value: "oss-ap-southeast-2.aliyuncs.com",
Help: "Asia Pacific Southeast 2 (Sydney)",
}, {
Value: "oss-ap-southeast-3.aliyuncs.com",
Help: "Southeast Asia Southeast 3 (Kuala Lumpur)",
}, {
Value: "oss-ap-southeast-5.aliyuncs.com",
Help: "Asia Pacific Southeast 5 (Jakarta)",
}, {
Value: "oss-ap-northeast-1.aliyuncs.com",
Help: "Asia Pacific Northeast 1 (Japan)",
}, {
Value: "oss-ap-south-1.aliyuncs.com",
Help: "Asia Pacific South 1 (Mumbai)",
}, {
Value: "oss-eu-central-1.aliyuncs.com",
Help: "Central Europe 1 (Frankfurt)",
}, {
Value: "oss-eu-west-1.aliyuncs.com",
Help: "West Europe (London)",
}, {
Value: "oss-me-east-1.aliyuncs.com",
Help: "Middle East 1 (Dubai)",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS,Alibaba",
Provider: "!AWS,IBMCOS",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Value: "objects-us-west-1.dream.io",
Help: "Dream Objects endpoint",
Provider: "Dreamhost",
}, {
@@ -395,9 +323,6 @@ func init() {
}, {
Value: "eu-west-2",
Help: "EU (London) Region.",
}, {
Value: "eu-north-1",
Help: "EU (Stockholm) Region.",
}, {
Value: "EU",
Help: "EU Region.",
@@ -450,7 +375,7 @@ func init() {
Help: "US East Region Flex",
}, {
Value: "us-south-standard",
Help: "US South Region Standard",
Help: "US Sout hRegion Standard",
}, {
Value: "us-south-vault",
Help: "US South Region Vault",
@@ -474,16 +399,16 @@ func init() {
Help: "EU Cross Region Flex",
}, {
Value: "eu-gb-standard",
Help: "Great Britain Standard",
Help: "Great Britan Standard",
}, {
Value: "eu-gb-vault",
Help: "Great Britain Vault",
Help: "Great Britan Vault",
}, {
Value: "eu-gb-cold",
Help: "Great Britain Cold",
Help: "Great Britan Cold",
}, {
Value: "eu-gb-flex",
Help: "Great Britain Flex",
Help: "Great Britan Flex",
}, {
Value: "ap-standard",
Help: "APAC Standard",
@@ -524,13 +449,11 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,IBMCOS,Alibaba",
Provider: "!AWS,IBMCOS",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when server side copying objects as S3
@@ -576,28 +499,6 @@ doesn't copy the ACL from the source but rather writes a fresh one.`,
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS",
Provider: "IBMCOS",
}},
}, {
Name: "bucket_acl",
Help: `Canned ACL used when creating buckets.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when only when creating buckets. If it
isn't set then "acl" is used instead.`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
}, {
Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
}, {
Value: "public-read-write",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
}, {
Value: "authenticated-read",
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
}},
}, {
Name: "server_side_encryption",
Help: "The server-side encryption algorithm used when storing this object in S3.",
@@ -642,42 +543,13 @@ isn't set then "acl" is used instead.`,
}, {
Value: "ONEZONE_IA",
Help: "One Zone Infrequent Access storage class",
}, {
Value: "GLACIER",
Help: "Glacier storage class",
}},
}, {
// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
Name: "storage_class",
Help: "The storage class to use when storing new objects in OSS.",
Provider: "Alibaba",
Examples: []fs.OptionExample{{
Value: "",
Help: "Default",
}, {
Value: "STANDARD",
Help: "Standard storage class",
}, {
Value: "GLACIER",
Help: "Archive storage mode.",
}, {
Value: "STANDARD_IA",
Help: "Infrequent access storage mode.",
}},
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.
Any files larger than this will be uploaded in chunks of this
size. The default is 5MB. The minimum is 5MB.
Note that "--s3-upload-concurrency" chunks of this size are buffered
in memory per transfer.
@@ -705,7 +577,7 @@ concurrently.
If you are uploading small numbers of large file over high speed link
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 4,
Default: 2,
Advanced: true,
}, {
Name: "force_path_style",
@@ -735,16 +607,14 @@ Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
// Constants
const (
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
listChunkSize = 1000 // number of items to read at once
maxRetries = 10 // number of retries to make of operations
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
listChunkSize = 1000 // number of items to read at once
maxRetries = 10 // number of retries to make of operations
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
)
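
One detail from the uploader setup further down is worth a worked example: when a file would need more than the SDK's part limit, the part size is recomputed as (size / MaxUploadParts) rounded up to the next whole MiB. A standalone sketch of that arithmetic (assuming the usual 10,000 part limit):

package main

import "fmt"

const maxUploadParts = 10000 // assumed value of s3manager.MaxUploadParts

// adjustPartSize mirrors the rounding used when a file would not fit in
// maxUploadParts chunks of the configured part size.
func adjustPartSize(size, partSize int64) int64 {
	if size/partSize >= maxUploadParts {
		return (((size / maxUploadParts) >> 20) + 1) << 20
	}
	return partSize
}

func main() {
	// A 5 TiB object with 5 MiB parts would need 1,048,576 parts, so the
	// part size is bumped: 5 TiB / 10,000 ≈ 524.3 MiB, rounded up to 525 MiB.
	fmt.Println(adjustPartSize(5*1024*1024*1024*1024, 5*1024*1024)) // 550502400
}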
// Options defines the configuration for this backend
@@ -757,11 +627,9 @@ type Options struct {
Endpoint string `config:"endpoint"`
LocationConstraint string `config:"location_constraint"`
ACL string `config:"acl"`
BucketACL string `config:"bucket_acl"`
ServerSideEncryption string `config:"server_side_encryption"`
SSEKMSKeyID string `config:"sse_kms_key_id"`
StorageClass string `config:"storage_class"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableChecksum bool `config:"disable_checksum"`
SessionToken string `config:"session_token"`
@@ -783,7 +651,6 @@ type Fs struct {
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
pacer *pacer.Pacer // To pace the API calls
srv *http.Client // a plain http client
}
// Object describes a s3 object
@@ -832,31 +699,23 @@ func (f *Fs) Features() *fs.Features {
// retryErrorCodes is a slice of error codes that we will retry
// See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var retryErrorCodes = []int{
// 409, // Conflict - various states that could be resolved on a retry
409, // Conflict - various states that could be resolved on a retry
503, // Service Unavailable/Slow Down - "Reduce your request rate"
}
//S3 is pretty resilient, and the built in retry handling is probably sufficient
// as it should notice closed connections and timeouts which are the most likely
// sort of failure modes
func (f *Fs) shouldRetry(err error) (bool, error) {
func shouldRetry(err error) (bool, error) {
// If this is an awserr object, try and extract more useful information to determine if we should retry
if awsError, ok := err.(awserr.Error); ok {
// Simple case, check the original embedded error in case it's generically retryable
// Simple case, check the original embedded error in case it's generically retriable
if fserrors.ShouldRetry(awsError.OrigErr()) {
return true, err
}
// Failing that, if it's a RequestFailure it's probably got an http status code we can check
//Failing that, if it's a RequestFailure it's probably got an http status code we can check
if reqErr, ok := err.(awserr.RequestFailure); ok {
// 301 if wrong region for bucket
if reqErr.StatusCode() == http.StatusMovedPermanently {
urfbErr := f.updateRegionForBucket()
if urfbErr != nil {
fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
return false, err
}
return true, err
}
for _, e := range retryErrorCodes {
if reqErr.StatusCode() == e {
return true, err
@@ -864,7 +723,7 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
}
}
}
// Ok, not an awserr, check for generic failure conditions
//Ok, not an awserr, check for generic failure conditions
return fserrors.ShouldRetry(err), err
}
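
The retry predicate above is normally handed to the pacer, which keeps re-running the request while the predicate reports true. A toy stand-in for that loop (simplified; the real fs/pacer adds rate limiting and backoff):

package main

import (
	"errors"
	"fmt"
	"time"
)

// callWithRetry retries the closure while it asks for a retry, with a small
// sleep between attempts and a capped number of tries.
func callWithRetry(tries int, f func() (bool, error)) error {
	var err error
	for i := 0; i < tries; i++ {
		var retry bool
		retry, err = f()
		if !retry {
			return err
		}
		time.Sleep(10 * time.Millisecond)
	}
	return err
}

func main() {
	attempts := 0
	err := callWithRetry(3, func() (bool, error) {
		attempts++
		if attempts < 3 {
			return true, errors.New("slow down") // pretend a 503 came back
		}
		return false, nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}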
@@ -941,21 +800,13 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
if opt.Region == "" {
opt.Region = "us-east-1"
}
if opt.Provider == "Alibaba" || opt.Provider == "Netease" {
opt.ForcePathStyle = false
}
awsConfig := aws.NewConfig().
WithRegion(opt.Region).
WithMaxRetries(maxRetries).
WithCredentials(cred).
WithEndpoint(opt.Endpoint).
WithHTTPClient(fshttp.NewClient(fs.Config)).
WithS3ForcePathStyle(opt.ForcePathStyle)
if opt.Region != "" {
awsConfig.WithRegion(opt.Region)
}
if opt.Endpoint != "" {
awsConfig.WithEndpoint(opt.Endpoint)
}
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
awsSessionOpts := session.Options{
Config: *awsConfig,
@@ -1003,21 +854,6 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -1030,20 +866,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "s3: chunk size")
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "s3: upload cutoff")
}
bucket, directory, err := s3ParsePath(root)
if err != nil {
return nil, err
}
if opt.ACL == "" {
opt.ACL = "private"
}
if opt.BucketACL == "" {
opt.BucketACL = opt.ACL
}
c, ses, err := s3Connection(opt)
if err != nil {
return nil, err
@@ -1056,7 +882,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
bucket: bucket,
ses: ses,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
srv: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{
ReadMimeType: true,
@@ -1072,7 +897,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.HeadObject(&req)
return f.shouldRetry(err)
return shouldRetry(err)
})
if err == nil {
f.root = path.Dir(directory)
@@ -1122,51 +947,6 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// Gets the bucket location
func (f *Fs) getBucketLocation() (string, error) {
req := s3.GetBucketLocationInput{
Bucket: &f.bucket,
}
var resp *s3.GetBucketLocationOutput
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.GetBucketLocation(&req)
return f.shouldRetry(err)
})
if err != nil {
return "", err
}
return s3.NormalizeBucketLocation(aws.StringValue(resp.LocationConstraint)), nil
}
// Updates the region for the bucket by reading the region from the
// bucket then updating the session.
func (f *Fs) updateRegionForBucket() error {
region, err := f.getBucketLocation()
if err != nil {
return errors.Wrap(err, "reading bucket location failed")
}
if aws.StringValue(f.c.Config.Endpoint) != "" {
return errors.Errorf("can't set region to %q as endpoint is set", region)
}
if aws.StringValue(f.c.Config.Region) == region {
return errors.Errorf("region is already %q - not updating", region)
}
// Make a new session with the new region
oldRegion := f.opt.Region
f.opt.Region = region
c, ses, err := s3Connection(&f.opt)
if err != nil {
return errors.Wrap(err, "creating new session failed")
}
f.c = c
f.ses = ses
fs.Logf(f, "Switched region to %q from %q", region, oldRegion)
return nil
}
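
The call to NormalizeBucketLocation above matters because GetBucketLocation does not return a plain region name for every bucket; a tiny sketch of the two special cases it smooths over (behaviour as documented for the AWS SDK, shown here only for illustration):

package main

import "fmt"

// normalizeLocation mirrors what s3.NormalizeBucketLocation is used for:
// an empty location constraint means us-east-1 and "EU" means eu-west-1.
func normalizeLocation(loc string) string {
	switch loc {
	case "":
		return "us-east-1"
	case "EU":
		return "eu-west-1"
	}
	return loc
}

func main() {
	fmt.Println(normalizeLocation(""))          // us-east-1
	fmt.Println(normalizeLocation("eu-west-2")) // eu-west-2
}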
// listFn is called from list to handle an object.
type listFn func(remote string, object *s3.Object, isDirectory bool) error
@@ -1199,7 +979,7 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListObjects(&req)
return f.shouldRetry(err)
return shouldRetry(err)
})
if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok {
@@ -1328,7 +1108,7 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
var resp *s3.ListBucketsOutput
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListBuckets(&req)
return f.shouldRetry(err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -1416,7 +1196,7 @@ func (f *Fs) dirExists() (bool, error) {
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.HeadBucket(&req)
return f.shouldRetry(err)
return shouldRetry(err)
})
if err == nil {
return true, nil
@@ -1447,7 +1227,7 @@ func (f *Fs) Mkdir(dir string) error {
}
req := s3.CreateBucketInput{
Bucket: &f.bucket,
ACL: &f.opt.BucketACL,
ACL: &f.opt.ACL,
}
if f.opt.LocationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
@@ -1456,7 +1236,7 @@ func (f *Fs) Mkdir(dir string) error {
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.CreateBucket(&req)
return f.shouldRetry(err)
return shouldRetry(err)
})
if err, ok := err.(awserr.Error); ok {
if err.Code() == "BucketAlreadyOwnedByYou" {
@@ -1466,7 +1246,6 @@ func (f *Fs) Mkdir(dir string) error {
if err == nil {
f.bucketOK = true
f.bucketDeleted = false
fs.Infof(f, "Bucket created with ACL %q", *req.ACL)
}
return err
}
@@ -1485,12 +1264,11 @@ func (f *Fs) Rmdir(dir string) error {
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.DeleteBucket(&req)
return f.shouldRetry(err)
return shouldRetry(err)
})
if err == nil {
f.bucketOK = false
f.bucketDeleted = true
fs.Infof(f, "Bucket deleted")
}
return err
}
@@ -1546,7 +1324,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.CopyObject(&req)
return f.shouldRetry(err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -1628,7 +1406,7 @@ func (o *Object) readMetaData() (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.c.HeadObject(&req)
return o.fs.shouldRetry(err)
return shouldRetry(err)
})
if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok {
@@ -1724,7 +1502,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
}
err = o.fs.pacer.Call(func() (bool, error) {
_, err := o.fs.c.CopyObject(&req)
return o.fs.shouldRetry(err)
return shouldRetry(err)
})
return err
}
@@ -1756,7 +1534,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.c.GetObject(&req)
return o.fs.shouldRetry(err)
return shouldRetry(err)
})
if err, ok := err.(awserr.RequestFailure); ok {
if err.Code() == "InvalidObjectState" {
@@ -1778,46 +1556,38 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
modTime := src.ModTime()
size := src.Size()
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
var uploader *s3manager.Uploader
if multipart {
uploader = s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
u.Concurrency = o.fs.opt.UploadConcurrency
u.LeavePartsOnError = false
u.S3 = o.fs.c
u.PartSize = int64(o.fs.opt.ChunkSize)
uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
u.Concurrency = o.fs.opt.UploadConcurrency
u.LeavePartsOnError = false
u.S3 = o.fs.c
u.PartSize = int64(o.fs.opt.ChunkSize)
if size == -1 {
// Make parts as small as possible while still being able to upload to the
// S3 file size limit. Rounded up to nearest MB.
u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
return
}
// Adjust PartSize until the number of parts is small enough.
if size/u.PartSize >= s3manager.MaxUploadParts {
// Calculate partition size rounded up to the nearest MB
u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
}
})
}
if size == -1 {
// Make parts as small as possible while still being able to upload to the
// S3 file size limit. Rounded up to nearest MB.
u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
return
}
// Adjust PartSize until the number of parts is small enough.
if size/u.PartSize >= s3manager.MaxUploadParts {
// Calculate partition size rounded up to the nearest MB
u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
}
})
// Set the mtime in the meta data
metadata := map[string]*string{
metaMtime: aws.String(swift.TimeToFloatString(modTime)),
}
// read the md5sum if available for non multpart and if
// disable checksum isn't present.
var md5sum string
if !multipart || !o.fs.opt.DisableChecksum {
if !o.fs.opt.DisableChecksum && size > uploader.PartSize {
hash, err := src.Hash(hash.MD5)
if err == nil && matchMd5.MatchString(hash) {
hashBytes, err := hex.DecodeString(hash)
if err == nil {
md5sum = base64.StdEncoding.EncodeToString(hashBytes)
if multipart {
metadata[metaMD5Hash] = &md5sum
}
metadata[metaMD5Hash] = aws.String(base64.StdEncoding.EncodeToString(hashBytes))
}
}
}
@@ -1826,98 +1596,30 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
mimeType := fs.MimeType(src)
key := o.fs.root + o.remote
if multipart {
req := s3manager.UploadInput{
Bucket: &o.fs.bucket,
ACL: &o.fs.opt.ACL,
Key: &key,
Body: in,
ContentType: &mimeType,
Metadata: metadata,
//ContentLength: &size,
}
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
if o.fs.opt.StorageClass != "" {
req.StorageClass = &o.fs.opt.StorageClass
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
_, err = uploader.Upload(&req)
return o.fs.shouldRetry(err)
})
if err != nil {
return err
}
} else {
req := s3.PutObjectInput{
Bucket: &o.fs.bucket,
ACL: &o.fs.opt.ACL,
Key: &key,
ContentType: &mimeType,
Metadata: metadata,
}
if md5sum != "" {
req.ContentMD5 = &md5sum
}
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
if o.fs.opt.StorageClass != "" {
req.StorageClass = &o.fs.opt.StorageClass
}
// Create the request
putObj, _ := o.fs.c.PutObjectRequest(&req)
// Sign it so we can upload using a presigned request.
//
// Note the SDK doesn't currently support streaming to
// PutObject so we'll use this work-around.
url, headers, err := putObj.PresignRequest(15 * time.Minute)
if err != nil {
return errors.Wrap(err, "s3 upload: sign request")
}
// Set request to nil if empty so as not to make chunked encoding
if size == 0 {
in = nil
}
// create the vanilla http request
httpReq, err := http.NewRequest("PUT", url, in)
if err != nil {
return errors.Wrap(err, "s3 upload: new request")
}
// set the headers we signed and the length
httpReq.Header = headers
httpReq.ContentLength = size
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := o.fs.srv.Do(httpReq)
if err != nil {
return o.fs.shouldRetry(err)
}
body, err := rest.ReadBody(resp)
if err != nil {
return o.fs.shouldRetry(err)
}
if resp.StatusCode >= 200 && resp.StatusCode < 299 {
return false, nil
}
err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
})
if err != nil {
return err
}
req := s3manager.UploadInput{
Bucket: &o.fs.bucket,
ACL: &o.fs.opt.ACL,
Key: &key,
Body: in,
ContentType: &mimeType,
Metadata: metadata,
//ContentLength: &size,
}
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
if o.fs.opt.StorageClass != "" {
req.StorageClass = &o.fs.opt.StorageClass
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
_, err = uploader.Upload(&req)
return shouldRetry(err)
})
if err != nil {
return err
}
// Read the metadata from the newly created object
@@ -1935,7 +1637,7 @@ func (o *Object) Remove() error {
}
err := o.fs.pacer.Call(func() (bool, error) {
_, err := o.fs.c.DeleteObject(&req)
return o.fs.shouldRetry(err)
return shouldRetry(err)
})
return err
}

View File

@@ -23,8 +23,4 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

View File

@@ -28,7 +28,7 @@ import (
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"github.com/pkg/sftp"
sshagent "github.com/xanzy/ssh-agent"
"github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
"golang.org/x/time/rate"
)
@@ -66,22 +66,7 @@ func init() {
IsPassword: true,
}, {
Name: "key_file",
Help: "Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.",
}, {
Name: "key_file_pass",
Help: `The passphrase to decrypt the PEM-encoded private key file.
Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
in the new OpenSSH format can't be used.`,
IsPassword: true,
}, {
Name: "key_use_agent",
Help: `When set forces the usage of the ssh-agent.
When key-file is also set, the ".pub" file of the specified key-file is read and only the associated key is
requested from the ssh-agent. This allows you to avoid ` + "`Too many authentication failures for *username*`" + ` errors
when the ssh-agent contains many keys.`,
Default: false,
Help: "Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.",
}, {
Name: "use_insecure_cipher",
Help: "Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.",
@@ -137,8 +122,6 @@ type Options struct {
Port string `config:"port"`
Pass string `config:"pass"`
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
KeyUseAgent bool `config:"key_use_agent"`
UseInsecureCipher bool `config:"use_insecure_cipher"`
DisableHashCheck bool `config:"disable_hashcheck"`
AskPassword bool `config:"ask_password"`
@@ -315,18 +298,6 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
f.poolMu.Unlock()
}
// shellExpand replaces a leading "~" with "${HOME}" and expands all environment
// variables afterwards.
func shellExpand(s string) string {
if s != "" {
if s[0] == '~' {
s = "${HOME}" + s[1:]
}
s = os.ExpandEnv(s)
}
return s
}
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -354,9 +325,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc")
}
keyFile := shellExpand(opt.KeyFile)
// Add ssh agent-auth if no password or file specified
if (opt.Pass == "" && keyFile == "") || opt.KeyUseAgent {
if opt.Pass == "" && opt.KeyFile == "" {
sshAgentClient, _, err := sshagent.New()
if err != nil {
return nil, errors.Wrap(err, "couldn't connect to ssh-agent")
@@ -365,46 +335,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "couldn't read ssh agent signers")
}
if keyFile != "" {
pubBytes, err := ioutil.ReadFile(keyFile + ".pub")
if err != nil {
return nil, errors.Wrap(err, "failed to read public key file")
}
pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes)
if err != nil {
return nil, errors.Wrap(err, "failed to parse public key file")
}
pubM := pub.Marshal()
found := false
for _, s := range signers {
if bytes.Equal(pubM, s.PublicKey().Marshal()) {
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(s))
found = true
break
}
}
if !found {
return nil, errors.New("private key not found in the ssh-agent")
}
} else {
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signers...))
}
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signers...))
}
// Load key file if specified
if keyFile != "" {
key, err := ioutil.ReadFile(keyFile)
if opt.KeyFile != "" {
key, err := ioutil.ReadFile(opt.KeyFile)
if err != nil {
return nil, errors.Wrap(err, "failed to read private key file")
}
clearpass := ""
if opt.KeyFilePass != "" {
clearpass, err = obscure.Reveal(opt.KeyFilePass)
if err != nil {
return nil, err
}
}
signer, err := ssh.ParsePrivateKeyWithPassphrase(key, []byte(clearpass))
signer, err := ssh.ParsePrivateKey(key)
if err != nil {
return nil, errors.Wrap(err, "failed to parse private key file")
}
@@ -565,13 +505,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// If file is a symlink (not a regular file is the best cross platform test we can do), do a stat to
// pick up the size and type of the destination, instead of the size and type of the symlink.
if !info.Mode().IsRegular() {
oldInfo := info
info, err = f.stat(remote)
if err != nil {
if !os.IsNotExist(err) {
fs.Errorf(remote, "stat of non-regular file/dir failed: %v", err)
}
info = oldInfo
return nil, errors.Wrap(err, "stat of non-regular file/dir failed")
}
}
if info.IsDir() {
@@ -658,22 +594,12 @@ func (f *Fs) Mkdir(dir string) error {
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(dir string) error {
// Check to see if directory is empty as some servers will
// delete recursively with RemoveDirectory
entries, err := f.List(dir)
if err != nil {
return errors.Wrap(err, "Rmdir")
}
if len(entries) != 0 {
return fs.ErrorDirectoryNotEmpty
}
// Remove the directory
root := path.Join(f.root, dir)
c, err := f.getSftpConnection()
if err != nil {
return errors.Wrap(err, "Rmdir")
}
err = c.sftpClient.RemoveDirectory(root)
err = c.sftpClient.Remove(root)
f.putSftpConnection(&c, err)
return err
}

View File

@@ -43,20 +43,6 @@ Above this size files will be chunked into a _segments container. The
default for this is 5GB which is its maximum value.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "no_chunk",
Help: `Don't chunk files during streaming upload.
When doing streaming uploads (eg using rcat or mount) setting this
flag will cause the swift backend to not upload chunked files.
This will limit the maximum upload size to 5GB. However non chunked
files are easier to deal with and have an MD5SUM.
Rclone will still chunk files bigger than chunk_size when doing normal
copy operations.`,
Default: false,
Advanced: true,
}}
// Register with Fs
@@ -130,15 +116,6 @@ func init() {
}, {
Name: "auth_token",
Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)",
}, {
Name: "application_credential_id",
Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)",
}, {
Name: "application_credential_name",
Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)",
}, {
Name: "application_credential_secret",
Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)",
}, {
Name: "auth_version",
Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)",
@@ -182,26 +159,22 @@ provider.`,
// Options defines the configuration for this backend
type Options struct {
EnvAuth bool `config:"env_auth"`
User string `config:"user"`
Key string `config:"key"`
Auth string `config:"auth"`
UserID string `config:"user_id"`
Domain string `config:"domain"`
Tenant string `config:"tenant"`
TenantID string `config:"tenant_id"`
TenantDomain string `config:"tenant_domain"`
Region string `config:"region"`
StorageURL string `config:"storage_url"`
AuthToken string `config:"auth_token"`
AuthVersion int `config:"auth_version"`
ApplicationCredentialID string `config:"application_credential_id"`
ApplicationCredentialName string `config:"application_credential_name"`
ApplicationCredentialSecret string `config:"application_credential_secret"`
StoragePolicy string `config:"storage_policy"`
EndpointType string `config:"endpoint_type"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
NoChunk bool `config:"no_chunk"`
EnvAuth bool `config:"env_auth"`
User string `config:"user"`
Key string `config:"key"`
Auth string `config:"auth"`
UserID string `config:"user_id"`
Domain string `config:"domain"`
Tenant string `config:"tenant"`
TenantID string `config:"tenant_id"`
TenantDomain string `config:"tenant_domain"`
Region string `config:"region"`
StorageURL string `config:"storage_url"`
AuthToken string `config:"auth_token"`
AuthVersion int `config:"auth_version"`
StoragePolicy string `config:"storage_policy"`
EndpointType string `config:"endpoint_type"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
}
// Fs represents a remote swift server
@@ -223,13 +196,10 @@ type Fs struct {
//
// Will definitely have info but maybe not meta
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
size int64
lastModified time.Time
contentType string
md5 string
headers swift.Headers // The object headers if known
fs *Fs // what this object is part of
remote string // The remote path
info swift.Object // Info from the swift object if known
headers swift.Headers // The object headers if known
}
// ------------------------------------------------------------
@@ -305,25 +275,22 @@ func parsePath(path string) (container, directory string, err error) {
func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
c := &swift.Connection{
// Keep these in the same order as the Config for ease of checking
UserName: opt.User,
ApiKey: opt.Key,
AuthUrl: opt.Auth,
UserId: opt.UserID,
Domain: opt.Domain,
Tenant: opt.Tenant,
TenantId: opt.TenantID,
TenantDomain: opt.TenantDomain,
Region: opt.Region,
StorageUrl: opt.StorageURL,
AuthToken: opt.AuthToken,
AuthVersion: opt.AuthVersion,
ApplicationCredentialId: opt.ApplicationCredentialID,
ApplicationCredentialName: opt.ApplicationCredentialName,
ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
EndpointType: swift.EndpointType(opt.EndpointType),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(fs.Config),
UserName: opt.User,
ApiKey: opt.Key,
AuthUrl: opt.Auth,
UserId: opt.UserID,
Domain: opt.Domain,
Tenant: opt.Tenant,
TenantId: opt.TenantID,
TenantDomain: opt.TenantDomain,
Region: opt.Region,
StorageUrl: opt.StorageURL,
AuthToken: opt.AuthToken,
AuthVersion: opt.AuthVersion,
EndpointType: swift.EndpointType(opt.EndpointType),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(fs.Config),
}
if opt.EnvAuth {
err := c.ApplyEnvironment()
@@ -333,13 +300,11 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
}
StorageUrl, AuthToken := c.StorageUrl, c.AuthToken // nolint
if !c.Authenticated() {
if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret == "" {
if c.UserName == "" && c.UserId == "" {
return nil, errors.New("user name or user id not found for authentication (and no storage_url+auth_token is provided)")
}
if c.ApiKey == "" {
return nil, errors.New("key not found")
}
if c.UserName == "" && c.UserId == "" {
return nil, errors.New("user name or user id not found for authentication (and no storage_url+auth_token is provided)")
}
if c.ApiKey == "" {
return nil, errors.New("key not found")
}
if c.AuthUrl == "" {
return nil, errors.New("auth not found")
@@ -430,7 +395,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
return f, nil
}
// NewFs constructs an Fs from the path, container:path
// NewFs contstructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
@@ -467,10 +432,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
}
if info != nil {
// Set info but not headers
err := o.decodeMetaData(info)
if err != nil {
return nil, err
}
o.info = *info
} else {
err := o.readMetaData() // reads info and headers, returning an error
if err != nil {
@@ -867,7 +829,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {
fs.Debugf(o, "Returning empty Md5sum for swift large object")
return "", nil
}
return strings.ToLower(o.md5), nil
return strings.ToLower(o.info.Hash), nil
}
// hasHeader checks for the header passed in returning false if the
@@ -896,22 +858,7 @@ func (o *Object) isStaticLargeObject() (bool, error) {
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// decodeMetaData sets the metadata in the object from a swift.Object
//
// Sets
// o.lastModified
// o.size
// o.md5
// o.contentType
func (o *Object) decodeMetaData(info *swift.Object) (err error) {
o.lastModified = info.LastModified
o.size = info.Bytes
o.md5 = info.Hash
o.contentType = info.ContentType
return nil
return o.info.Bytes
}
// readMetaData gets the metadata if it hasn't already been fetched
@@ -935,11 +882,8 @@ func (o *Object) readMetaData() (err error) {
}
return err
}
o.info = info
o.headers = h
err = o.decodeMetaData(&info)
if err != nil {
return err
}
return nil
}
@@ -950,17 +894,17 @@ func (o *Object) readMetaData() (err error) {
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
if fs.Config.UseServerModTime {
return o.lastModified
return o.info.LastModified
}
err := o.readMetaData()
if err != nil {
fs.Debugf(o, "Failed to read metadata: %s", err)
return o.lastModified
return o.info.LastModified
}
modTime, err := o.headers.ObjectMetadata().GetModTime()
if err != nil {
// fs.Logf(o, "Failed to read mtime from object: %v", err)
return o.lastModified
return o.info.LastModified
}
return modTime
}
@@ -994,7 +938,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
// It compares the Content-Type to directoryMarkerContentType - that
// makes it a directory marker which is not storable.
func (o *Object) Storable() bool {
return o.contentType != directoryMarkerContentType
return o.info.ContentType != directoryMarkerContentType
}
// Open an object for read
@@ -1161,31 +1105,20 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
contentType := fs.MimeType(src)
headers := m.ObjectHeaders()
uniquePrefix := ""
if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
if err != nil {
return err
}
o.headers = nil // wipe old metadata
} else {
if size >= 0 {
headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
}
var rxHeaders swift.Headers
headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length as we know it
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
_, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
return shouldRetry(err)
})
if err != nil {
return err
}
// set Metadata since ObjectPut checked the hash and length so we know the
// object has been safely uploaded
o.lastModified = modTime
o.size = size
o.md5 = rxHeaders["ETag"]
o.contentType = contentType
o.headers = headers
}
// If file was a dynamic large object then remove old/all segments
@@ -1196,7 +1129,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
}
// Read the metadata from the newly created object if necessary
// Read the metadata from the newly created object
o.headers = nil // wipe old metadata
return o.readMetaData()
}
@@ -1226,7 +1160,7 @@ func (o *Object) Remove() error {
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
return o.contentType
return o.info.ContentType
}
// Check the interfaces are satisfied

View File

@@ -177,8 +177,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// The ChangeNotify implemantion must empty the channel
// regulary. When the channel gets closed, the implemantion
// should stop polling and release resources.
func (f *Fs) ChangeNotify(fn func(string, fs.EntryType), ch <-chan time.Duration) {
var remoteChans []chan time.Duration
@@ -376,11 +376,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}).Fill(f)
features = features.Mask(f.wr) // mask the features just on the writable fs
// Really need the union of all remotes for these, so
// re-instate and calculate separately.
features.ChangeNotify = f.ChangeNotify
features.DirCacheFlush = f.DirCacheFlush
// FIXME maybe should be masking the bools here?
// Clear ChangeNotify and DirCacheFlush if all are nil

View File

@@ -6,11 +6,7 @@ import (
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
)
const (
@@ -66,13 +62,11 @@ type Response struct {
// Note that status collects all the status values for which we just
// check the first is OK.
type Prop struct {
Status []string `xml:"DAV: status"`
Name string `xml:"DAV: prop>displayname,omitempty"`
Type *xml.Name `xml:"DAV: prop>resourcetype>collection,omitempty"`
IsCollection *string `xml:"DAV: prop>iscollection,omitempty"` // this is a Microsoft extension see #2716
Size int64 `xml:"DAV: prop>getcontentlength,omitempty"`
Modified Time `xml:"DAV: prop>getlastmodified,omitempty"`
Checksums []string `xml:"prop>checksums>checksum,omitempty"`
Status []string `xml:"DAV: status"`
Name string `xml:"DAV: prop>displayname,omitempty"`
Type *xml.Name `xml:"DAV: prop>resourcetype>collection,omitempty"`
Size int64 `xml:"DAV: prop>getcontentlength,omitempty"`
Modified Time `xml:"DAV: prop>getlastmodified,omitempty"`
}
// Parse a status of the form "HTTP/1.1 200 OK" or "HTTP/1.1 200"
@@ -98,33 +92,13 @@ func (p *Prop) StatusOK() bool {
return false
}
// Hashes returns a map of all checksums - may be nil
func (p *Prop) Hashes() (hashes map[hash.Type]string) {
if len(p.Checksums) == 0 {
return nil
}
hashes = make(map[hash.Type]string)
for _, checksums := range p.Checksums {
checksums = strings.ToLower(checksums)
for _, checksum := range strings.Split(checksums, " ") {
switch {
case strings.HasPrefix(checksum, "sha1:"):
hashes[hash.SHA1] = checksum[5:]
case strings.HasPrefix(checksum, "md5:"):
hashes[hash.MD5] = checksum[4:]
}
}
}
return hashes
}
// PropValue is a tagged name and value
type PropValue struct {
XMLName xml.Name `xml:""`
Value string `xml:",chardata"`
}
// Error is used to describe webdav errors
// Error is used to desribe webdav errors
//
// <d:error xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns">
// <s:exception>Sabre\DAV\Exception\NotFound</s:exception>
@@ -137,7 +111,7 @@ type Error struct {
StatusCode int
}
// Error returns a string for the error and satisfies the error interface
// Error returns a string for the error and statistifes the error interface
func (e *Error) Error() string {
var out []string
if e.Message != "" {
@@ -174,8 +148,6 @@ var timeFormats = []string{
time.RFC3339, // Wed, 31 Oct 2018 13:57:11 CET (as used by komfortcloud.de)
}
var oneTimeError sync.Once
// UnmarshalXML turns XML into a Time
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var v string
@@ -199,33 +171,5 @@ func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
break
}
}
if err != nil {
oneTimeError.Do(func() {
fs.Errorf(nil, "Failed to parse time %q - using the epoch", v)
})
// Return the epoch instead
*t = Time(time.Unix(0, 0))
// ignore error
err = nil
}
return err
}
// Quota is used to read the bytes used and available
//
// <d:multistatus xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns" xmlns:oc="http://owncloud.org/ns" xmlns:nc="http://nextcloud.org/ns">
// <d:response>
// <d:href>/remote.php/webdav/</d:href>
// <d:propstat>
// <d:prop>
// <d:quota-available-bytes>-3</d:quota-available-bytes>
// <d:quota-used-bytes>376461895</d:quota-used-bytes>
// </d:prop>
// <d:status>HTTP/1.1 200 OK</d:status>
// </d:propstat>
// </d:response>
// </d:multistatus>
type Quota struct {
Available int64 `xml:"DAV: response>propstat>prop>quota-available-bytes"`
Used int64 `xml:"DAV: response>propstat>prop>quota-used-bytes"`
}

View File

@@ -102,7 +102,7 @@ func (ca *CookieAuth) Cookies() (*CookieResponse, error) {
func (ca *CookieAuth) getSPCookie(conf *SuccessResponse) (*CookieResponse, error) {
spRoot, err := url.Parse(ca.endpoint)
if err != nil {
return nil, errors.Wrap(err, "Error while constructing endpoint URL")
return nil, errors.Wrap(err, "Error while contructing endpoint URL")
}
u, err := url.Parse("https://" + spRoot.Host + "/_forms/default.aspx?wa=wsignin1.0")
@@ -121,7 +121,7 @@ func (ca *CookieAuth) getSPCookie(conf *SuccessResponse) (*CookieResponse, error
Jar: jar,
}
// Send the previously acquired Token as a Post parameter
// Send the previously aquired Token as a Post parameter
if _, err = client.Post(u.String(), "text/xml", strings.NewReader(conf.Succ.Token)); err != nil {
return nil, errors.Wrap(err, "Error while grabbing cookies from endpoint: %v")
}

View File

@@ -2,10 +2,13 @@ package odrvcookie
import (
"time"
"github.com/ncw/rclone/lib/rest"
)
// CookieRenew holds information for the renew
type CookieRenew struct {
srv *rest.Client
timer *time.Ticker
renewFn func()
}

View File

@@ -2,13 +2,23 @@
// object storage system.
package webdav
// Owncloud: Getting Oc-Checksum:
// SHA1:f572d396fae9206628714fb2ce00f72e94f2258f on HEAD but not on
// nextcloud?
// docs for file webdav
// https://docs.nextcloud.com/server/12/developer_manual/client_apis/WebDAV/index.html
// indicates checksums can be set as metadata here
// https://github.com/nextcloud/server/issues/6129
// owncloud seems to have checksums as metadata though - can read them
// SetModTime might be possible
// https://stackoverflow.com/questions/3579608/webdav-can-a-client-modify-the-mtime-of-a-file
// ...support for a PROPSET to lastmodified (mind the missing get) which does the utime() call might be an option.
// For example the ownCloud WebDAV server does it that way.
import (
"bytes"
"encoding/xml"
"fmt"
"io"
@@ -106,7 +116,6 @@ type Fs struct {
canStream bool // set if can stream
useOCMtime bool // set if can use X-OC-Mtime
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
hasChecksums bool // set if can use owncloud style checksums
}
// Object describes a webdav object
@@ -118,8 +127,7 @@ type Object struct {
hasMetaData bool // whether info below has been set
size int64 // size of the object
modTime time.Time // modification time of the object
sha1 string // SHA-1 of the object content if known
md5 string // MD5 of the object content if known
sha1 string // SHA-1 of the object content
}
// ------------------------------------------------------------
@@ -172,18 +180,6 @@ func itemIsDir(item *api.Response) bool {
}
fs.Debugf(nil, "Unknown resource type %q/%q on %q", t.Space, t.Local, item.Props.Name)
}
// the iscollection prop is a Microsoft extension, but if present it is a reliable indicator
// if the above check failed - see #2716. This can be an integer or a boolean - see #2964
if t := item.Props.IsCollection; t != nil {
switch x := strings.ToLower(*t); x {
case "0", "false":
return false
case "1", "true":
return true
default:
fs.Debugf(nil, "Unknown value %q for IsCollection", x)
}
}
return false
}
@@ -198,9 +194,6 @@ func (f *Fs) readMetaDataForPath(path string, depth string) (info *api.Prop, err
},
NoRedirect: true,
}
if f.hasChecksums {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
@@ -256,7 +249,7 @@ func errorHandler(resp *http.Response) error {
return errResponse
}
// addSlash makes sure s is terminated with a / if non empty
// addShlash makes sure s is terminated with a / if non empty
func addSlash(s string) string {
if s != "" && !strings.HasSuffix(s, "/") {
s += "/"
@@ -364,11 +357,9 @@ func (f *Fs) setQuirks(vendor string) error {
f.canStream = true
f.precision = time.Second
f.useOCMtime = true
f.hasChecksums = true
case "nextcloud":
f.precision = time.Second
f.useOCMtime = true
f.hasChecksums = true
case "sharepoint":
// To mount sharepoint, two Cookies are required
// They have to be set instead of BasicAuth
@@ -435,22 +426,6 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// Read the normal props, plus the checksums
//
// <oc:checksums><oc:checksum>SHA1:f572d396fae9206628714fb2ce00f72e94f2258f MD5:b1946ac92492d2347c6235b4d2611184 ADLER32:084b021f</oc:checksum></oc:checksums>
var owncloudProps = []byte(`<?xml version="1.0"?>
<d:propfind xmlns:d="DAV:" xmlns:oc="http://owncloud.org/ns" xmlns:nc="http://nextcloud.org/ns">
<d:prop>
<d:displayname />
<d:getlastmodified />
<d:getcontentlength />
<d:resourcetype />
<d:getcontenttype />
<oc:checksums />
</d:prop>
</d:propfind>
`)
// list the objects into the function supplied
//
// If directories is set it only sends directories
@@ -470,9 +445,6 @@ func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, depth str
"Depth": depth,
},
}
if f.hasChecksums {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
@@ -629,9 +601,10 @@ func (f *Fs) mkParentDir(dirPath string) error {
return f.mkdir(parent)
}
// low level mkdir, only makes the directory, doesn't attempt to create parents
func (f *Fs) _mkdir(dirPath string) error {
// We assume the root is already created
// mkdir makes the directory and parents using native paths
func (f *Fs) mkdir(dirPath string) error {
// defer log.Trace(dirPath, "")("")
// We assume the root is already ceated
if dirPath == "" {
return nil
}
@@ -644,27 +617,20 @@ func (f *Fs) _mkdir(dirPath string) error {
Path: dirPath,
NoResponse: true,
}
return f.pacer.Call(func() (bool, error) {
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(&opts)
return shouldRetry(resp, err)
})
}
// mkdir makes the directory and parents using native paths
func (f *Fs) mkdir(dirPath string) error {
// defer log.Trace(dirPath, "")("")
err := f._mkdir(dirPath)
if apiErr, ok := err.(*api.Error); ok {
// already exists
// owncloud returns 423/StatusLocked if the create is already in progress
if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable || apiErr.StatusCode == http.StatusLocked {
if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable {
return nil
}
// parent does not exist
// parent does not exists
if apiErr.StatusCode == http.StatusConflict {
err = f.mkParentDir(dirPath)
if err == nil {
err = f._mkdir(dirPath)
err = f.mkdir(dirPath)
}
}
}
@@ -876,52 +842,9 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
if f.hasChecksums {
return hash.NewHashSet(hash.MD5, hash.SHA1)
}
return hash.Set(hash.None)
}
// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
opts := rest.Opts{
Method: "PROPFIND",
Path: "",
ExtraHeaders: map[string]string{
"Depth": "0",
},
}
opts.Body = bytes.NewBuffer([]byte(`<?xml version="1.0" ?>
<D:propfind xmlns:D="DAV:">
<D:prop>
<D:quota-available-bytes/>
<D:quota-used-bytes/>
</D:prop>
</D:propfind>
`))
var q = api.Quota{
Available: -1,
Used: -1,
}
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(&opts, nil, &q)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "about call failed")
}
usage := &fs.Usage{}
if q.Available >= 0 && q.Used >= 0 {
usage.Total = fs.NewUsageValue(q.Available + q.Used)
}
if q.Used >= 0 {
usage.Used = fs.NewUsageValue(q.Used)
}
return usage, nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -942,17 +865,12 @@ func (o *Object) Remote() string {
return o.remote
}
// Hash returns the SHA1 or MD5 of an object returning a lowercase hex string
// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
if o.fs.hasChecksums {
switch t {
case hash.SHA1:
return o.sha1, nil
case hash.MD5:
return o.md5, nil
}
if t != hash.SHA1 {
return "", hash.ErrUnsupported
}
return "", hash.ErrUnsupported
return o.sha1, nil
}
// Size returns the size of an object in bytes
@@ -970,11 +888,6 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
o.hasMetaData = true
o.size = info.Size
o.modTime = time.Time(info.Modified)
if o.fs.hasChecksums {
hashes := info.Hashes()
o.sha1 = hashes[hash.SHA1]
o.md5 = hashes[hash.MD5]
}
return nil
}
@@ -1054,21 +967,9 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
ContentType: fs.MimeType(src),
}
if o.fs.useOCMtime || o.fs.hasChecksums {
opts.ExtraHeaders = map[string]string{}
if o.fs.useOCMtime {
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime().UnixNano())/1E9)
}
if o.fs.hasChecksums {
// Set an upload checksum - prefer SHA1
//
// This is used as an upload integrity test. If we set
// only SHA1 here, owncloud will calculate the MD5 too.
if sha1, _ := src.Hash(hash.SHA1); sha1 != "" {
opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
} else if md5, _ := src.Hash(hash.MD5); md5 != "" {
opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
}
if o.fs.useOCMtime {
opts.ExtraHeaders = map[string]string{
"X-OC-Mtime": fmt.Sprintf("%f", float64(src.ModTime().UnixNano())/1E9),
}
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -1112,6 +1013,5 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)

View File

@@ -56,7 +56,7 @@ type AsyncInfo struct {
Templated bool `json:"templated"`
}
// AsyncStatus is returned when requesting the status of an async operations. Possible values in-progress, success, failure
// AsyncStatus is returned when requesting the status of an async operations. Possble values in-progress, success, failure
type AsyncStatus struct {
Status string `json:"status"`
}

View File

@@ -307,7 +307,7 @@ func (f *Fs) itemToDirEntry(remote string, object *api.ResourceInfoResponse) (fs
if err != nil {
return nil, errors.Wrap(err, "error parsing time in directory item")
}
d := fs.NewDir(remote, t).SetSize(object.Size)
d := fs.NewDir(remote, t).SetSize(int64(object.Size))
return d, nil
case "file":
o, err := f.newObjectWithInfo(remote, object)
@@ -634,7 +634,7 @@ func (f *Fs) Purge() error {
return f.purgeCheck("", false)
}
// copyOrMoves copies or moves directories or files depending on the method parameter
// copyOrMoves copys or moves directories or files depending on the mthod parameter
func (f *Fs) copyOrMove(method, src, dst string, overwrite bool) (err error) {
opts := rest.Opts{
Method: "POST",
@@ -1107,7 +1107,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return err
}
//if file uploaded successfully then return metadata
//if file uploaded sucessfully then return metadata
o.modTime = modTime
o.md5sum = "" // according to unit tests after put the md5 is empty.
o.size = int64(in1.BytesRead()) // better solution o.readMetaData() ?

View File

@@ -1,5 +0,0 @@
#!/bin/bash
set -e
docker build -t rclone/xgo-cgofuse https://github.com/billziss-gh/cgofuse.git
docker images
docker push rclone/xgo-cgofuse

View File

@@ -8,8 +8,6 @@
package main
import (
"archive/tar"
"compress/gzip"
"encoding/json"
"flag"
"fmt"
@@ -21,7 +19,6 @@ import (
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"time"
@@ -34,13 +31,7 @@ var (
extract = flag.String("extract", "", "Extract the named executable from the .tar.gz and install into bindir.")
bindir = flag.String("bindir", defaultBinDir(), "Directory to install files downloaded with -extract.")
// Globals
matchProject = regexp.MustCompile(`^([\w-]+)/([\w-]+)$`)
osAliases = map[string][]string{
"darwin": []string{"macos", "osx"},
}
archAliases = map[string][]string{
"amd64": []string{"x86_64"},
}
matchProject = regexp.MustCompile(`^(\w+)/(\w+)$`)
)
// A github release
@@ -122,41 +113,25 @@ func writable(path string) bool {
// Directory to install releases in by default
//
// Find writable directories on $PATH. Use $GOPATH/bin if that is on
// the path and writable or use the first writable directory which is
// in $HOME or failing that the first writable directory.
// Find writable directories on $PATH. Use the first writable
// directory which is in $HOME or failing that the first writable
// directory.
//
// Returns "" if none of the above were found
func defaultBinDir() string {
home := os.Getenv("HOME")
var (
bin string
homeBin string
goHomeBin string
gopath = os.Getenv("GOPATH")
)
var binDir string
for _, dir := range strings.Split(os.Getenv("PATH"), ":") {
if writable(dir) {
if strings.HasPrefix(dir, home) {
if homeBin != "" {
homeBin = dir
}
if gopath != "" && strings.HasPrefix(dir, gopath) && goHomeBin == "" {
goHomeBin = dir
}
return dir
}
if bin == "" {
bin = dir
if binDir != "" {
binDir = dir
}
}
}
if goHomeBin != "" {
return goHomeBin
}
if homeBin != "" {
return homeBin
}
return bin
return binDir
}
// read the body or an error message
@@ -200,8 +175,7 @@ func getAsset(project string, matchName *regexp.Regexp) (string, string) {
}
for _, asset := range release.Assets {
//log.Printf("Finding %s", asset.Name)
if matchName.MatchString(asset.Name) && isOurOsArch(asset.Name) {
if matchName.MatchString(asset.Name) {
return asset.BrowserDownloadURL, asset.Name
}
}
@@ -209,22 +183,6 @@ func getAsset(project string, matchName *regexp.Regexp) (string, string) {
return "", ""
}
// isOurOsArch returns true if s contains our OS and our Arch
func isOurOsArch(s string) bool {
s = strings.ToLower(s)
check := func(base string, aliases map[string][]string) bool {
names := []string{base}
names = append(names, aliases[base]...)
for _, name := range names {
if strings.Contains(s, name) {
return true
}
}
return false
}
return check(runtime.GOARCH, archAliases) && check(runtime.GOOS, osAliases)
}
// get a file for download
func getFile(url, fileName string) {
log.Printf("Downloading %q from %q", fileName, url)
@@ -271,66 +229,6 @@ func run(args ...string) {
}
}
// Untars fileName from srcFile
func untar(srcFile, fileName, extractDir string) {
f, err := os.Open(srcFile)
if err != nil {
log.Fatalf("Couldn't open tar: %v", err)
}
defer func() {
err := f.Close()
if err != nil {
log.Fatalf("Couldn't close tar: %v", err)
}
}()
var in io.Reader = f
srcExt := filepath.Ext(srcFile)
if srcExt == ".gz" || srcExt == ".tgz" {
gzf, err := gzip.NewReader(f)
if err != nil {
log.Fatalf("Couldn't open gzip: %v", err)
}
in = gzf
}
tarReader := tar.NewReader(in)
for {
header, err := tarReader.Next()
if err == io.EOF {
break
}
if err != nil {
log.Fatalf("Trouble reading tar file: %v", err)
}
name := header.Name
switch header.Typeflag {
case tar.TypeReg:
baseName := filepath.Base(name)
if baseName == fileName {
outPath := filepath.Join(extractDir, fileName)
out, err := os.OpenFile(outPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)
if err != nil {
log.Fatalf("Couldn't open output file: %v", err)
}
defer func() {
err := out.Close()
if err != nil {
log.Fatalf("Couldn't close output: %v", err)
}
}()
n, err := io.Copy(out, tarReader)
if err != nil {
log.Fatalf("Couldn't write output file: %v", err)
}
log.Printf("Wrote %s (%d bytes) as %q", fileName, n, outPath)
}
}
}
}
func main() {
flag.Parse()
args := flag.Args()
@@ -359,6 +257,8 @@ func main() {
log.Fatalf("Need to set -bindir")
}
log.Printf("Unpacking %s from %s and installing into %s", *extract, fileName, *bindir)
untar(fileName, *extract, *bindir+"/")
run("tar", "xf", fileName, *extract)
run("chmod", "a+x", *extract)
run("mv", "-f", *extract, *bindir+"/")
}
}

View File

@@ -3,7 +3,7 @@
version="$1"
if [ "$version" = "" ]; then
echo "Syntax: $0 <version, eg v1.42> [delete]"
echo "Syntax: $0 <version> [delete]"
exit 1
fi
dry_run="--dry-run"
@@ -14,4 +14,4 @@ else
echo "Use '$0 $version delete' to actually delete files"
fi
rclone ${dry_run} -P --fast-list --checkers 16 --transfers 16 delete --include "**${version}**" memstore:beta-rclone-org
rclone ${dry_run} --fast-list -P --checkers 16 --transfers 16 delete --include "**/${version}**" memstore:beta-rclone-org

View File

@@ -29,7 +29,7 @@ github-release release \
--name "rclone" \
--description "Rclone - rsync for cloud storage. Sync files to and from many cloud storage providers."
for build in `ls build | grep -v current | grep -v testbuilds`; do
for build in `ls build | grep -v current`; do
echo "Uploading ${build}"
base="${build%.*}"
parts=(${base//-/ })

View File

@@ -51,7 +51,7 @@ var (
errorCommandNotFound = errors.New("command not found")
errorUncategorized = errors.New("uncategorized error")
errorNotEnoughArguments = errors.New("not enough arguments")
errorTooManyArguments = errors.New("too many arguments")
errorTooManyArguents = errors.New("too many arguments")
)
const (
@@ -294,12 +294,14 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
func CheckArgs(MinArgs, MaxArgs int, cmd *cobra.Command, args []string) {
if len(args) < MinArgs {
_ = cmd.Usage()
_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments minimum: you provided %d non flag arguments: %q\n", cmd.Name(), MinArgs, len(args), args)
_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments minimum\n", cmd.Name(), MinArgs)
// os.Exit(1)
resolveExitCode(errorNotEnoughArguments)
} else if len(args) > MaxArgs {
_ = cmd.Usage()
_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum: you provided %d non flag arguments: %q\n", cmd.Name(), MaxArgs, len(args), args)
resolveExitCode(errorTooManyArguments)
_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum\n", cmd.Name(), MaxArgs)
// os.Exit(1)
resolveExitCode(errorTooManyArguents)
}
}
@@ -456,7 +458,7 @@ func AddBackendFlags() {
help = help[:nl]
}
help = strings.TrimSpace(help)
flag := pflag.CommandLine.VarPF(opt, name, opt.ShortOpt, help)
flag := pflag.CommandLine.VarPF(opt, name, string(opt.ShortOpt), help)
if _, isBool := opt.Default.(bool); isBool {
flag.NoOptDefVal = "true"
}

View File

@@ -93,15 +93,6 @@ For example to make a swift remote of name myremote using auto config
you would do:
rclone config create myremote swift env_auth true
Note that if the config process would normally ask a question the
default is taken. Each time that happens rclone will print a message
saying how to affect the value taken.
So for example if you wanted to configure a Google Drive remote but
using remote authorization you would do this:
rclone config create mydrive drive config_is_local false
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 256, command, args)
@@ -128,11 +119,6 @@ in pairs of <key> <value>.
For example to update the env_auth field of a remote of name myremote you would do:
rclone config update myremote swift env_auth true
If the remote uses oauth the token will be updated, if you don't
require this add an extra parameter thus:
rclone config update myremote swift env_auth true config_refresh_token false
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(3, 256, command, args)

View File

@@ -51,17 +51,6 @@ written a trailing / - meaning "copy the contents of this directory".
This applies to all commands and whether you are talking about the
source or destination.
See the [--no-traverse](/docs/#no-traverse) option for controlling
whether rclone lists the destination directory or not. Supplying this
option when copying a small number of files into a large destination
can speed transfers up greatly.
For example, if you have many files in /path/to/src but only a few of
them change every day, you can copy all the files which have
changed recently very efficiently like this:
rclone copy --max-age 24h --no-traverse /path/to/src remote:
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
`,
Run: func(command *cobra.Command, args []string) {

View File

@@ -32,47 +32,8 @@ documentation, changelog and configuration walkthroughs.
fs.Debugf("rclone", "Version %q finishing with parameters %q", fs.Version, os.Args)
atexit.Run()
},
BashCompletionFunction: bashCompletionFunc,
}
const (
bashCompletionFunc = `
__rclone_custom_func() {
if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
local cur cword prev words
if declare -F _init_completion > /dev/null; then
_init_completion -n : || return
else
__rclone_init_completion -n : || return
fi
if [[ $cur =~ ^[[:alnum:]]*$ ]]; then
local remote
while IFS= read -r remote; do
[[ $remote != $cur* ]] || COMPREPLY+=("$remote")
done < <(command rclone listremotes)
if [[ ${COMPREPLY[@]} ]]; then
local paths=("$cur"*)
[[ ! -f ${paths[0]} ]] || COMPREPLY+=("${paths[@]}")
fi
elif [[ $cur =~ ^[[:alnum:]]+: ]]; then
local path=${cur#*:}
if [[ $path == */* ]]; then
local prefix=${path%/*}
else
local prefix=
fi
local line
while IFS= read -r line; do
local reply=${prefix:+$prefix/}$line
[[ $reply != $path* ]] || COMPREPLY+=("$reply")
done < <(rclone lsf "${cur%%:*}:$prefix" 2>/dev/null)
fi
[[ ! ${COMPREPLY[@]} ]] || compopt -o nospace
fi
}
`
)
// root help command
var helpCommand = &cobra.Command{
Use: "help",

View File

@@ -21,22 +21,11 @@ import (
"github.com/spf13/cobra"
)
type position int
const (
positionMiddle position = 1 << iota
positionLeft
positionRight
positionNone position = 0
positionAll position = positionRight<<1 - 1
)
var (
checkNormalization bool
checkControl bool
checkLength bool
checkStreaming bool
positionList = []position{positionMiddle, positionLeft, positionRight}
)
func init() {
@@ -70,7 +59,7 @@ a bit of go code for each one.
type results struct {
f fs.Fs
mu sync.Mutex
stringNeedsEscaping map[string]position
charNeedsEscaping map[rune]bool
maxFileLength int
canWriteUnnormalized bool
canReadUnnormalized bool
@@ -80,8 +69,8 @@ type results struct {
func newResults(f fs.Fs) *results {
return &results{
f: f,
stringNeedsEscaping: make(map[string]position),
f: f,
charNeedsEscaping: make(map[rune]bool),
}
}
@@ -90,13 +79,13 @@ func (r *results) Print() {
fmt.Printf("// %s\n", r.f.Name())
if checkControl {
escape := []string{}
for c, needsEscape := range r.stringNeedsEscaping {
if needsEscape != positionNone {
for c, needsEscape := range r.charNeedsEscaping {
if needsEscape {
escape = append(escape, fmt.Sprintf("0x%02X", c))
}
}
sort.Strings(escape)
fmt.Printf("stringNeedsEscaping = []byte{\n")
fmt.Printf("charNeedsEscaping = []byte{\n")
fmt.Printf("\t%s\n", strings.Join(escape, ", "))
fmt.Printf("}\n")
}
@@ -141,45 +130,20 @@ func (r *results) checkUTF8Normalization() {
}
}
func (r *results) checkStringPositions(s string) {
fs.Infof(r.f, "Writing position file 0x%0X", s)
positionError := positionNone
for _, pos := range positionList {
path := ""
switch pos {
case positionMiddle:
path = fmt.Sprintf("position-middle-%0X-%s-", s, s)
case positionLeft:
path = fmt.Sprintf("%s-position-left-%0X", s, s)
case positionRight:
path = fmt.Sprintf("position-right-%0X-%s", s, s)
default:
panic("invalid position: " + pos.String())
}
_, writeErr := r.writeFile(path)
if writeErr != nil {
fs.Infof(r.f, "Writing %s position file 0x%0X Error: %s", pos.String(), s, writeErr)
} else {
fs.Infof(r.f, "Writing %s position file 0x%0X OK", pos.String(), s)
}
obj, getErr := r.f.NewObject(path)
if getErr != nil {
fs.Infof(r.f, "Getting %s position file 0x%0X Error: %s", pos.String(), s, getErr)
} else {
if obj.Size() != 50 {
fs.Infof(r.f, "Getting %s position file 0x%0X Invalid Size: %d", pos.String(), s, obj.Size())
} else {
fs.Infof(r.f, "Getting %s position file 0x%0X OK", pos.String(), s)
}
}
if writeErr != nil || getErr != nil {
positionError += pos
}
// check we can write file with the rune passed in
func (r *results) checkChar(c rune) {
fs.Infof(r.f, "Writing file 0x%02X", c)
path := fmt.Sprintf("0x%02X-%c-", c, c)
_, err := r.writeFile(path)
escape := false
if err != nil {
fs.Infof(r.f, "Couldn't write file 0x%02X", c)
escape = true
} else {
fs.Infof(r.f, "OK writing file 0x%02X", c)
}
r.mu.Lock()
r.stringNeedsEscaping[s] = positionError
r.charNeedsEscaping[c] = escape
r.mu.Unlock()
}
@@ -193,28 +157,19 @@ func (r *results) checkControls() {
}
var wg sync.WaitGroup
for i := rune(0); i < 128; i++ {
s := string(i)
if i == 0 || i == '/' {
// We're not even going to check NULL or /
r.stringNeedsEscaping[s] = positionAll
r.charNeedsEscaping[i] = true
continue
}
wg.Add(1)
go func(s string) {
c := i
go func() {
defer wg.Done()
token := <-tokens
r.checkStringPositions(s)
r.checkChar(c)
tokens <- token
}(s)
}
for _, s := range []string{"", "\xBF", "\xFE"} {
wg.Add(1)
go func(s string) {
defer wg.Done()
token := <-tokens
r.checkStringPositions(s)
tokens <- token
}(s)
}()
}
wg.Wait()
fs.Infof(r.f, "Done trying to create control character file names")
@@ -313,35 +268,3 @@ func readInfo(f fs.Fs) error {
r.Print()
return nil
}
func (e position) String() string {
switch e {
case positionNone:
return "none"
case positionAll:
return "all"
}
var buf bytes.Buffer
if e&positionMiddle != 0 {
buf.WriteString("middle")
e &= ^positionMiddle
}
if e&positionLeft != 0 {
if buf.Len() != 0 {
buf.WriteRune(',')
}
buf.WriteString("left")
e &= ^positionLeft
}
if e&positionRight != 0 {
if buf.Len() != 0 {
buf.WriteRune(',')
}
buf.WriteString("right")
e &= ^positionRight
}
if e != positionNone {
panic("invalid position")
}
return buf.String()
}

View File

@@ -1,40 +0,0 @@
set -euo pipefail
for f in info-*.log; do
for pos in middle left right; do
egrep -oe " Writing $pos position file [^ ]* \w+" $f | sort | cut -d' ' -f 7 > $f.write_$pos
egrep -oe " Getting $pos position file [^ ]* \w+" $f | sort | cut -d' ' -f 7 > $f.get_$pos
done
{
echo "${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}"
echo "Write\tWrite\tWrite\tGet\tGet\tGet"
echo "Mid\tLeft\tRight\tMid\tLeft\tRight"
paste $f.write_{middle,left,right} $f.get_{middle,left,right}
} > $f.csv
done
for f in info-*.list; do
for pos in middle left right; do
cat $f | perl -lne 'print $1 if /^\s+[0-9]+\s+(.*)/' | grep -a "position-$pos-" | sort > $f.$pos
done
{
echo "${${f%.list}#info-}\t${${f%.list}#info-}\t${${f%.list}#info-}"
echo "List\tList\tList"
echo "Mid\tLeft\tRight"
for e in 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F BF EFBCBC FE; do
echo -n $(perl -lne 'print "'$e'-$1" if /^position-middle-'$e'-(.*)-/' $f.middle | tr -d "\t\r" | grep -a . || echo Miss)
echo -n "\t"
echo -n $(perl -lne 'print "'$e'-$1" if /^(.*)-position-left-'$e'/' $f.left | tr -d "\t\r" | grep -a . || echo Miss)
echo -n "\t"
echo $(perl -lne 'print "'$e'-$1" if /^position-right-'$e'-(.*)/' $f.right | tr -d "\t\r" | grep -a . || echo Miss)
# echo -n $(grep -a "position-middle-$e-" $f.middle | tr -d "\t\r" || echo Miss)"\t"
# echo -n $(grep -a "position-left-$e" $f.left | tr -d "\t\r" || echo Miss)"\t"
# echo $(grep -a "position-right-$e-" $f.right | tr -d "\t\r" || echo Miss)
done
} > $f.csv
done
for f in info-*.list; do
paste ${f%.list}.log.csv $f.csv > ${f%.list}.full.csv
done
paste *.full.csv > info-complete.csv

View File

@@ -1,3 +0,0 @@
rclone.exe purge info
rclone.exe info -vv info > info-LocalWindows.log 2>&1
rclone.exe ls -vv info > info-LocalWindows.list 2>&1

View File

@@ -1,43 +0,0 @@
#!/usr/bin/env zsh
#
# example usage:
# $GOPATH/src/github.com/ncw/rclone/cmd/info/test.sh --list | \
# parallel -P20 $GOPATH/src/github.com/ncw/rclone/cmd/info/test.sh
export PATH=$GOPATH/src/github.com/ncw/rclone:$PATH
typeset -A allRemotes
allRemotes=(
TestAmazonCloudDrive '--low-level-retries=2 --checkers=5'
TestB2 ''
TestBox ''
TestDrive '--tpslimit=5'
TestCrypt ''
TestDropbox '--checkers=1'
TestJottacloud ''
TestMega ''
TestOneDrive ''
TestOpenDrive '--low-level-retries=2 --checkers=5'
TestPcloud '--low-level-retries=2 --timeout=15s'
TestS3 ''
Local ''
)
set -euo pipefail
if [[ $# -eq 0 ]]; then
set -- ${(k)allRemotes[@]}
elif [[ $1 = --list ]]; then
printf '%s\n' ${(k)allRemotes[@]}
exit 0
fi
for remote; do
dir=$remote:infotest
if [[ $remote = Local ]]; then
dir=infotest
fi
rclone purge $dir || :
rclone info -vv $dir ${=allRemotes[$remote]} &> info-$remote.log
rclone ls -vv $dir &> info-$remote.list
done

View File

@@ -16,7 +16,7 @@ var (
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&listLong, "long", "", listLong, "Show the type as well as names.")
commandDefintion.Flags().BoolVarP(&listLong, "long", "l", listLong, "Show the type as well as names.")
}
var commandDefintion = &cobra.Command{

View File

@@ -60,13 +60,7 @@ If "remote:path" contains the file "subfolder/file.txt", the Path for "file.txt"
will be "subfolder/file.txt", not "remote:path/subfolder/file.txt".
When used without --recursive the Path will always be the same as Name.
The time is in RFC3339 format with up to nanosecond precision. The
number of decimal digits in the seconds will depend on the precision
that the remote can hold the times, so if times are accurate to the
nearest millisecond (eg Google Drive) then 3 digits will always be
shown ("2017-05-31T16:15:57.034+01:00") whereas if the times are
accurate to the nearest second (Dropbox, Box, WebDav etc) no digits
will be shown ("2017-05-31T16:15:57+01:00").
The time is in RFC3339 format with nanosecond precision.
The whole output can be processed as a JSON blob, or alternatively it
can be processed line by line as each item is written one to a line.

View File

@@ -45,7 +45,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
if err != nil {
return translateError(err)
}
resp.Size = n
resp.Size = int(n)
return nil
}

View File

@@ -20,12 +20,12 @@ var (
)
func randomSeekTest(size int64, in *os.File, name string) {
startTime := time.Now()
start := rand.Int63n(size)
blockSize := rand.Intn(*maxBlockSize)
if int64(blockSize) > size-start {
blockSize = int(size - start)
}
log.Printf("Reading %d from %d", blockSize, start)
_, err := in.Seek(start, io.SeekStart)
if err != nil {
@@ -37,8 +37,6 @@ func randomSeekTest(size int64, in *os.File, name string) {
if err != nil {
log.Fatalf("Read failed on %q: %v", name, err)
}
log.Printf("Reading %d from %d took %v ", blockSize, start, time.Since(startTime))
}
func main() {
@@ -50,12 +48,10 @@ func main() {
rand.Seed(*randSeed)
name := args[0]
openStart := time.Now()
in, err := os.Open(name)
if err != nil {
log.Fatalf("Couldn't open %q: %v", name, err)
}
log.Printf("File Open took %v", time.Since(openStart))
fi, err := in.Stat()
if err != nil {

View File

@@ -7,7 +7,7 @@ package mountlib
import (
"log"
daemon "github.com/sevlyar/go-daemon"
"github.com/sevlyar/go-daemon"
)
func startBackgroundMode() bool {

View File

@@ -4,7 +4,6 @@ import (
"io"
"log"
"os"
"path/filepath"
"runtime"
"strings"
"time"
@@ -63,28 +62,6 @@ func checkMountEmpty(mountpoint string) error {
return nil
}
// Check the root doesn't overlap the mountpoint
func checkMountpointOverlap(root, mountpoint string) error {
abs := func(x string) string {
if absX, err := filepath.EvalSymlinks(x); err == nil {
x = absX
}
if absX, err := filepath.Abs(x); err == nil {
x = absX
}
x = filepath.ToSlash(x)
if !strings.HasSuffix(x, "/") {
x += "/"
}
return x
}
rootAbs, mountpointAbs := abs(root), abs(mountpoint)
if strings.HasPrefix(rootAbs, mountpointAbs) || strings.HasPrefix(mountpointAbs, rootAbs) {
return errors.Errorf("mount point %q and directory to be mounted %q mustn't overlap", mountpoint, root)
}
return nil
}
// NewMountCommand makes a mount command with the given name and Mount function
func NewMountCommand(commandName string, Mount func(f fs.Fs, mountpoint string) error) *cobra.Command {
var commandDefintion = &cobra.Command{
@@ -243,14 +220,7 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
config.PassConfigKeyForDaemonization = true
}
mountpoint := args[1]
fdst := cmd.NewFsDir(args)
if fdst.Name() == "" || fdst.Name() == "local" {
err := checkMountpointOverlap(fdst.Root(), mountpoint)
if err != nil {
log.Fatalf("Fatal error: %v", err)
}
}
// Show stats if the user has specifically requested them
if cmd.ShowStats() {
@@ -260,7 +230,7 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
// Skip checkMountEmpty if --allow-non-empty flag is used or if
// the Operating System is Windows
if !AllowNonEmpty && runtime.GOOS != "windows" {
err := checkMountEmpty(mountpoint)
err := checkMountEmpty(args[1])
if err != nil {
log.Fatalf("Fatal error: %v", err)
}
@@ -283,7 +253,7 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
}
}
err := Mount(fdst, mountpoint)
err := Mount(fdst, args[1])
if err != nil {
log.Fatalf("Fatal error: %v", err)
}
@@ -326,11 +296,7 @@ func ClipBlocks(b *uint64) {
var max uint64
switch runtime.GOOS {
case "windows":
if runtime.GOARCH == "386" {
max = (1 << 32) - 1
} else {
max = (1 << 43) - 1
}
max = (1 << 43) - 1
case "darwin":
// OSX FUSE only supports 32 bit number of blocks
// https://github.com/osxfuse/osxfuse/issues/396

View File

@@ -37,11 +37,6 @@ into ` + "`dest:path`" + ` then delete the original (if no errors on copy) in
If you want to delete empty source directories after move, use the --delete-empty-src-dirs flag.
See the [--no-traverse](/docs/#no-traverse) option for controlling
whether rclone lists the destination directory or not. Supplying this
option when moving a small number of files into a large destination
can speed transfers up greatly.
**Important**: Since this can cause data loss, test first with the
--dry-run flag.
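
To illustrate the reasoning behind --no-traverse, compare listing the whole destination up front with checking only the files being moved. The sketch below is a generic illustration of that trade-off, not rclone's implementation:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// withTraverse enumerates the whole destination once, then answers
// existence checks from the in-memory listing - expensive when the
// destination holds very many entries.
func withTraverse(dstDir string, names []string) map[string]bool {
	present := make(map[string]bool)
	entries, err := ioutil.ReadDir(dstDir)
	if err != nil {
		return present
	}
	all := make(map[string]bool, len(entries))
	for _, e := range entries {
		all[e.Name()] = true
	}
	for _, n := range names {
		present[n] = all[n]
	}
	return present
}

// withoutTraverse stats only the files of interest, so the cost depends
// on the number of files moved, not on the size of the destination.
func withoutTraverse(dstDir string, names []string) map[string]bool {
	present := make(map[string]bool)
	for _, n := range names {
		_, err := os.Stat(filepath.Join(dstDir, n))
		present[n] = err == nil
	}
	return present
}

func main() {
	names := []string{"a.txt", "b.txt"}
	fmt.Println(withTraverse("/tmp/dst", names))
	fmt.Println(withoutTraverse("/tmp/dst", names))
}
```

With only a couple of files to move, the second approach avoids listing the large destination at all, which is the speed-up the flag description refers to.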

View File

@@ -27,11 +27,6 @@ const (
//
// It returns a func which should be called to stop the stats.
func startProgress() func() {
err := initTerminal()
if err != nil {
fs.Errorf(nil, "Failed to start progress: %v", err)
return func() {}
}
stopStats := make(chan struct{})
oldLogPrint := fs.LogPrint
if !log.Redirected() {

View File

@@ -4,10 +4,6 @@ package cmd
import "os"
func initTerminal() error {
return nil
}
func writeToTerminal(b []byte) {
_, _ = os.Stdout.Write(b)
}

View File

@@ -5,31 +5,22 @@ package cmd
import (
"fmt"
"os"
"syscall"
"sync"
ansiterm "github.com/Azure/go-ansiterm"
"github.com/Azure/go-ansiterm/winterm"
"github.com/pkg/errors"
)
var (
ansiParser *ansiterm.AnsiParser
initAnsiParser sync.Once
ansiParser *ansiterm.AnsiParser
)
func initTerminal() error {
winEventHandler := winterm.CreateWinEventHandler(os.Stdout.Fd(), os.Stdout)
if winEventHandler == nil {
err := syscall.GetLastError()
if err == nil {
err = errors.New("initialization failed")
}
return errors.Wrap(err, "windows terminal")
}
ansiParser = ansiterm.CreateParser("Ground", winEventHandler)
return nil
}
func writeToTerminal(b []byte) {
initAnsiParser.Do(func() {
winEventHandler := winterm.CreateWinEventHandler(os.Stdout.Fd(), os.Stdout)
ansiParser = ansiterm.CreateParser("Ground", winEventHandler)
})
// Remove all non-ASCII characters until this is fixed
// https://github.com/Azure/go-ansiterm/issues/26
r := []rune(string(b))

View File

@@ -17,7 +17,7 @@ var commandDefintion = &cobra.Command{
Use: "rcd <path to files to serve>*",
Short: `Run rclone listening to remote control commands only.`,
Long: `
This runs rclone so that it only listens to remote control commands.
This runs rclone so that it only listents to remote control commands.
This is useful if you are controlling rclone via the rc API.
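
For context, the rc API is driven with plain JSON-over-HTTP POSTs against the running daemon. The following Go sketch is illustrative only; the default listen address (localhost:5572) and the rc/noop endpoint are assumptions and should be adjusted to match the flags rcd was started with:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// rc/noop is assumed here as a simple endpoint that echoes its
	// parameters back to the caller.
	body := bytes.NewBufferString(`{"echo":"hello"}`)
	resp, err := http.Post("http://localhost:5572/rc/noop", "application/json", body)
	if err != nil {
		log.Fatalf("rc call failed: %v", err)
	}
	defer func() { _ = resp.Body.Close() }()
	out, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("read response: %v", err)
	}
	fmt.Printf("%s: %s\n", resp.Status, out)
}
```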

View File

@@ -1,451 +0,0 @@
package dlna
const contentDirectoryServiceDescription = `<?xml version="1.0"?>
<scpd xmlns="urn:schemas-upnp-org:service-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<actionList>
<action>
<name>GetSearchCapabilities</name>
<argumentList>
<argument>
<name>SearchCaps</name>
<direction>out</direction>
<relatedStateVariable>SearchCapabilities</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>GetSortCapabilities</name>
<argumentList>
<argument>
<name>SortCaps</name>
<direction>out</direction>
<relatedStateVariable>SortCapabilities</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>GetSortExtensionCapabilities</name>
<argumentList>
<argument>
<name>SortExtensionCaps</name>
<direction>out</direction>
<relatedStateVariable>SortExtensionCapabilities</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>GetFeatureList</name>
<argumentList>
<argument>
<name>FeatureList</name>
<direction>out</direction>
<relatedStateVariable>FeatureList</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>GetSystemUpdateID</name>
<argumentList>
<argument>
<name>Id</name>
<direction>out</direction>
<relatedStateVariable>SystemUpdateID</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>Browse</name>
<argumentList>
<argument>
<name>ObjectID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
</argument>
<argument>
<name>BrowseFlag</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_BrowseFlag</relatedStateVariable>
</argument>
<argument>
<name>Filter</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_Filter</relatedStateVariable>
</argument>
<argument>
<name>StartingIndex</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_Index</relatedStateVariable>
</argument>
<argument>
<name>RequestedCount</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
</argument>
<argument>
<name>SortCriteria</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_SortCriteria</relatedStateVariable>
</argument>
<argument>
<name>Result</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_Result</relatedStateVariable>
</argument>
<argument>
<name>NumberReturned</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
</argument>
<argument>
<name>TotalMatches</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
</argument>
<argument>
<name>UpdateID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_UpdateID</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>Search</name>
<argumentList>
<argument>
<name>ContainerID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
</argument>
<argument>
<name>SearchCriteria</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_SearchCriteria</relatedStateVariable>
</argument>
<argument>
<name>Filter</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_Filter</relatedStateVariable>
</argument>
<argument>
<name>StartingIndex</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_Index</relatedStateVariable>
</argument>
<argument>
<name>RequestedCount</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
</argument>
<argument>
<name>SortCriteria</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_SortCriteria</relatedStateVariable>
</argument>
<argument>
<name>Result</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_Result</relatedStateVariable>
</argument>
<argument>
<name>NumberReturned</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
</argument>
<argument>
<name>TotalMatches</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_Count</relatedStateVariable>
</argument>
<argument>
<name>UpdateID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_UpdateID</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>CreateObject</name>
<argumentList>
<argument>
<name>ContainerID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
</argument>
<argument>
<name>Elements</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_Result</relatedStateVariable>
</argument>
<argument>
<name>ObjectID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
</argument>
<argument>
<name>Result</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_Result</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>DestroyObject</name>
<argumentList>
<argument>
<name>ObjectID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>UpdateObject</name>
<argumentList>
<argument>
<name>ObjectID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
</argument>
<argument>
<name>CurrentTagValue</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_TagValueList</relatedStateVariable>
</argument>
<argument>
<name>NewTagValue</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_TagValueList</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>MoveObject</name>
<argumentList>
<argument>
<name>ObjectID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
</argument>
<argument>
<name>NewParentID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
</argument>
<argument>
<name>NewObjectID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>ImportResource</name>
<argumentList>
<argument>
<name>SourceURI</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_URI</relatedStateVariable>
</argument>
<argument>
<name>DestinationURI</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_URI</relatedStateVariable>
</argument>
<argument>
<name>TransferID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_TransferID</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>ExportResource</name>
<argumentList>
<argument>
<name>SourceURI</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_URI</relatedStateVariable>
</argument>
<argument>
<name>DestinationURI</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_URI</relatedStateVariable>
</argument>
<argument>
<name>TransferID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_TransferID</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>StopTransferResource</name>
<argumentList>
<argument>
<name>TransferID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_TransferID</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>DeleteResource</name>
<argumentList>
<argument>
<name>ResourceURI</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_URI</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>GetTransferProgress</name>
<argumentList>
<argument>
<name>TransferID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_TransferID</relatedStateVariable>
</argument>
<argument>
<name>TransferStatus</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_TransferStatus</relatedStateVariable>
</argument>
<argument>
<name>TransferLength</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_TransferLength</relatedStateVariable>
</argument>
<argument>
<name>TransferTotal</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_TransferTotal</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>CreateReference</name>
<argumentList>
<argument>
<name>ContainerID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
</argument>
<argument>
<name>ObjectID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
</argument>
<argument>
<name>NewID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ObjectID</relatedStateVariable>
</argument>
</argumentList>
</action>
</actionList>
<serviceStateTable>
<stateVariable sendEvents="no">
<name>SearchCapabilities</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>SortCapabilities</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>SortExtensionCapabilities</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="yes">
<name>SystemUpdateID</name>
<dataType>ui4</dataType>
</stateVariable>
<stateVariable sendEvents="yes">
<name>ContainerUpdateIDs</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="yes">
<name>TransferIDs</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>FeatureList</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_ObjectID</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_Result</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_SearchCriteria</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_BrowseFlag</name>
<dataType>string</dataType>
<allowedValueList>
<allowedValue>BrowseMetadata</allowedValue>
<allowedValue>BrowseDirectChildren</allowedValue>
</allowedValueList>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_Filter</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_SortCriteria</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_Index</name>
<dataType>ui4</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_Count</name>
<dataType>ui4</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_UpdateID</name>
<dataType>ui4</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_TransferID</name>
<dataType>ui4</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_TransferStatus</name>
<dataType>string</dataType>
<allowedValueList>
<allowedValue>COMPLETED</allowedValue>
<allowedValue>ERROR</allowedValue>
<allowedValue>IN_PROGRESS</allowedValue>
<allowedValue>STOPPED</allowedValue>
</allowedValueList>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_TransferLength</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_TransferTotal</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_TagValueList</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_URI</name>
<dataType>uri</dataType>
</stateVariable>
</serviceStateTable>
</scpd>`

View File

@@ -1,240 +0,0 @@
package dlna
import (
"encoding/xml"
"fmt"
"log"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"sort"
"github.com/anacrolix/dms/dlna"
"github.com/anacrolix/dms/upnp"
"github.com/anacrolix/dms/upnpav"
"github.com/ncw/rclone/vfs"
"github.com/pkg/errors"
)
type contentDirectoryService struct {
*server
upnp.Eventing
}
func (cds *contentDirectoryService) updateIDString() string {
return fmt.Sprintf("%d", uint32(os.Getpid()))
}
// Turns the given entry and DMS host into a UPnP object. A nil object is
// returned if the entry is not of interest.
func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fileInfo os.FileInfo, host string) (ret interface{}, err error) {
obj := upnpav.Object{
ID: cdsObject.ID(),
Restricted: 1,
ParentID: cdsObject.ParentID(),
}
if fileInfo.IsDir() {
obj.Class = "object.container.storageFolder"
obj.Title = fileInfo.Name()
ret = upnpav.Container{Object: obj}
return
}
if !fileInfo.Mode().IsRegular() {
return
}
// Hardcode "videoItem" so that files show up in VLC.
obj.Class = "object.item.videoItem"
obj.Title = fileInfo.Name()
item := upnpav.Item{
Object: obj,
Res: make([]upnpav.Resource, 0, 1),
}
item.Res = append(item.Res, upnpav.Resource{
URL: (&url.URL{
Scheme: "http",
Host: host,
Path: resPath,
RawQuery: url.Values{
"path": {cdsObject.Path},
}.Encode(),
}).String(),
// Hardcode "video/x-matroska" so that files show up in VLC.
ProtocolInfo: fmt.Sprintf("http-get:*:video/x-matroska:%s", dlna.ContentFeatures{
SupportRange: true,
}.String()),
Bitrate: 0,
Duration: "",
Size: uint64(fileInfo.Size()),
Resolution: "",
})
ret = item
return
}
// Returns all the upnpav objects in a directory.
func (cds *contentDirectoryService) readContainer(o object, host string) (ret []interface{}, err error) {
node, err := cds.vfs.Stat(o.Path)
if err != nil {
return
}
if !node.IsDir() {
err = errors.New("not a directory")
return
}
dir := node.(*vfs.Dir)
dirEntries, err := dir.ReadDirAll()
if err != nil {
err = errors.New("failed to list directory")
return
}
sort.Sort(dirEntries)
for _, de := range dirEntries {
child := object{
path.Join(o.Path, de.Name()),
}
obj, err := cds.cdsObjectToUpnpavObject(child, de, host)
if err != nil {
log.Printf("error with %s: %s", child.FilePath(), err)
continue
}
if obj != nil {
ret = append(ret, obj)
} else {
log.Printf("bad %s", de)
}
}
return
}
type browse struct {
ObjectID string
BrowseFlag string
Filter string
StartingIndex int
RequestedCount int
}
// ContentDirectory object from ObjectID.
func (cds *contentDirectoryService) objectFromID(id string) (o object, err error) {
o.Path, err = url.QueryUnescape(id)
if err != nil {
return
}
if o.Path == "0" {
o.Path = "/"
}
o.Path = path.Clean(o.Path)
if !path.IsAbs(o.Path) {
err = fmt.Errorf("bad ObjectID %v", o.Path)
return
}
return
}
func (cds *contentDirectoryService) Handle(action string, argsXML []byte, r *http.Request) (map[string]string, error) {
host := r.Host
switch action {
case "GetSystemUpdateID":
return map[string]string{
"Id": cds.updateIDString(),
}, nil
case "GetSortCapabilities":
return map[string]string{
"SortCaps": "dc:title",
}, nil
case "Browse":
var browse browse
if err := xml.Unmarshal(argsXML, &browse); err != nil {
return nil, err
}
obj, err := cds.objectFromID(browse.ObjectID)
if err != nil {
return nil, upnp.Errorf(upnpav.NoSuchObjectErrorCode, err.Error())
}
switch browse.BrowseFlag {
case "BrowseDirectChildren":
objs, err := cds.readContainer(obj, host)
if err != nil {
return nil, upnp.Errorf(upnpav.NoSuchObjectErrorCode, err.Error())
}
totalMatches := len(objs)
objs = objs[func() (low int) {
low = browse.StartingIndex
if low > len(objs) {
low = len(objs)
}
return
}():]
if browse.RequestedCount != 0 && browse.RequestedCount < len(objs) {
objs = objs[:browse.RequestedCount]
}
result, err := xml.Marshal(objs)
if err != nil {
return nil, err
}
return map[string]string{
"TotalMatches": fmt.Sprint(totalMatches),
"NumberReturned": fmt.Sprint(len(objs)),
"Result": didlLite(string(result)),
"UpdateID": cds.updateIDString(),
}, nil
default:
return nil, upnp.Errorf(upnp.ArgumentValueInvalidErrorCode, "unhandled browse flag: %v", browse.BrowseFlag)
}
case "GetSearchCapabilities":
return map[string]string{
"SearchCaps": "",
}, nil
default:
return nil, upnp.InvalidActionError
}
}
// Represents a ContentDirectory object.
type object struct {
Path string // The cleaned, absolute path for the object relative to the server.
}
// Returns the actual local filesystem path for the object.
func (o *object) FilePath() string {
return filepath.FromSlash(o.Path)
}
// Returns the ObjectID for the object. This is used in various ContentDirectory actions.
func (o object) ID() string {
if !path.IsAbs(o.Path) {
log.Panicf("Relative object path: %s", o.Path)
}
if len(o.Path) == 1 {
return "0"
}
return url.QueryEscape(o.Path)
}
func (o *object) IsRoot() bool {
return o.Path == "/"
}
// Returns the object's parent ObjectID. Fortunately it can be deduced from the
// ObjectID (for now).
func (o object) ParentID() string {
if o.IsRoot() {
return "-1"
}
o.Path = path.Dir(o.Path)
return o.ID()
}

Some files were not shown because too many files have changed in this diff.