Mirror of https://github.com/rclone/rclone.git
Compare commits
4 Commits
vfs-async- ... adb-remote
| Author | SHA1 | Date |
|---|---|---|
|  | c3e1a0f368 |  |
|  | 2683939a4b |  |
|  | ed88ae878e |  |
|  | 2ba5c35e88 |  |
@@ -1,5 +1,9 @@
# golangci-lint configuration options

run:
  build-tags:
    - cmount

linters:
  enable:
    - deadcode
108 .travis.yml
@@ -1,33 +1,27 @@
---
language: go
sudo: required
dist: xenial
dist: trusty
os:
- linux
- linux
go:
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12rc1
- tip
go_import_path: github.com/ncw/rclone
before_install:
- git fetch --unshallow --tags
- |
  if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
    sudo modprobe fuse
    sudo chmod 666 /dev/fuse
    sudo chown root:$USER /etc/fuse.conf
  fi
  if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
    brew update
    brew tap caskroom/cask
    brew cask install osxfuse
  fi
  if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
    choco install -y winfsp zip make
    cd ../.. # fix crlf in git checkout
    mv $TRAVIS_REPO_SLUG _old
    git config --global core.autocrlf false
    git clone _old $TRAVIS_REPO_SLUG
    cd $TRAVIS_REPO_SLUG
  fi
- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
- if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
install:
- make vars
- git fetch --unshallow --tags
- make vars
- make build_dep
script:
- make check
- make quicktest
- make compile_all
env:
  global:
  - GOTAGS=cmount
@@ -38,63 +32,23 @@ env:
addons:
  apt:
    packages:
    - fuse
    - libfuse-dev
    - rpm
    - pkg-config
    - fuse
    - libfuse-dev
    - rpm
    - pkg-config
cache:
  directories:
  - $HOME/.cache/go-build
matrix:
  allow_failures:
  - go: tip
  - go: tip
  include:
  - go: 1.9.x
    script:
    - make quicktest
  - go: 1.10.x
    script:
    - make quicktest
  - go: 1.11.x
    script:
    - make quicktest
  - go: 1.12.x
    env:
    - GOTAGS=cmount
    script:
    - make build_dep
    - make check
    - make quicktest
    - make racequicktest
    - make compile_all
  - os: osx
    go: 1.12.x
    env:
    - GOTAGS= # cmount doesn't work on osx travis for some reason
    cache:
      directories:
      - $HOME/Library/Caches/go-build
    script:
    - make
    - make quicktest
    - make racequicktest
# - os: windows
#   go: 1.12.x
#   env:
#   - GOTAGS=cmount
#   - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
#   #filter_secrets: false # works around a problem with secrets under windows
#   cache:
#     directories:
#     - ${LocalAppData}/go-build
#   script:
#   - make
#   - make quicktest
#   - make racequicktest
  - go: tip
    script:
    - make quicktest

  - os: osx
    go: 1.12rc1
    env: GOTAGS=""
    cache:
      directories:
      - $HOME/Library/Caches/go-build
deploy:
  provider: script
  script: make travis_beta
@@ -102,5 +56,5 @@ deploy:
  on:
    repo: ncw/rclone
    all_branches: true
    go: 1.12.x
    condition: $TRAVIS_PULL_REQUEST == false && $TRAVIS_OS_NAME != "windows"
    go: 1.12rc1
    condition: $TRAVIS_PULL_REQUEST == false
@@ -135,7 +135,7 @@ then change into the project root and run

    make test

This command is run daily on the integration test server. You can
This command is run daily on the the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/

## Code Organisation ##
4148 MANUAL.html (file diff suppressed because it is too large)
3775 MANUAL.txt (file diff suppressed because it is too large)
30 Makefile
@@ -17,6 +17,8 @@ ifneq ($(TAG),$(LAST_TAG))
endif
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
# Run full tests if go >= go1.11
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 11)')
BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
@@ -24,7 +26,6 @@ BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
# Pass in GOTAGS=xyz on the make command line to set build tags
ifdef GOTAGS
BUILDTAGS=-tags "$(GOTAGS)"
LINTTAGS=--build-tags "$(GOTAGS)"
endif

.PHONY: rclone vars version
@@ -41,6 +42,7 @@ vars:
	@echo LAST_TAG="'$(LAST_TAG)'"
	@echo NEW_TAG="'$(NEW_TAG)'"
	@echo GO_VERSION="'$(GO_VERSION)'"
	@echo FULL_TESTS="'$(FULL_TESTS)'"
	@echo BETA_URL="'$(BETA_URL)'"

version:
@@ -55,19 +57,28 @@ test: rclone
# Quick test
quicktest:
	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)

racequicktest:
ifdef FULL_TESTS
	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
endif

# Do source code quality checks
check: rclone
ifdef FULL_TESTS
	@# we still run go vet for -printfuncs which golangci-lint doesn't do yet
	@# see: https://github.com/golangci/golangci-lint/issues/204
	@echo "-- START CODE QUALITY REPORT -------------------------------"
	@golangci-lint run $(LINTTAGS) ./...
	@go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
	@golangci-lint run ./...
	@echo "-- END CODE QUALITY REPORT ---------------------------------"
else
	@echo Skipping source quality tests as version of go too old
endif

# Get the build dependencies
build_dep:
ifdef FULL_TESTS
	go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
endif

# Get the release dependencies
release_dep:
@@ -151,7 +162,11 @@ log_since_last_release:
	git log $(LAST_TAG)..

compile_all:
ifdef FULL_TESTS
	go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)
else
	@echo Skipping compile all as version of go too old
endif

appveyor_upload:
	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
@@ -171,11 +186,6 @@ BUILD_FLAGS := -exclude "^(windows|darwin)/"
ifeq ($(TRAVIS_OS_NAME),osx)
BUILD_FLAGS := -include "^darwin/" -cgo
endif
ifeq ($(TRAVIS_OS_NAME),windows)
# BUILD_FLAGS := -include "^windows/" -cgo
# 386 doesn't build yet
BUILD_FLAGS := -include "^windows/amd64" -cgo
endif

travis_beta:
ifeq ($(TRAVIS_OS_NAME),linux)
@@ -191,7 +201,7 @@ endif

# Fetch the binary builds from travis and appveyor
fetch_binaries:
	rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
	rclone -P sync $(BETA_UPLOAD) build/

serve: website
	cd docs && hugo server -v -w
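The FULL_TESTS variable above gates racequicktest, check, and compile_all on go1.11 or later by parsing `go version` with a perl one-liner. A minimal Go sketch of the same version test, for illustration only (the function name and regexp are ours, not from the Makefile):

package main

import (
	"fmt"
	"regexp"
	"runtime"
	"strconv"
)

// fullTests mirrors the Makefile's FULL_TESTS check: true when the
// toolchain reports go1.11 or later. Devel builds don't match goN.M,
// so they skip the full tests, just like the empty FULL_TESTS variable.
func fullTests() bool {
	m := regexp.MustCompile(`go(\d+)\.(\d+)`).FindStringSubmatch(runtime.Version())
	if m == nil {
		return false
	}
	major, _ := strconv.Atoi(m[1])
	minor, _ := strconv.Atoi(m[2])
	return major > 1 || minor >= 11
}

func main() {
	fmt.Println(fullTests())
}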
@@ -7,11 +7,11 @@
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/) |
[G+](https://google.com/+RcloneOrg)

[](https://travis-ci.org/ncw/rclone)
[](https://ci.appveyor.com/project/ncw/rclone)
[](https://circleci.com/gh/ncw/rclone/tree/master)
[](https://goreportcard.com/report/github.com/ncw/rclone)
[](https://godoc.org/github.com/ncw/rclone)

# Rclone

@@ -36,7 +36,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
@@ -11,7 +11,7 @@ Making a release
* edit docs/content/changelog.md
* make doc
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* git commit -a -v -m "Version v1.XX"
* make retag
* git push --tags origin master
* # Wait for the appveyor and travis builds to complete then...
@@ -27,7 +27,6 @@ Making a release

Early in the next release cycle update the vendored dependencies
* Review any pinned packages in go.mod and remove if possible
* GO111MODULE=on go get -u github.com/spf13/cobra@master
* make update
* git status
* git add new files
821 backend/adb/adb.go (new file)
@@ -0,0 +1,821 @@
package adb

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/readers"
	"github.com/pkg/errors"
	adb "github.com/thinkhy/go-adb"
	"github.com/thinkhy/go-adb/wire"
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "adb",
		Description: "Android Debug Bridge",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "serial",
			Help:     "The device serial to use. Leave empty for auto selection.",
			Advanced: true,
		}, {
			Name:     "host",
			Default:  "localhost",
			Help:     "The ADB server host.",
			Advanced: true,
		}, {
			Name:     "port",
			Default:  5037,
			Help:     "The ADB server port.",
			Advanced: true,
		}, {
			Name:     "executable",
			Help:     "The ADB executable path.",
			Advanced: true,
		}, {
			Name:     "copy_links",
			Help:     "Follow symlinks and copy the pointed to item.",
			Default:  false,
			Advanced: true,
		}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	Serial         string
	Host           string
	Port           uint16
	Executable     string
	FollowSymlinks bool `config:"copy_links"`
}

// Fs represents an adb device
type Fs struct {
	name        string       // name of this remote
	root        string       // the path we are working on
	opt         Options      // parsed options
	features    *fs.Features // optional features
	client      *adb.Adb
	device      *execDevice
	statFunc    statFunc
	statFuncMu  sync.Mutex
	touchFunc   touchFunc
	touchFuncMu sync.Mutex
}

// Object describes an adb file
type Object struct {
	fs      *Fs    // what this object is part of
	remote  string // The remote path
	size    int64
	mode    os.FileMode
	modTime time.Time
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("ADB root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	if root == "" {
		root = "/"
	}

	f := &Fs{
		name:      name,
		root:      root,
		opt:       *opt,
		statFunc:  (*Object).statTry,
		touchFunc: (*Object).touchTry,
	}
	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
	}).Fill(f)

	f.client, err = adb.NewWithConfig(adb.ServerConfig{
		Host:      opt.Host,
		Port:      int(opt.Port),
		PathToAdb: opt.Executable,
	})
	if err != nil {
		return nil, errors.Wrapf(err, "Could not configure ADB server")
	}
	err = f.client.StartServer()
	if err != nil {
		return nil, errors.Wrapf(err, "Could not start ADB server")
	}

	serverVersion, err := f.client.ServerVersion()
	if err != nil {
		return nil, errors.Wrapf(err, "Could not get ADB server version")
	}
	fs.Debugf(f, "ADB server version: 0x%X", serverVersion)

	serials, err := f.client.ListDeviceSerials()
	if err != nil {
		return nil, errors.Wrapf(err, "Could not get ADB devices")
	}
	descriptor := adb.AnyDevice()
	if opt.Serial != "" {
		descriptor = adb.DeviceWithSerial(opt.Serial)
	}
	if len(serials) > 1 && opt.Serial == "" {
		return nil, errors.New("Multiple ADB devices found. Use the serial config to select a specific device")
	}
	f.device = &execDevice{f.client.Device(descriptor)}

	// follow symlinks for root paths
	entry, err := f.newEntryFollowSymlinks("")
	switch err {
	case nil:
	case fs.ErrorObjectNotFound:
	default:
		return nil, err
	}
	switch entry.(type) {
	case fs.Object:
		f.root = path.Dir(f.root)
		return f, fs.ErrorIsFile
	case nil:
		return f, nil
	case fs.Directory:
		return f, nil
	default:
		return nil, errors.Errorf("Invalid root entry type %T", entry)
	}
}

// Precision of the object storage system
func (f *Fs) Precision() time.Duration {
	return 1 * time.Second
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
	p := path.Join(f.root, dir)
	dirEntries, err := f.device.ListDirEntries(p)
	if err != nil {
		return nil, errors.Wrap(err, "ListDirEntries")
	}

	defer fs.CheckClose(dirEntries, &err)

	found := false
	for dirEntries.Next() {
		found = true
		dirEntry := dirEntries.Entry()
		switch dirEntry.Name {
		case ".", "..":
			continue
		}
		fsEntry, err := f.entryForDirEntry(path.Join(dir, dirEntry.Name), dirEntry, f.opt.FollowSymlinks)
		if err != nil {
			fs.Errorf(p, "Listing error: %q: %v", dirEntry.Name, err)
			return nil, err
		} else if fsEntry != nil {
			entries = append(entries, fsEntry)
		} else {
			fs.Debugf(f, "Skipping DirEntry %#v", dirEntry)
		}
	}
	err = dirEntries.Err()
	if err != nil {
		return nil, errors.Wrap(err, "ListDirEntries")
	}
	if !found {
		return nil, fs.ErrorDirNotFound
	}
	return
}

func (f *Fs) entryForDirEntry(remote string, e *adb.DirEntry, followSymlinks bool) (fs.DirEntry, error) {
	o := f.newObjectWithInfo(remote, e)
	// Follow symlinks if required
	if followSymlinks && (e.Mode&os.ModeSymlink) != 0 {
		err := f.statFunc(&o)
		if err != nil {
			return nil, err
		}
	}
	if o.mode.IsDir() {
		return fs.NewDir(remote, o.modTime), nil
	}
	return &o, nil
}

func (f *Fs) newEntry(remote string) (fs.DirEntry, error) {
	return f.newEntryWithFollow(remote, f.opt.FollowSymlinks)
}

func (f *Fs) newEntryFollowSymlinks(remote string) (fs.DirEntry, error) {
	return f.newEntryWithFollow(remote, true)
}

func (f *Fs) newEntryWithFollow(remote string, followSymlinks bool) (fs.DirEntry, error) {
	entry, err := f.device.Stat(path.Join(f.root, remote))
	if err != nil {
		if adb.HasErrCode(err, adb.FileNoExistError) {
			return nil, fs.ErrorObjectNotFound
		}
		return nil, errors.Wrapf(err, "Stat failed")
	}
	return f.entryForDirEntry(remote, entry, followSymlinks)
}

func (f *Fs) newObjectWithInfo(remote string, e *adb.DirEntry) Object {
	return Object{
		fs:      f,
		remote:  remote,
		size:    int64(e.Size),
		mode:    e.Mode,
		modTime: e.ModifiedAt,
	}
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
	entry, err := f.newEntry(remote)
	if err != nil {
		return nil, err
	}
	obj, ok := entry.(fs.Object)
	if !ok {
		return nil, fs.ErrorObjectNotFound
	}
	return obj, nil
}

// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	// Temporary Object under construction - info filled in by Update()
	o := f.newObject(remote)
	err := o.Update(in, src, options...)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// newObject makes a half completed Object
func (f *Fs) newObject(remote string) *Object {
	return &Object{
		fs:     f,
		remote: remote,
	}
}

// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(dir string) error {
	p := path.Join(f.root, dir)
	output, code, err := f.device.execCommandWithExitCode("mkdir -p", p)
	switch err := err.(type) {
	case nil:
		return nil
	case adb.ShellExitError:
		entry, _ := f.newEntry(p)
		if _, ok := entry.(fs.Directory); ok {
			return nil
		}
		return errors.Errorf("mkdir %q failed with %d: %q", dir, code, output)
	default:
		return errors.Wrap(err, "mkdir")
	}
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(dir string) error {
	p := path.Join(f.root, dir)
	output, code, err := f.device.execCommandWithExitCode("rmdir", p)
	switch err := err.(type) {
	case nil:
		return nil
	case adb.ShellExitError:
		return errors.Errorf("rmdir %q failed with %d: %q", dir, code, output)
	default:
		return errors.Wrap(err, "rmdir")
	}
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime() time.Time {
	return o.modTime
}

// Size returns the size of the file
func (o *Object) Size() int64 {
	return o.size
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Storable says whether this object can be stored
func (o *Object) Storable() bool {
	return true
}

// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(t time.Time) error {
	return o.fs.touchFunc(o, t)
}

func (o *Object) stat() error {
	return o.statStatArg(statArgC, path.Join(o.fs.root, o.remote))
}

func (o *Object) setMetadata(entry *adb.DirEntry) {
	// Don't overwrite the values if we don't need to
	// this avoids upsetting the race detector
	if o.size != int64(entry.Size) {
		o.size = int64(entry.Size)
	}
	if !o.modTime.Equal(entry.ModifiedAt) {
		o.modTime = entry.ModifiedAt
	}
	if o.mode != entry.Mode {
		o.mode = decodeEntryMode(uint32(entry.Mode))
	}
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
	const blockSize = 1 << 12

	var offset, count int64 = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.RangeOption:
			offset, count = x.Decode(o.size)
		case *fs.SeekOption:
			offset = x.Offset
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	if offset > o.size {
		offset = o.size
	}
	if count < 0 {
		count = o.size - offset
	} else if count+offset > o.size {
		count = o.size - offset
	}
	fs.Debugf(o, "Open: remote: %q offset: %d count: %d", o.remote, offset, count)

	if count == 0 {
		return ioutil.NopCloser(bytes.NewReader(nil)), nil
	}
	offsetBlocks, offsetRest := offset/blockSize, offset%blockSize
	// The blocks copied by dd must cover the offsetRest bytes dropped at
	// the front as well as the count bytes handed to the caller
	countBlocks := (offsetRest+count-1)/blockSize + 1

	conn, err := o.fs.device.execCommand(fmt.Sprintf("sh -c 'dd \"if=$0\" bs=%d skip=%d count=%d 2>/dev/null'", blockSize, offsetBlocks, countBlocks), path.Join(o.fs.root, o.remote))
	if err != nil {
		return nil, err
	}

	return &adbReader{
		ReadCloser: readers.NewLimitedReadCloser(conn, count+offsetRest),
		skip:       offsetRest,
		expected:   count,
	}, nil
}
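Open shells out to dd, which can only seek in whole blocks, so the requested byte range is widened to 4 KiB block boundaries and trimmed back client-side by adbReader (below). A small sketch of the arithmetic, with assumed request values:

package main

import "fmt"

func main() {
	const blockSize = 1 << 12 // 4096, as in Open above
	offset, count := int64(5000), int64(100) // assumed request

	offsetBlocks, offsetRest := offset/blockSize, offset%blockSize
	countBlocks := (offsetRest+count-1)/blockSize + 1

	// dd skip=1 count=1 copies bytes 4096-8191; the reader then drops
	// offsetRest=904 bytes and returns count=100 bytes to the caller.
	fmt.Println(offsetBlocks, offsetRest, countBlocks) // 1 904 1
}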

// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	for _, option := range options {
		if option.Mandatory() {
			fs.Logf(option, "Unsupported mandatory option: %v", option)
		}
	}
	written, err := o.writeToFile(path.Join(o.fs.root, o.remote), in, 0666, src.ModTime())
	if err != nil {
		if removeErr := o.Remove(); removeErr != nil {
			fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
		}
		return err
	}
	expected := src.Size()
	if expected == -1 {
		expected = written
	}
	// Re-stat with increasing sleeps (roughly 19s in total) to allow for
	// the device reporting the new size with a delay
	for _, t := range []int64{100, 250, 500, 1000, 2500, 5000, 10000} {
		err = o.stat()
		if err != nil {
			return err
		}
		if o.size == expected {
			return nil
		}
		fs.Debugf(o, "Invalid size after update, expected: %d got: %d", expected, o.size)
		time.Sleep(time.Duration(t) * time.Millisecond)
	}
	return o.stat()
}

// Remove this object
func (o *Object) Remove() error {
	p := path.Join(o.fs.root, o.remote)
	output, code, err := o.fs.device.execCommandWithExitCode("rm", p)
	switch err := err.(type) {
	case nil:
		return nil
	case adb.ShellExitError:
		return errors.Errorf("rm %q failed with %d: %q", o.remote, code, output)
	default:
		return errors.Wrap(err, "rm")
	}
}

func (o *Object) writeToFile(path string, rd io.Reader, perms os.FileMode, modeTime time.Time) (written int64, err error) {
	dst, err := o.fs.device.OpenWrite(path, perms, modeTime)
	if err != nil {
		return
	}
	defer fs.CheckClose(dst, &err)
	return io.Copy(dst, rd)
}

type statFunc func(*Object) error

// statTry probes the available stat variants and caches the first one
// that works for subsequent calls
func (o *Object) statTry() error {
	o.fs.statFuncMu.Lock()
	defer o.fs.statFuncMu.Unlock()

	for _, f := range []statFunc{
		(*Object).statStatL, (*Object).statRealPath, (*Object).statReadLink,
	} {
		err := f(o)
		if err != nil {
			fs.Debugf(o, "%s", err)
		} else {
			o.fs.statFunc = f
			return nil
		}
	}

	return errors.Errorf("unable to resolve link target")
}

const (
	statArgLc = "-Lc"
	statArgC  = "-c"
)

func (o *Object) statStatL() error {
	return o.statStatArg(statArgLc, path.Join(o.fs.root, o.remote))
}

func (o *Object) statStatArg(arg, path string) error {
	output, code, err := o.fs.device.execCommandWithExitCode(fmt.Sprintf("stat %s %s", arg, "%f,%s,%Y"), path)
	output = strings.TrimSpace(output)
	switch err := err.(type) {
	case nil:
	case adb.ShellExitError:
		return errors.Errorf("stat %q failed with %d: %q", o.remote, code, output)
	default:
		return errors.Wrap(err, "stat")
	}

	parts := strings.Split(output, ",")
	if len(parts) != 3 {
		return errors.Errorf("stat %q invalid output %q", o.remote, output)
	}

	mode, err := strconv.ParseUint(parts[0], 16, 32)
	if err != nil {
		return errors.Errorf("stat %q invalid output %q", o.remote, output)
	}
	size, err := strconv.ParseUint(parts[1], 10, 64)
	if err != nil {
		return errors.Errorf("stat %q invalid output %q", o.remote, output)
	}
	modTime, err := strconv.ParseInt(parts[2], 10, 64)
	if err != nil {
		return errors.Errorf("stat %q invalid output %q", o.remote, output)
	}

	o.size = int64(size)
	o.modTime = time.Unix(modTime, 0)
	o.mode = decodeEntryMode(uint32(mode))
	return nil
}
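statStatArg asks stat for `%f,%s,%Y`: the raw st_mode in hex, the size in bytes, and the mtime as unix seconds. A sketch of the parse with an invented output line (the values are illustrative, not captured from a device):

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

func main() {
	// Hypothetical `stat -c %f,%s,%Y` output for a 1 KiB file, mode 0644:
	output := "81a4,1024,1546300800"

	parts := strings.Split(output, ",")
	mode, _ := strconv.ParseUint(parts[0], 16, 32) // raw st_mode in hex
	size, _ := strconv.ParseUint(parts[1], 10, 64) // size in bytes
	mtime, _ := strconv.ParseInt(parts[2], 10, 64) // mtime as unix seconds

	fmt.Printf("mode=%o size=%d mtime=%s\n",
		mode, size, time.Unix(mtime, 0).UTC())
	// mode=100644 size=1024 mtime=2019-01-01 00:00:00 +0000 UTC
	// decodeEntryMode strips the 0100000 regular-file type bits, leaving 0644
}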

func (o *Object) statReadLink() error {
	p := path.Join(o.fs.root, o.remote)
	output, code, err := o.fs.device.execCommandWithExitCode("readlink -f", p)
	output = strings.TrimSuffix(output, "\n")
	switch err := err.(type) {
	case nil:
	case adb.ShellExitError:
		return errors.Errorf("readlink %q failed with %d: %q", o.remote, code, output)
	default:
		return errors.Wrap(err, "readlink")
	}
	return o.statStatArg(statArgC, output)
}

func (o *Object) statRealPath() error {
	p := path.Join(o.fs.root, o.remote)
	output, code, err := o.fs.device.execCommandWithExitCode("realpath", p)
	output = strings.TrimSuffix(output, "\n")
	switch err := err.(type) {
	case nil:
	case adb.ShellExitError:
		return errors.Errorf("realpath %q failed with %d: %q", o.remote, code, output)
	default:
		return errors.Wrap(err, "realpath")
	}
	return o.statStatArg(statArgC, output)
}

type touchFunc func(*Object, time.Time) error

// touchTry probes the available touch variants and caches the first one
// that works for subsequent calls
func (o *Object) touchTry(t time.Time) error {
	o.fs.touchFuncMu.Lock()
	defer o.fs.touchFuncMu.Unlock()

	for _, f := range []touchFunc{
		(*Object).touchCmd, (*Object).touchCd,
	} {
		err := f(o, t)
		if err != nil {
			fs.Debugf(o, "%s", err)
		} else {
			o.fs.touchFunc = f
			return nil
		}
	}

	return errors.Errorf("unable to set modification time")
}

const (
	touchArgCmd = "-cmd"
	touchArgCd  = "-cd"
)

func (o *Object) touchCmd(t time.Time) error {
	return o.touchStatArg(touchArgCmd, path.Join(o.fs.root, o.remote), t)
}

func (o *Object) touchCd(t time.Time) error {
	return o.touchStatArg(touchArgCd, path.Join(o.fs.root, o.remote), t)
}

func (o *Object) touchStatArg(arg, path string, t time.Time) error {
	output, code, err := o.fs.device.execCommandWithExitCode(fmt.Sprintf("touch %s %s", arg, t.Format(time.RFC3339Nano)), path)
	output = strings.TrimSpace(output)
	switch err := err.(type) {
	case nil:
	case adb.ShellExitError:
		return errors.Errorf("touch %q failed with %d: %q", o.remote, code, output)
	default:
		return errors.Wrap(err, "touch")
	}

	err = o.stat()
	if err != nil {
		return err
	}
	if diff, ok := checkTimeEqualWithPrecision(t, o.modTime, o.fs.Precision()); !ok {
		return errors.Errorf("touch %q to %s was ineffective: %d", o.remote, t, diff)
	}

	return nil
}

func checkTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
	dt := t0.Sub(t1)
	if dt >= precision || dt <= -precision {
		return dt, false
	}
	return dt, true
}

func decodeEntryMode(entryMode uint32) os.FileMode {
	const (
		unixIFBLK  = 0x6000
		unixIFMT   = 0xf000
		unixIFCHR  = 0x2000
		unixIFDIR  = 0x4000
		unixIFIFO  = 0x1000
		unixIFLNK  = 0xa000
		unixIFREG  = 0x8000
		unixIFSOCK = 0xc000
		unixISGID  = 0x400
		unixISUID  = 0x800
		unixISVTX  = 0x200
	)

	mode := os.FileMode(entryMode & 0777)
	switch entryMode & unixIFMT {
	case unixIFBLK:
		mode |= os.ModeDevice
	case unixIFCHR:
		mode |= os.ModeDevice | os.ModeCharDevice
	case unixIFDIR:
		mode |= os.ModeDir
	case unixIFIFO:
		mode |= os.ModeNamedPipe
	case unixIFLNK:
		mode |= os.ModeSymlink
	case unixIFREG:
		// nothing to do
	case unixIFSOCK:
		mode |= os.ModeSocket
	}
	if entryMode&unixISGID != 0 {
		mode |= os.ModeSetgid
	}
	if entryMode&unixISUID != 0 {
		mode |= os.ModeSetuid
	}
	if entryMode&unixISVTX != 0 {
		mode |= os.ModeSticky
	}
	return mode
}

type execDevice struct {
	*adb.Device
}

func (d *execDevice) execCommandWithExitCode(cmd string, arg string) (string, int, error) {
	cmdLine := fmt.Sprintf("sh -c '%s \"$0\"; echo :$?' '%s'", cmd, strings.Replace(arg, "'", "'\\''", -1))
	fs.Debugf("adb", "exec: %s", cmdLine)
	conn, err := d.execCommand(cmdLine)
	if err != nil {
		return "", -1, err
	}

	resp, err := conn.ReadUntilEof()
	if err != nil {
		return "", -1, errors.Wrap(err, "ExecCommand")
	}

	outStr := string(resp)
	idx := strings.LastIndexByte(outStr, ':')
	if idx == -1 {
		return outStr, -1, fmt.Errorf("adb shell aborted, cannot parse exit code")
	}
	exitCode, _ := strconv.Atoi(strings.TrimSpace(outStr[idx+1:]))
	if exitCode != 0 {
		err = adb.ShellExitError{Command: cmdLine, ExitCode: exitCode}
	}
	return outStr[:idx], exitCode, err
}
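execCommandWithExitCode works around `adb exec:` not reporting exit status: it appends `; echo :$?` to the command and splits the combined output on the last colon. An illustration of the round trip (the command line and device output here are invented examples, not captured logs):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// What the helper would send for `rm` on a path containing a quote:
	//   sh -c 'rm "$0"; echo :$?' '/sdcard/it'\''s.txt'
	// and a plausible raw response from the device:
	raw := "rm: /sdcard/it's.txt: No such file or directory\n:1"

	idx := strings.LastIndexByte(raw, ':')
	exitCode, _ := strconv.Atoi(strings.TrimSpace(raw[idx+1:]))
	fmt.Printf("output=%q exitCode=%d\n", raw[:idx], exitCode)
	// exitCode=1 would be wrapped as adb.ShellExitError by the helper
}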

func (d *execDevice) execCommand(cmd string, args ...string) (*wire.Conn, error) {
	cmd = prepareCommandLineEscaped(cmd, args...)
	conn, err := d.Dial()
	if err != nil {
		return nil, errors.Wrap(err, "ExecCommand")
	}
	defer func() {
		if err != nil && conn != nil {
			_ = conn.Close()
		}
	}()

	req := fmt.Sprintf("exec:%s", cmd)

	if err = conn.SendMessage([]byte(req)); err != nil {
		return nil, errors.Wrap(err, "ExecCommand")
	}
	if _, err = conn.ReadStatus(req); err != nil {
		return nil, errors.Wrap(err, "ExecCommand")
	}
	return conn, nil
}

func prepareCommandLineEscaped(cmd string, args ...string) string {
	for i, arg := range args {
		args[i] = fmt.Sprintf("'%s'", strings.Replace(arg, "'", "'\\''", -1))
	}

	// Append the quoted args to the command.
	if len(args) > 0 {
		cmd = fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))
	}

	return cmd
}
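Both helpers quote arguments for a POSIX shell by wrapping them in single quotes and replacing every embedded `'` with `'\''` (close quote, escaped quote, reopen quote). For example (illustrative):

package main

import (
	"fmt"
	"strings"
)

func main() {
	arg := "it's a file"
	quoted := fmt.Sprintf("'%s'", strings.Replace(arg, "'", "'\\''", -1))
	fmt.Println(quoted) // 'it'\''s a file'
}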

type adbReader struct {
	io.ReadCloser
	skip     int64
	read     int64
	expected int64
}

func (r *adbReader) Read(b []byte) (n int, err error) {
	n, err = r.ReadCloser.Read(b)
	if s := r.skip; n > 0 && s > 0 {
		_n := int64(n)
		if _n <= s {
			r.skip -= _n
			return r.Read(b)
		}
		r.skip = 0
		copy(b, b[s:n])
		n -= int(s)
	}
	r.read += int64(n)
	if err == io.EOF && r.read < r.expected {
		fs.Debugf("adb", "Read: read: %d expected: %d n: %d", r.read, r.expected, n)
		return n, io.ErrUnexpectedEOF
	}
	return n, err
}
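adbReader trims the block-alignment surplus: the first `skip` bytes of the dd stream belong before the requested offset, so they are discarded before any data reaches the caller, and a stream that ends short of `expected` bytes is turned into io.ErrUnexpectedEOF. A minimal self-contained sketch of just the skip behaviour (without the EOF accounting), under assumed values:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

// skipReader discards the first skip bytes, like adbReader above.
type skipReader struct {
	io.Reader
	skip int64
}

func (r *skipReader) Read(b []byte) (int, error) {
	n, err := r.Reader.Read(b)
	if s := r.skip; n > 0 && s > 0 {
		if int64(n) <= s {
			r.skip -= int64(n) // whole read was alignment padding
			return r.Read(b)
		}
		r.skip = 0
		copy(b, b[s:n]) // shift the payload to the front of the buffer
		n -= int(s)
	}
	return n, err
}

func main() {
	// 8 bytes of "block" data; the caller wants the last 4 (skip = 4).
	src := bytes.NewReader([]byte("XXXXdata"))
	out, _ := ioutil.ReadAll(&skipReader{Reader: src, skip: 4})
	fmt.Printf("%s\n", out) // data
}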

20 backend/adb/adb_test.go (new file)
@@ -0,0 +1,20 @@
// Test ADB filesystem interface
package adb_test

import (
	"testing"

	"github.com/ncw/rclone/backend/adb"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestAdb:/data/local/tmp",
		NilObject:  (*adb.Object)(nil),
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: "TestAdb", Key: "copy_links", Value: "true"},
		},
	})
}
@@ -2,6 +2,7 @@ package all

import (
	// Active file systems
	_ "github.com/ncw/rclone/backend/adb"
	_ "github.com/ncw/rclone/backend/alias"
	_ "github.com/ncw/rclone/backend/amazonclouddrive"
	_ "github.com/ncw/rclone/backend/azureblob"
@@ -16,7 +17,6 @@ import (
	_ "github.com/ncw/rclone/backend/http"
	_ "github.com/ncw/rclone/backend/hubic"
	_ "github.com/ncw/rclone/backend/jottacloud"
	_ "github.com/ncw/rclone/backend/koofr"
	_ "github.com/ncw/rclone/backend/local"
	_ "github.com/ncw/rclone/backend/mega"
	_ "github.com/ncw/rclone/backend/onedrive"

@@ -32,6 +32,7 @@ import (
	"github.com/ncw/rclone/lib/dircache"
	"github.com/ncw/rclone/lib/oauthutil"
	"github.com/ncw/rclone/lib/pacer"
	"github.com/ncw/rclone/lib/rest"
	"github.com/pkg/errors"
	"golang.org/x/oauth2"
)
@@ -154,7 +155,7 @@ type Fs struct {
	noAuthClient *http.Client       // unauthenticated http client
	root         string             // the path we are working on
	dirCache     *dircache.DirCache // Map of directory path to directory id
	pacer        *fs.Pacer          // pacer for API calls
	pacer        *pacer.Pacer       // pacer for API calls
	trueRootID   string             // ID of true root directory
	tokenRenewer *oauthutil.Renew   // renew the token on expiry
}
@@ -272,7 +273,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	root: root,
	opt:  *opt,
	c:    c,
	pacer: fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
	pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
	noAuthClient: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{
@@ -1092,7 +1093,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
	if !bigObject {
		in, resp, err = file.OpenHeaders(headers)
	} else {
		in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
		in, resp, err = file.OpenTempURLHeaders(rest.ClientWithHeaderReset(o.fs.noAuthClient, headers), headers)
	}
	return o.fs.shouldRetry(resp, err)
})
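The `pacer *fs.Pacer` / `pacer *pacer.Pacer` pairs here and in the backends below are the same change repeated throughout. A side-by-side sketch of the two construction styles, shown as comments because the two APIs belong to different rclone versions and do not compile together (which side is old and which is new is not recoverable from this view):

// Functional-options style, wrapped in fs.NewPacer:
//   f.pacer = fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep)))
// Chained-setters style on pacer.New:
//   f.pacer = pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer)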
@@ -1,6 +1,6 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system

// +build !plan9,!solaris
// +build !plan9,!solaris,go1.8

package azureblob

@@ -144,7 +144,7 @@ type Fs struct {
	containerOKMu    sync.Mutex            // mutex to protect container OK
	containerOK      bool                  // true if we have created the container
	containerDeleted bool                  // true if we have deleted the container
	pacer            *fs.Pacer             // To pace and retry the API calls
	pacer            *pacer.Pacer          // To pace and retry the API calls
	uploadToken      *pacer.TokenDispenser // control concurrency
}

@@ -347,7 +347,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	opt:       *opt,
	container: container,
	root:      directory,
	pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant).SetPacer(pacer.S3Pacer),
	uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
	client:      fshttp.NewClient(fs.Config),
}
@@ -1386,16 +1386,16 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
blob := o.getBlobReference()
httpHeaders := azblob.BlobHTTPHeaders{}
httpHeaders.ContentType = fs.MimeType(o)
// Compute the Content-MD5 of the file, for multiparts uploads it
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
// Note: If multipart, a MD5 checksum will also be computed for each uploaded block
// in order to validate its integrity during transport
if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
	sourceMD5bytes, err := hex.DecodeString(sourceMD5)
	if err == nil {
		httpHeaders.ContentMD5 = sourceMD5bytes
	} else {
		fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
// Multipart upload doesn't support MD5 checksums at put block calls, hence calculate
// MD5 only for PutBlob requests
if size < int64(o.fs.opt.UploadCutoff) {
	if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
		sourceMD5bytes, err := hex.DecodeString(sourceMD5)
		if err == nil {
			httpHeaders.ContentMD5 = sourceMD5bytes
		} else {
			fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
		}
	}
}

@@ -1,4 +1,4 @@
// +build !plan9,!solaris
// +build !plan9,!solaris,go1.8

package azureblob

@@ -1,6 +1,6 @@
// Test AzureBlob filesystem interface

// +build !plan9,!solaris
// +build !plan9,!solaris,go1.8

package azureblob

@@ -1,6 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9 solaris
// +build plan9 solaris !go1.8

package azureblob
@@ -167,7 +167,7 @@ type Fs struct {
	uploadMu     sync.Mutex                  // lock for upload variable
	uploads      []*api.GetUploadURLResponse // result of get upload URL calls
	authMu       sync.Mutex                  // lock for authorizing the account
	pacer        *fs.Pacer                   // To pace and retry the API calls
	pacer        *pacer.Pacer                // To pace and retry the API calls
	bufferTokens chan []byte                 // control concurrency of multipart uploads
}

@@ -251,7 +251,13 @@ func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
			fs.Errorf(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err)
		}
	}
	return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second)
	retryAfterDuration := time.Duration(retryAfter) * time.Second
	if f.pacer.GetSleep() < retryAfterDuration {
		fs.Debugf(f, "Setting sleep to %v after error: %v", retryAfterDuration, err)
		// We set 1/2 the value here because the pacer will double it immediately
		f.pacer.SetSleep(retryAfterDuration / 2)
	}
	return true, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -357,7 +363,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	bucket: bucket,
	root:   directory,
	srv:    rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
	pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.features = (&fs.Features{
	ReadMimeType: true,
@@ -952,13 +958,6 @@ func (f *Fs) hide(Name string) error {
	return f.shouldRetry(resp, err)
})
if err != nil {
	if apiErr, ok := err.(*api.Error); ok {
		if apiErr.Code == "already_hidden" {
			// sometimes eventual consistency causes this, so
			// ignore this error since it is harmless
			return nil
		}
	}
	return errors.Wrapf(err, "failed to hide %q", Name)
}
return nil
@@ -1213,7 +1212,7 @@ func (o *Object) parseTimeString(timeString string) (err error) {
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
if err != nil {
	fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
	return nil
	return err
}
o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC()
return nil
@@ -111,7 +111,7 @@ type Fs struct {
	features     *fs.Features          // optional features
	srv          *rest.Client          // the connection to the one drive server
	dirCache     *dircache.DirCache    // Map of directory path to directory id
	pacer        *fs.Pacer             // pacer for API calls
	pacer        *pacer.Pacer          // pacer for API calls
	tokenRenewer *oauthutil.Renew      // renew the token on expiry
	uploadToken  *pacer.TokenDispenser // control concurrency
}
@@ -260,7 +260,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	root: root,
	opt:  *opt,
	srv:  rest.NewClient(oAuthClient).SetRoot(rootURL),
	pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
	uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
}
f.features = (&fs.Features{
4 backend/cache/cache.go (vendored)
@@ -1191,7 +1191,7 @@ func (f *Fs) Rmdir(dir string) error {
}

var queuedEntries []*Object
err = walk.ListR(f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
err = walk.Walk(f.tempFs, dir, true, -1, func(path string, entries fs.DirEntries, err error) error {
	for _, o := range entries {
		if oo, ok := o.(fs.Object); ok {
			co := ObjectFromOriginal(f, oo)
@@ -1287,7 +1287,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}

var queuedEntries []*Object
err := walk.ListR(f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
err := walk.Walk(f.tempFs, srcRemote, true, -1, func(path string, entries fs.DirEntries, err error) error {
	for _, o := range entries {
		if oo, ok := o.(fs.Object); ok {
			co := ObjectFromOriginal(f, oo)

6 backend/cache/cache_test.go (vendored)
@@ -15,9 +15,7 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:                   "TestCache:",
		NilObject:                    (*cache.Object)(nil),
		UnimplementableFsMethods:     []string{"PublicLink", "MergeDirs"},
		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
		RemoteName: "TestCache:",
		NilObject:  (*cache.Object)(nil),
	})
}

2 backend/cache/storage_persistent.go (vendored)
@@ -1023,7 +1023,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
}

var queuedEntries []fs.Object
err = walk.ListR(cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
err = walk.Walk(cacheFs.tempFs, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
	for _, o := range entries {
		if oo, ok := o.(fs.Object); ok {
			queuedEntries = append(queuedEntries, oo)
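The three cache hunks above are the same migration between the two directory-walking helpers, repeated. As comments, since the two signatures come from different rclone versions and do not compile together:

// One side walks with a per-directory callback taking the path and an error:
//   walk.Walk(f.tempFs, dir, true, -1, func(path string, entries fs.DirEntries, err error) error { ... })
// The other lists objects only, with no path or error in the callback:
//   walk.ListR(f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error { ... })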
@@ -169,10 +169,23 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
	WriteMimeType:           false,
	BucketBased:             true,
	CanHaveEmptyDirectories: true,
	SetTier:                 true,
	GetTier:                 true,
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)

doChangeNotify := wrappedFs.Features().ChangeNotify
if doChangeNotify != nil {
	f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
		wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
			decrypted, err := f.DecryptFileName(path)
			if err != nil {
				fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
				return
			}
			notifyFunc(decrypted, entryType)
		}
		doChangeNotify(wrappedNotifyFunc, pollInterval)
	}
}

return f, err
}

@@ -189,7 +202,6 @@ type Options struct {
// Fs represents a wrapped fs.Fs
type Fs struct {
	fs.Fs
	wrapper fs.Fs
	name    string
	root    string
	opt     Options
@@ -532,16 +544,6 @@ func (f *Fs) UnWrap() fs.Fs {
	return f.Fs
}

// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
	return f.wrapper
}

// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
	f.wrapper = wrapper
}

// EncryptFileName returns an encrypted file name
func (f *Fs) EncryptFileName(fileName string) string {
	return f.cipher.EncryptFileName(fileName)
@@ -614,75 +616,6 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr
	return m.Sums()[hashType], nil
}

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(dirs []fs.Directory) error {
	do := f.Fs.Features().MergeDirs
	if do == nil {
		return errors.New("MergeDirs not supported")
	}
	out := make([]fs.Directory, len(dirs))
	for i, dir := range dirs {
		out[i] = fs.NewDirCopy(dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
	}
	return do(out)
}

// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
	do := f.Fs.Features().DirCacheFlush
	if do != nil {
		do()
	}
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(remote string) (string, error) {
	do := f.Fs.Features().PublicLink
	if do == nil {
		return "", errors.New("PublicLink not supported")
	}
	o, err := f.NewObject(remote)
	if err != nil {
		// assume it is a directory
		return do(f.cipher.EncryptDirName(remote))
	}
	return do(o.(*Object).Object.Remote())
}

// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
	do := f.Fs.Features().ChangeNotify
	if do == nil {
		return
	}
	wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
		fs.Logf(f, "path %q entryType %d", path, entryType)
		var (
			err       error
			decrypted string
		)
		switch entryType {
		case fs.EntryDirectory:
			decrypted, err = f.cipher.DecryptDirName(path)
		case fs.EntryObject:
			decrypted, err = f.cipher.DecryptFileName(path)
		default:
			fs.Errorf(path, "crypt ChangeNotify: ignoring unknown EntryType %d", entryType)
			return
		}
		if err != nil {
			fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
			return
		}
		notifyFunc(decrypted, entryType)
	}
	do(wrappedNotifyFunc, pollIntervalChan)
}

// Object describes a wrapped for being read from the Fs
//
// This decrypts the remote name and decrypts the data
@@ -841,34 +774,6 @@ func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
	return "", nil
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	do, ok := o.Object.(fs.IDer)
	if !ok {
		return ""
	}
	return do.ID()
}

// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
	do, ok := o.Object.(fs.SetTierer)
	if !ok {
		return errors.New("crypt: underlying remote does not support SetTier")
	}
	return do.SetTier(tier)
}

// GetTier returns storage tier or class of the Object
func (o *Object) GetTier() string {
	do, ok := o.Object.(fs.GetTierer)
	if !ok {
		return ""
	}
	return do.GetTier()
}

// Check the interfaces are satisfied
var (
	_ fs.Fs = (*Fs)(nil)
@@ -882,15 +787,7 @@ var (
	_ fs.UnWrapper       = (*Fs)(nil)
	_ fs.ListRer         = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Wrapper         = (*Fs)(nil)
	_ fs.MergeDirser     = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier  = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.ObjectInfo      = (*ObjectInfo)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.ObjectUnWrapper = (*Object)(nil)
	_ fs.IDer            = (*Object)(nil)
	_ fs.SetTierer       = (*Object)(nil)
	_ fs.GetTierer       = (*Object)(nil)
)
@@ -21,9 +21,8 @@ func TestIntegration(t *testing.T) {
	t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
	RemoteName:                   *fstest.RemoteName,
	NilObject:                    (*crypt.Object)(nil),
	UnimplementableObjectMethods: []string{"MimeType"},
	RemoteName: *fstest.RemoteName,
	NilObject:  (*crypt.Object)(nil),
})
}

@@ -43,7 +42,6 @@ func TestStandard(t *testing.T) {
	{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
	{Name: name, Key: "filename_encryption", Value: "standard"},
	},
	UnimplementableObjectMethods: []string{"MimeType"},
})
}

@@ -63,7 +61,6 @@ func TestOff(t *testing.T) {
	{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
	{Name: name, Key: "filename_encryption", Value: "off"},
	},
	UnimplementableObjectMethods: []string{"MimeType"},
})
}

@@ -83,7 +80,6 @@ func TestObfuscate(t *testing.T) {
	{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
	{Name: name, Key: "filename_encryption", Value: "obfuscate"},
	},
	SkipBadWindowsCharacters:     true,
	UnimplementableObjectMethods: []string{"MimeType"},
	SkipBadWindowsCharacters: true,
})
}
@@ -1,4 +1,7 @@
|
||||
// Package drive interfaces with the Google Drive object storage system
|
||||
|
||||
// +build go1.9
|
||||
|
||||
package drive
|
||||
|
||||
// FIXME need to deal with some corner cases
|
||||
@@ -183,10 +186,10 @@ func init() {
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance.",
|
||||
Help: "Google Application Client Id\nLeave blank normally.",
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Google Application Client Secret\nSetting your own is recommended.",
|
||||
Help: "Google Application Client Secret\nLeave blank normally.",
|
||||
}, {
|
||||
Name: "scope",
|
||||
Help: "Scope that rclone should use when requesting access from drive.",
|
||||
@@ -237,22 +240,6 @@ func init() {
|
||||
Default: false,
|
||||
Help: "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_checksum_gphotos",
|
||||
Default: false,
|
||||
Help: `Skip MD5 checksum on Google photos and videos only.
|
||||
|
||||
Use this if you get checksum errors when transferring Google photos or
|
||||
videos.
|
||||
|
||||
Setting this flag will cause Google photos and videos to return a
|
||||
blank MD5 checksum.
|
||||
|
||||
Google photos are identifed by being in the "photos" space.
|
||||
|
||||
Corrupted checksums are caused by Google modifying the image/video but
|
||||
not updating the checksum.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "shared_with_me",
|
||||
Default: false,
|
||||
@@ -409,7 +396,6 @@ type Options struct {
AuthOwnerOnly bool `config:"auth_owner_only"`
UseTrash bool `config:"use_trash"`
SkipGdocs bool `config:"skip_gdocs"`
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
SharedWithMe bool `config:"shared_with_me"`
TrashedOnly bool `config:"trashed_only"`
Extensions string `config:"formats"`
@@ -440,7 +426,7 @@ type Fs struct {
client *http.Client // authorized client
rootFolderID string // the id of the root folder
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // To pace the API calls
pacer *pacer.Pacer // To pace the API calls
exportExtensions []string // preferred extensions to download docs
importMimeTypes []string // MIME types to convert to docs
isTeamDrive bool // true if this is a team drive
@@ -629,9 +615,6 @@ func (f *Fs) list(dirIDs []string, title string, directoriesOnly, filesOnly, inc
if f.opt.AuthOwnerOnly {
fields += ",owners"
}
if f.opt.SkipChecksumGphotos {
fields += ",spaces"
}

fields = fmt.Sprintf("files(%s),nextPageToken", fields)
@@ -693,33 +676,28 @@ func isPowerOfTwo(x int64) bool {
}

// add a charset parameter to all text/* MIME types
func fixMimeType(mimeTypeIn string) string {
if mimeTypeIn == "" {
return ""
}
mediaType, param, err := mime.ParseMediaType(mimeTypeIn)
func fixMimeType(mimeType string) string {
mediaType, param, err := mime.ParseMediaType(mimeType)
if err != nil {
return mimeTypeIn
return mimeType
}
mimeTypeOut := mimeTypeIn
if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
if strings.HasPrefix(mimeType, "text/") && param["charset"] == "" {
param["charset"] = "utf-8"
mimeTypeOut = mime.FormatMediaType(mediaType, param)
mimeType = mime.FormatMediaType(mediaType, param)
}
if mimeTypeOut == "" {
panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
}
return mimeTypeOut
return mimeType
}
func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
out = make(map[string][]string, len(in))
for k, v := range in {
func fixMimeTypeMap(m map[string][]string) map[string][]string {
for _, v := range m {
for i, mt := range v {
v[i] = fixMimeType(mt)
fixed := fixMimeType(mt)
if fixed == "" {
panic(errors.Errorf("unable to fix MIME type %q", mt))
}
v[i] = fixed
}
out[fixMimeType(k)] = v
}
return out
return m
}
func isInternalMimeType(mimeType string) bool {
return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
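Editor's note: both versions of fixMimeType lean entirely on the standard mime package. A self-contained sketch of the underlying transformation (standard library only, nothing rclone-specific assumed):

package main

import (
	"fmt"
	"mime"
	"strings"
)

// addCharset appends charset=utf-8 to text/* MIME types that lack one,
// mirroring the transformation fixMimeType performs above.
func addCharset(mimeType string) string {
	mediaType, param, err := mime.ParseMediaType(mimeType)
	if err != nil {
		return mimeType // leave unparseable types untouched
	}
	if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
		param["charset"] = "utf-8"
		return mime.FormatMediaType(mediaType, param)
	}
	return mimeType
}

func main() {
	fmt.Println(addCharset("text/html"))       // text/html; charset=utf-8
	fmt.Println(addCharset("application/pdf")) // application/pdf
}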
@@ -811,8 +789,8 @@ func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
|
||||
}
|
||||
|
||||
// newPacer makes a pacer configured for drive
|
||||
func newPacer(opt *Options) *fs.Pacer {
|
||||
return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
|
||||
func newPacer(opt *Options) *pacer.Pacer {
|
||||
return pacer.New().SetMinSleep(time.Duration(opt.PacerMinSleep)).SetBurst(opt.PacerBurst).SetPacer(pacer.GoogleDrivePacer)
|
||||
}
|
||||
|
||||
func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
|
||||
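Editor's note: whichever constructor is in use, callers drive the pacer the same way, as the Copy and Move hunks below show: each API call is wrapped in Call, which retries while the callback returns true. A hedged sketch of the calling pattern against the fs.NewPacer variant (the option values and retry classification here are illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/lib/pacer"
)

func main() {
	// Construction as in the fs.Pacer side of the diff.
	p := fs.NewPacer(pacer.NewDefault(pacer.MinSleep(10*time.Millisecond), pacer.MaxSleep(2*time.Second), pacer.DecayConstant(2)))

	// Returning true asks the pacer to back off and retry;
	// returning false accepts the result as final.
	attempts := 0
	err := p.Call(func() (bool, error) {
		attempts++
		if attempts < 3 {
			return true, fmt.Errorf("transient error %d", attempts) // retry
		}
		return false, nil // success
	})
	fmt.Println(attempts, err) // 3 <nil>
}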
@@ -924,7 +902,6 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: true,
}).Fill(f)

// Create a new authorized Drive client.
@@ -1019,15 +996,6 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {

// newRegularObject creates a fs.Object for a normal drive.File
func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
if f.opt.SkipChecksumGphotos {
for _, space := range info.Spaces {
if space == "photos" {
info.Md5Checksum = ""
break
}
}
}
return &Object{
baseObject: f.newBaseObject(remote, info),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, info.Id),
@@ -1869,24 +1837,16 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
remote = remote[:len(remote)-len(ext)]
}

// Look to see if there is an existing object
existingObject, _ := f.NewObject(remote)

createInfo, err := f.createFileInfo(remote, src.ModTime())
if err != nil {
return nil, err
}

supportTeamDrives, err := f.ShouldSupportTeamDrives(src)
if err != nil {
return nil, err
}

var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Copy(srcObj.id, createInfo).
Fields(partialFields).
SupportsTeamDrives(supportTeamDrives).
SupportsTeamDrives(f.isTeamDrive).
KeepRevisionForever(f.opt.KeepRevisionForever).
Do()
return shouldRetry(err)
@@ -1894,17 +1854,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
if err != nil {
return nil, err
}
newObject, err := f.newObjectWithInfo(remote, info)
if err != nil {
return nil, err
}
if existingObject != nil {
err = existingObject.Remove()
if err != nil {
fs.Errorf(existingObject, "Failed to remove existing object after copy: %v", err)
}
}
return newObject, nil
return f.newObjectWithInfo(remote, info)
}

// Purge deletes all the files and the container
@@ -2030,11 +1980,6 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
dstParents := strings.Join(dstInfo.Parents, ",")
dstInfo.Parents = nil

supportTeamDrives, err := f.ShouldSupportTeamDrives(src)
if err != nil {
return nil, err
}

// Do the move
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
@@ -2042,7 +1987,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
RemoveParents(srcParentID).
AddParents(dstParents).
Fields(partialFields).
SupportsTeamDrives(supportTeamDrives).
SupportsTeamDrives(f.isTeamDrive).
Do()
return shouldRetry(err)
})
@@ -2053,20 +1998,6 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, info)
}
// ShouldSupportTeamDrives returns whether the request should support TeamDrives
func (f *Fs) ShouldSupportTeamDrives(src fs.Object) (bool, error) {
srcIsTeamDrive := false
if srcFs, ok := src.Fs().(*Fs); ok {
srcIsTeamDrive = srcFs.isTeamDrive
}

if f.isTeamDrive {
return true, nil
}

return srcIsTeamDrive, nil
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(remote string) (link string, err error) {
id, err := f.dirCache.FindDir(remote, false)
@@ -2499,10 +2430,6 @@ func (o *baseObject) httpResponse(url, method string, options []fs.OpenOption) (
return req, nil, err
}
fs.OpenOptionAddHTTPHeaders(req.Header, options)
if o.bytes == 0 {
// Don't supply range requests for 0 length objects as they always fail
delete(req.Header, "Range")
}
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.client.Do(req)
if err == nil {
@@ -1,3 +1,5 @@
// +build go1.9

package drive

import (

@@ -1,5 +1,7 @@
// Test Drive filesystem interface

// +build go1.9

package drive

import (

6
backend/drive/drive_unsupported.go
Normal file
@@ -0,0 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build !go1.9

package drive
@@ -8,6 +8,8 @@
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS

// +build go1.9

package drive

import (
@@ -160,7 +160,7 @@ type Fs struct {
team team.Client // for the Teams API
slashRoot string // root with "/" prefix, lowercase
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls
pacer *pacer.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
}

@@ -209,7 +209,7 @@ func shouldRetry(err error) (bool, error) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
time.Sleep(time.Duration(e.RateLimitError.RetryAfter) * time.Second)
}
return true, err
}
@@ -273,7 +273,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f := &Fs{
name: name,
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
config := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
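Editor's note: the two sides of the shouldRetry hunk handle Dropbox rate limiting differently. One sleeps inline for the server-requested delay; the other wraps the error with pacer.RetryAfterError so the pacer itself honours the delay before the retry. A minimal sketch of the wrapping flavour (the error value is illustrative):

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/ncw/rclone/lib/pacer"
)

func main() {
	base := errors.New("too_many_requests")
	// Record the delay the server asked for; a pacer that sees this
	// wrapped error holds off for at least that long before retrying,
	// instead of blocking the calling goroutine with time.Sleep.
	err := pacer.RetryAfterError(base, 5*time.Second)
	fmt.Println(err)
}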
@@ -15,7 +15,6 @@ import (
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
)
@@ -46,11 +45,6 @@ func init() {
Help: "FTP password",
IsPassword: true,
Required: true,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Default: 0,
Advanced: true,
},
},
})
@@ -58,11 +52,10 @@ func init() {

// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
Concurrency int `config:"concurrency"`
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
}

// Fs represents a remote FTP server
@@ -77,7 +70,6 @@ type Fs struct {
dialAddr string
poolMu sync.Mutex
pool []*ftp.ServerConn
tokens *pacer.TokenDispenser
}

// Object describes an FTP file
@@ -136,9 +128,6 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {

// Get an FTP connection from the pool, or open a new one
func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
if f.opt.Concurrency > 0 {
f.tokens.Get()
}
f.poolMu.Lock()
if len(f.pool) > 0 {
c = f.pool[0]
@@ -158,9 +147,6 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
// if err is not nil then it checks the connection is alive using a
// NOOP request
func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
if f.opt.Concurrency > 0 {
defer f.tokens.Put()
}
c := *pc
*pc = nil
if err != nil {
@@ -212,7 +198,6 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
user: user,
pass: pass,
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
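Editor's note: the concurrency option above throttles simultaneous FTP connections with a token dispenser: Get before taking a connection, Put when done. A self-contained sketch of the idea using a buffered channel (illustrative; rclone's own dispenser lives in lib/pacer):

package main

import (
	"fmt"
	"sync"
)

// tokenDispenser caps how many callers may hold a "connection" at once.
type tokenDispenser struct{ tokens chan struct{} }

func newTokenDispenser(n int) *tokenDispenser {
	td := &tokenDispenser{tokens: make(chan struct{}, n)}
	for i := 0; i < n; i++ {
		td.tokens <- struct{}{} // pre-fill with n tokens
	}
	return td
}

func (td *tokenDispenser) Get() { <-td.tokens }             // blocks when none left
func (td *tokenDispenser) Put() { td.tokens <- struct{}{} } // return a token

func main() {
	td := newTokenDispenser(2) // at most 2 concurrent "connections"
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			td.Get()
			defer td.Put()
			fmt.Println("connection in use by worker", i)
		}(i)
	}
	wg.Wait()
}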
@@ -1,4 +1,7 @@
// Package googlecloudstorage provides an interface to Google Cloud Storage

// +build go1.9

package googlecloudstorage

/*
@@ -13,7 +16,6 @@ FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 erro
*/

import (
"context"
"encoding/base64"
"encoding/hex"
"fmt"
@@ -43,8 +45,6 @@ import (
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"

// NOTE: This API is deprecated
storage "google.golang.org/api/storage/v1"
)

@@ -144,22 +144,6 @@ func init() {
Value: "publicReadWrite",
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
}},
}, {
Name: "bucket_policy_only",
Help: `Access checks should use bucket-level IAM policies.

If you want to upload objects to a bucket with Bucket Policy Only set
then you will need to set this.

When it is set, rclone:

- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set

Docs: https://cloud.google.com/storage/docs/bucket-policy-only
`,
Default: false,
}, {
Name: "location",
Help: "Location for the newly created buckets.",
@@ -257,7 +241,6 @@ type Options struct {
ServiceAccountCredentials string `config:"service_account_credentials"`
ObjectACL string `config:"object_acl"`
BucketACL string `config:"bucket_acl"`
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
}
@@ -273,7 +256,7 @@ type Fs struct {
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
pacer *fs.Pacer // To pace the API calls
pacer *pacer.Pacer // To pace the API calls
}

// Object describes a storage object
@@ -398,11 +381,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} else {
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
if err != nil {
ctx := context.Background()
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
}
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
}
}

@@ -416,7 +395,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
bucket: bucket,
root: directory,
opt: *opt,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
}
f.features = (&fs.Features{
ReadMimeType: true,
@@ -730,19 +709,8 @@ func (f *Fs) Mkdir(dir string) (err error) {
Location: f.opt.Location,
StorageClass: f.opt.StorageClass,
}
if f.opt.BucketPolicyOnly {
bucket.IamConfiguration = &storage.BucketIamConfiguration{
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
Enabled: true,
},
}
}
err = f.pacer.Call(func() (bool, error) {
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Do()
_, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do()
return shouldRetry(err)
})
if err == nil {
@@ -1008,11 +976,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Do()
newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do()
return shouldRetry(err)
})
if err != nil {
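Editor's note: the Bucket Policy Only logic above hinges on one rule: when bucket-level IAM is in force, no ACLs may be sent with the request. The insert call is therefore built in two steps so PredefinedAcl is only attached conditionally. A sketch of that pattern, pulled out of the Mkdir hunk (svc, project, bucketACL and bucket stand in for the fields used above):

package main

import (
	storage "google.golang.org/api/storage/v1"
)

// insertBucket builds the Buckets.Insert call, attaching a predefined ACL
// only when Bucket Policy Only is off, as in the Mkdir hunk above.
func insertBucket(svc *storage.Service, project, bucketACL string, bucketPolicyOnly bool, bucket *storage.Bucket) error {
	if bucketPolicyOnly {
		bucket.IamConfiguration = &storage.BucketIamConfiguration{
			BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
				Enabled: true,
			},
		}
	}
	call := svc.Buckets.Insert(project, bucket)
	if !bucketPolicyOnly {
		call.PredefinedAcl(bucketACL) // ACLs are rejected under Bucket Policy Only
	}
	_, err := call.Do()
	return err
}

func main() {} // sketch only; a real caller needs an authenticated *storage.Service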
@@ -1,5 +1,7 @@
// Test GoogleCloudStorage filesystem interface

// +build go1.9

package googlecloudstorage_test

import (

@@ -0,0 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build !go1.9

package googlecloudstorage
@@ -6,7 +6,6 @@ package http

import (
"io"
"mime"
"net/http"
"net/url"
"path"
@@ -45,22 +44,6 @@ func init() {
Value: "https://user:pass@example.com",
Help: "Connect to example.com using a username and password",
}},
}, {
Name: "no_slash",
Help: `Set this if the site doesn't end directories with /

Use this if your target website does not use / on the end of
directories.

A / on the end of a path is how rclone normally tells the difference
between files and directories. If this flag is set, then rclone will
treat all files with Content-Type: text/html as directories and read
URLs from them rather than downloading them.

Note that this may cause rclone to confuse genuine HTML files with
directories.`,
Default: false,
Advanced: true,
}},
}
fs.Register(fsi)
@@ -69,7 +52,6 @@ directories.`,
// Options defines the configuration for this backend
type Options struct {
Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"`
}

// Fs stores the interface to the remote HTTP files
@@ -288,20 +270,14 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
if err != nil {
return nil, err
}
var (
walk func(*html.Node)
seen = make(map[string]struct{})
)
var walk func(*html.Node)
walk = func(n *html.Node) {
if n.Type == html.ElementNode && n.Data == "a" {
for _, a := range n.Attr {
if a.Key == "href" {
name, err := parseName(base, a.Val)
if err == nil {
if _, found := seen[name]; !found {
names = append(names, name)
seen[name] = struct{}{}
}
names = append(names, name)
}
break
}
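Editor's note: one side of the hunk above deduplicates hrefs with a seen map while recursively walking the parsed HTML tree. A self-contained sketch of that traversal using golang.org/x/net/html (the backend's parseName is replaced here by using the raw href):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	page := `<a href="one.txt">1</a><a href="two/">2</a><a href="one.txt">dup</a>`
	root, err := html.Parse(strings.NewReader(page))
	if err != nil {
		panic(err)
	}
	var names []string
	seen := make(map[string]struct{})
	var walk func(*html.Node)
	walk = func(n *html.Node) {
		if n.Type == html.ElementNode && n.Data == "a" {
			for _, a := range n.Attr {
				if a.Key == "href" {
					// the seen map suppresses duplicate links
					if _, found := seen[a.Val]; !found {
						names = append(names, a.Val)
						seen[a.Val] = struct{}{}
					}
					break
				}
			}
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			walk(c)
		}
	}
	walk(root)
	fmt.Println(names) // [one.txt two/]
}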
@@ -326,16 +302,14 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
res, err := f.httpClient.Get(URL)
if err == nil {
defer fs.CheckClose(res.Body, &err)
if res.StatusCode == http.StatusNotFound {
return nil, fs.ErrorDirNotFound
}
if err == nil && res.StatusCode == http.StatusNotFound {
return nil, fs.ErrorDirNotFound
}
err = statusError(res, err)
if err != nil {
return nil, errors.Wrap(err, "failed to readDir")
}
defer fs.CheckClose(res.Body, &err)

contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
switch contentType {
@@ -379,16 +353,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
fs: f,
remote: remote,
}
switch err = file.stat(); err {
case nil:
entries = append(entries, file)
case fs.ErrorNotAFile:
// ...found a directory not a file
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
default:
if err = file.stat(); err != nil {
fs.Debugf(remote, "skipping because of error: %v", err)
continue
}
entries = append(entries, file)
}
}
return entries, nil
@@ -464,16 +433,6 @@ func (o *Object) stat() error {
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
o.modTime = t
o.contentType = res.Header.Get("Content-Type")
// If NoSlash is set then check ContentType to see if it is a directory
if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType)
if err != nil {
return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
}
if mediaType == "text/html" {
return fs.ErrorNotAFile
}
}
return nil
}
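Editor's note: the NoSlash branch of stat decides file-versus-directory by Content-Type rather than by trailing slash. The detection itself is plain standard library; a minimal sketch:

package main

import (
	"fmt"
	"mime"
)

// isHTMLDir reports whether a Content-Type header denotes text/html,
// which the no_slash mode above treats as a directory listing.
func isHTMLDir(contentType string) (bool, error) {
	mediaType, _, err := mime.ParseMediaType(contentType)
	if err != nil {
		return false, err
	}
	return mediaType == "text/html", nil
}

func main() {
	fmt.Println(isHTMLDir("text/html; charset=utf-8")) // true <nil>
	fmt.Println(isHTMLDir("text/plain"))               // false <nil>
}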
@@ -1,3 +1,5 @@
// +build go1.8

package http

import (
@@ -63,7 +65,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
return f, tidy
}

func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
func testListRoot(t *testing.T, f fs.Fs) {
entries, err := f.List("")
require.NoError(t, err)

@@ -91,29 +93,15 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {

e = entries[3]
assert.Equal(t, "two.html", e.Remote())
if noSlash {
assert.Equal(t, int64(-1), e.Size())
_, ok = e.(fs.Directory)
assert.True(t, ok)
} else {
assert.Equal(t, int64(41), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}
assert.Equal(t, int64(7), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}

func TestListRoot(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
testListRoot(t, f, false)
}

func TestListRootNoSlash(t *testing.T) {
f, tidy := prepare(t)
f.(*Fs).opt.NoSlash = true
defer tidy()

testListRoot(t, f, true)
testListRoot(t, f)
}

func TestListSubDir(t *testing.T) {
@@ -206,7 +194,7 @@ func TestIsAFileRoot(t *testing.T) {
f, err := NewFs(remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)

testListRoot(t, f, false)
testListRoot(t, f)
}

func TestIsAFileSubDir(t *testing.T) {

@@ -1 +1 @@
<a href="two.html/file.txt">file.txt</a>
potato
@@ -11,9 +11,7 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil),
SkipFsCheckWrap: true,
SkipObjectCheckWrap: true,
RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil),
})
}
@@ -314,9 +314,3 @@ type UploadResponse struct {
Deleted interface{} `json:"deleted"`
Mime string `json:"mime"`
}

// DeviceRegistrationResponse is the response to registering a device
type DeviceRegistrationResponse struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
}
@@ -8,7 +8,6 @@ import (
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"os"
@@ -46,14 +45,10 @@ const (
apiURL = "https://api.jottacloud.com/files/v1/"
baseURL = "https://www.jottacloud.com/"
tokenURL = "https://api.jottacloud.com/auth/v1/token"
registerURL = "https://api.jottacloud.com/auth/v1/register"
cachePrefix = "rclone-jcmd5-"
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
configUsername = "user"
configClientID = "client_id"
configClientSecret = "client_secret"
charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
)

var (
@@ -63,7 +58,9 @@ var (
AuthURL: tokenURL,
TokenURL: tokenURL,
},
RedirectURL: oauthutil.RedirectLocalhostURL,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
)
@@ -82,55 +79,6 @@ func init() {
}
}

srv := rest.NewClient(fshttp.NewClient(fs.Config))

// ask if we should create a device specific token: https://github.com/ncw/rclone/issues/2995
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has its own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm() {
// random generator to generate random device names
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
randomDeviceNamePartLength := 21
randomDeviceNamePart := make([]byte, randomDeviceNamePartLength)
for i := range randomDeviceNamePart {
randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
}
randomDeviceName := "rclone-" + string(randomDeviceNamePart)
fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)

values := url.Values{}
values.Set("device_id", randomDeviceName)

// all information comes from https://github.com/ttyridal/aiojotta/wiki/Jotta-protocol-3.-Authentication#token-authentication
opts := rest.Opts{
Method: "POST",
RootURL: registerURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
}

var deviceRegistration api.DeviceRegistrationResponse
_, err := srv.CallJSON(&opts, nil, &deviceRegistration)
if err != nil {
log.Fatalf("Failed to register device: %v", err)
}

m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
}

clientID, ok := m.Get(configClientID)
if !ok {
clientID = rcloneClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)

username, ok := m.Get(configUsername)
if !ok {
log.Fatalf("No username defined")
@@ -138,6 +86,7 @@ func init() {
password := config.GetPassword("Your Jottacloud password is only required during config and will not be stored.")

// prepare our token request with username and password
srv := rest.NewClient(fshttp.NewClient(fs.Config))
values := url.Values{}
values.Set("grant_type", "PASSWORD")
values.Set("password", password)
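Editor's note: device registration above needs a unique, unguessable device_id; it builds one by sampling 21 characters from the alphanumeric charset. The generation step in isolation (seed, charset, and length exactly as in the code above):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

func main() {
	seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
	part := make([]byte, 21) // 21 random characters, as above
	for i := range part {
		part[i] = charset[seededRand.Intn(len(charset))]
	}
	fmt.Println("rclone-" + string(part)) // e.g. rclone-a8ZkQ3...
}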
@@ -241,7 +190,7 @@ type Fs struct {
endpointURL string
srv *rest.Client
apiSrv *rest.Client
pacer *fs.Pacer
pacer *pacer.Pacer
tokenRenewer *oauthutil.Renew // renew the token on expiry
}

@@ -432,20 +381,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)

clientID, ok := m.Get(configClientID)
if !ok {
clientID = rcloneClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)

// add jottacloud to the long list of sites that don't follow the oauth spec correctly
oauth2.RegisterBrokenAuthHeaderProvider("https://www.jottacloud.com/")

// the oauth client for the api servers needs
// a filter to fix the grant_type issues (see above)
baseClient := fshttp.NewClient(fs.Config)
@@ -468,7 +403,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.features = (&fs.Features{
CaseInsensitive: true,
@@ -1,589 +0,0 @@
package koofr

import (
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"path"
"strings"
"time"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"

httpclient "github.com/koofr/go-httpclient"
koofrclient "github.com/koofr/go-koofrclient"
)

// Register Fs with rclone
func init() {
fs.Register(&fs.RegInfo{
Name: "koofr",
Description: "Koofr",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "endpoint",
Help: "The Koofr API endpoint to use",
Default: "https://app.koofr.net",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name",
Required: true,
}, {
Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
IsPassword: true,
Required: true,
},
},
})
}

// Options represent the configuration of the Koofr backend
type Options struct {
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
Password string `config:"password"`
}

// A Fs is a representation of a remote Koofr Fs
type Fs struct {
name string
mountID string
root string
opt Options
features *fs.Features
client *koofrclient.KoofrClient
}

// An Object on the remote Koofr Fs
type Object struct {
fs *Fs
remote string
info koofrclient.FileInfo
}

func base(pth string) string {
rv := path.Base(pth)
if rv == "" || rv == "." {
rv = "/"
}
return rv
}

func dir(pth string) string {
rv := path.Dir(pth)
if rv == "" || rv == "." {
rv = "/"
}
return rv
}

// String returns a string representation of the remote Object
func (o *Object) String() string {
return o.remote
}

// Remote returns the remote path of the Object, relative to Fs root
func (o *Object) Remote() string {
return o.remote
}

// ModTime returns the modification time of the Object
func (o *Object) ModTime() time.Time {
return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000)
}

// Size returns the size of the Object in bytes
func (o *Object) Size() int64 {
return o.info.Size
}

// Fs returns a reference to the Koofr Fs containing the Object
func (o *Object) Fs() fs.Info {
return o.fs
}

// Hash returns an MD5 hash of the Object
func (o *Object) Hash(typ hash.Type) (string, error) {
if typ == hash.MD5 {
return o.info.Hash, nil
}
return "", nil
}

// fullPath returns full path of the remote Object (including Fs root)
func (o *Object) fullPath() string {
return o.fs.fullPath(o.remote)
}

// Storable returns true if the Object is storable
func (o *Object) Storable() bool {
return true
}

// SetModTime is not supported
func (o *Object) SetModTime(mtime time.Time) error {
return nil
}

// Open opens the Object for reading
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
var sOff, eOff int64 = 0, -1

for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
sOff = x.Offset
case *fs.RangeOption:
sOff = x.Start
eOff = x.End
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if sOff == 0 && eOff < 0 {
return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
}
if sOff < 0 {
sOff = o.Size() - eOff
eOff = o.Size()
}
if eOff > o.Size() {
eOff = o.Size()
}
span := &koofrclient.FileSpan{
Start: sOff,
End: eOff,
}
return o.fs.client.FilesGetRange(o.fs.mountID, o.fullPath(), span)
}
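Editor's note: Open above folds rclone's open options into a start/end byte range before choosing between FilesGet and FilesGetRange. The option-to-offset mapping on its own (fs.SeekOption and fs.RangeOption are the same types handled above):

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

// offsets reduces open options to a start/end pair, with -1 meaning
// "unset", following the switch in Open above.
func offsets(options ...fs.OpenOption) (sOff, eOff int64) {
	sOff, eOff = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			sOff = x.Offset
		case *fs.RangeOption:
			sOff, eOff = x.Start, x.End
		}
	}
	return sOff, eOff
}

func main() {
	fmt.Println(offsets(&fs.SeekOption{Offset: 128}))          // 128 -1
	fmt.Println(offsets(&fs.RangeOption{Start: 0, End: 1023})) // 0 1023
}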
// Update updates the Object contents
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
putopts := &koofrclient.PutFilter{
ForceOverwrite: true,
NoRename: true,
IgnoreNonExisting: true,
}
fullPath := o.fullPath()
dirPath := dir(fullPath)
name := base(fullPath)
err := o.fs.mkdir(dirPath)
if err != nil {
return err
}
info, err := o.fs.client.FilesPutOptions(o.fs.mountID, dirPath, name, in, putopts)
if err != nil {
return err
}
o.info = *info
return nil
}

// Remove deletes the remote Object
func (o *Object) Remove() error {
return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath())
}

// Name returns the name of the Fs
func (f *Fs) Name() string {
return f.name
}

// Root returns the root path of the Fs
func (f *Fs) Root() string {
return f.root
}

// String returns a string representation of the Fs
func (f *Fs) String() string {
return "koofr:" + f.mountID + ":" + f.root
}

// Features returns the optional features supported by this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}

// Precision denotes that setting modification times is not supported
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}

// Hashes returns the set of hashes provided by the Fs
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}

// fullPath constructs a full, absolute path from a Fs root relative path.
func (f *Fs) fullPath(part string) string {
return path.Join("/", f.root, part)
}

// NewFs constructs a new filesystem given a root path and configuration options
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
pass, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, err
}
client := koofrclient.NewKoofrClient(opt.Endpoint, false)
basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
client.HTTPClient.Headers.Set("Authorization", basicAuth)
mounts, err := client.Mounts()
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
client: client,
}
f.features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(f)
for _, m := range mounts {
if opt.MountID != "" {
if m.Id == opt.MountID {
f.mountID = m.Id
break
}
} else if m.IsPrimary {
f.mountID = m.Id
break
}
}
if f.mountID == "" {
if opt.MountID == "" {
return nil, errors.New("Failed to find primary mount")
}
return nil, errors.New("Failed to find mount " + opt.MountID)
}
rootFile, err := f.client.FilesInfo(f.mountID, "/"+f.root)
if err == nil && rootFile.Type != "dir" {
f.root = dir(f.root)
err = fs.ErrorIsFile
} else {
err = nil
}
return f, err
}

// List returns a list of items in a directory
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil {
return nil, translateErrorsDir(err)
}
entries = make([]fs.DirEntry, len(files))
for i, file := range files {
if file.Type == "dir" {
entries[i] = fs.NewDir(path.Join(dir, file.Name), time.Unix(0, 0))
} else {
entries[i] = &Object{
fs: f,
info: file,
remote: path.Join(dir, file.Name),
}
}
}
return entries, nil
}

// NewObject creates a new remote Object for a given remote path
func (f *Fs) NewObject(remote string) (obj fs.Object, err error) {
info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote))
if err != nil {
return nil, translateErrorsObject(err)
}
if info.Type == "dir" {
return nil, fs.ErrorNotAFile
}
return &Object{
fs: f,
info: info,
remote: remote,
}, nil
}

// Put updates a remote Object
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
putopts := &koofrclient.PutFilter{
ForceOverwrite: true,
NoRename: true,
IgnoreNonExisting: true,
}
fullPath := f.fullPath(src.Remote())
dirPath := dir(fullPath)
name := base(fullPath)
err = f.mkdir(dirPath)
if err != nil {
return nil, err
}
info, err := f.client.FilesPutOptions(f.mountID, dirPath, name, in, putopts)
if err != nil {
return nil, translateErrorsObject(err)
}
return &Object{
fs: f,
info: *info,
remote: src.Remote(),
}, nil
}

// PutStream updates a remote Object with a stream of unknown size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}

// isBadRequest is a predicate which holds true iff the error returned was
// HTTP status 400
func isBadRequest(err error) bool {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusBadRequest {
return true
}
}
return false
}

// translateErrorsDir translates koofr errors to rclone errors (for a dir
// operation)
func translateErrorsDir(err error) error {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusNotFound {
return fs.ErrorDirNotFound
}
}
return err
}

// translateErrorsObject translates Koofr errors to rclone errors (for an object operation)
func translateErrorsObject(err error) error {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}

// mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
func (f *Fs) mkdir(fullPath string) error {
if fullPath == "/" {
return nil
}
info, err := f.client.FilesInfo(f.mountID, fullPath)
if err == nil && info.Type == "dir" {
return nil
}
err = translateErrorsDir(err)
if err != nil && err != fs.ErrorDirNotFound {
return err
}
dirs := strings.Split(fullPath, "/")
parent := "/"
for _, part := range dirs {
if part == "" {
continue
}
info, err = f.client.FilesInfo(f.mountID, path.Join(parent, part))
if err != nil || info.Type != "dir" {
err = translateErrorsDir(err)
if err != nil && err != fs.ErrorDirNotFound {
return err
}
err = f.client.FilesNewFolder(f.mountID, parent, part)
if err != nil && !isBadRequest(err) {
return err
}
}
parent = path.Join(parent, part)
}
return nil
}
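Editor's note: mkdir above creates ancestors by walking the path segment by segment with path.Join. The traversal skeleton on its own, with the Koofr client calls stubbed out (the FilesInfo/FilesNewFolder pair is replaced by a print):

package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	fullPath := "/a/b/c"
	parent := "/"
	for _, part := range strings.Split(fullPath, "/") {
		if part == "" {
			continue // skip empty segments from leading or doubled slashes
		}
		// the real code stats path.Join(parent, part) here and creates
		// the folder only when it is missing
		fmt.Println("ensure", path.Join(parent, part))
		parent = path.Join(parent, part)
	}
	// Output:
	// ensure /a
	// ensure /a/b
	// ensure /a/b/c
}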
// Mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
func (f *Fs) Mkdir(dir string) error {
fullPath := f.fullPath(dir)
return f.mkdir(fullPath)
}

// Rmdir removes an (empty) directory at the given remote path
func (f *Fs) Rmdir(dir string) error {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil {
return translateErrorsDir(err)
}
if len(files) > 0 {
return fs.ErrorDirectoryNotEmpty
}
err = f.client.FilesDelete(f.mountID, f.fullPath(dir))
if err != nil {
return translateErrorsDir(err)
}
return nil
}

// Copy copies a remote Object to the given path
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return nil, fs.ErrorCantCopy
}
err = f.client.FilesCopy((src.(*Object)).fs.mountID,
(src.(*Object)).fs.fullPath((src.(*Object)).remote),
f.mountID, dstFullPath)
if err != nil {
return nil, fs.ErrorCantCopy
}
return f.NewObject(remote)
}

// Move moves a remote Object to the given path
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj := src.(*Object)
dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return nil, fs.ErrorCantMove
}
err = f.client.FilesMove(srcObj.fs.mountID,
srcObj.fs.fullPath(srcObj.remote), f.mountID, dstFullPath)
if err != nil {
return nil, fs.ErrorCantMove
}
return f.NewObject(remote)
}

// DirMove moves a remote directory to the given path
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs := src.(*Fs)
srcFullPath := srcFs.fullPath(srcRemote)
dstFullPath := f.fullPath(dstRemote)
if srcFs.mountID == f.mountID && srcFullPath == dstFullPath {
return fs.ErrorDirExists
}
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return fs.ErrorCantDirMove
}
err = f.client.FilesMove(srcFs.mountID, srcFullPath, f.mountID, dstFullPath)
if err != nil {
return fs.ErrorCantDirMove
}
return nil
}

// About reports space usage (with a MB precision)
func (f *Fs) About() (*fs.Usage, error) {
mount, err := f.client.MountsDetails(f.mountID)
if err != nil {
return nil, err
}
return &fs.Usage{
Total: fs.NewUsageValue(mount.SpaceTotal * 1024 * 1024),
Used: fs.NewUsageValue(mount.SpaceUsed * 1024 * 1024),
Trashed: nil,
Other: nil,
Free: fs.NewUsageValue((mount.SpaceTotal - mount.SpaceUsed) * 1024 * 1024),
Objects: nil,
}, nil
}

// Purge purges the complete Fs
func (f *Fs) Purge() error {
err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath("")))
return err
}

// linkCreate is a Koofr API request for creating a public link
type linkCreate struct {
Path string `json:"path"`
}

// link is a Koofr API response to creating a public link
type link struct {
ID string `json:"id"`
Name string `json:"name"`
Path string `json:"path"`
Counter int64 `json:"counter"`
URL string `json:"url"`
ShortURL string `json:"shortUrl"`
Hash string `json:"hash"`
Host string `json:"host"`
HasPassword bool `json:"hasPassword"`
Password string `json:"password"`
ValidFrom int64 `json:"validFrom"`
ValidTo int64 `json:"validTo"`
PasswordRequired bool `json:"passwordRequired"`
}

// createLink makes a Koofr API call to create a public link
func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link, error) {
linkCreate := linkCreate{
Path: path,
}
linkData := link{}

request := httpclient.RequestData{
Method: "POST",
Path: "/api/v2/mounts/" + mountID + "/links",
ExpectedStatus: []int{http.StatusOK, http.StatusCreated},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: linkCreate,
RespEncoding: httpclient.EncodingJSON,
RespValue: &linkData,
}

_, err := c.Request(&request)
if err != nil {
return nil, err
}
return &linkData, nil
}

// PublicLink creates a public link to the remote path
func (f *Fs) PublicLink(remote string) (string, error) {
linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
if err != nil {
return "", translateErrorsDir(err)
}
return linkData.ShortURL, nil
}
@@ -1,14 +0,0 @@
package koofr_test

import (
"testing"

"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestKoofr:",
})
}
@@ -98,7 +98,7 @@ type Fs struct {
opt Options // parsed config options
features *fs.Features // optional features
srv *mega.Mega // the connection to the server
pacer *fs.Pacer // pacer for API calls
pacer *pacer.Pacer // pacer for API calls
rootNodeMu sync.Mutex // mutex for _rootNode
_rootNode *mega.Node // root node - call findRoot to use this
mkdirMu sync.Mutex // used to serialize calls to mkdir / rmdir
@@ -217,7 +217,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: srv,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.features = (&fs.Features{
DuplicateFiles: true,
@@ -402,27 +402,6 @@ func (f *Fs) clearRoot() {
//log.Printf("cleared root directory")
}

// CleanUp deletes all files currently in trash
func (f *Fs) CleanUp() (err error) {
trash := f.srv.FS.GetTrash()
items := []*mega.Node{}
_, err = f.list(trash, func(item *mega.Node) bool {
items = append(items, item)
return false
})
if err != nil {
return errors.Wrap(err, "CleanUp failed to list items in trash")
}
// similar to f.deleteNode(trash) but with HardDelete as true
for _, item := range items {
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Delete(item, true)
return shouldRetry(err)
})
}
return err
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
@@ -14,8 +14,6 @@ import (
"strings"
"time"

"github.com/ncw/rclone/lib/atexit"

"github.com/ncw/rclone/backend/onedrive/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
@@ -263,7 +261,7 @@ type Fs struct {
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
pacer *pacer.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
driveID string // ID to use for querying Microsoft Graph
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
@@ -337,13 +335,8 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {

// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
// if `relPath` == "", it reads the metadata for the item with that ID.
//
// We address items using the pattern `drives/driveID/items/itemID:/relativePath`
// instead of simply using `drives/driveID/root:/itemPath` because it works for
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(replaceReservedChars(relPath))))
opts := newOptsCall(normalizedID, "GET", ":/"+rest.URLPathEscape(replaceReservedChars(relPath)))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
@@ -482,7 +475,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
driveID: opt.DriveID,
driveType: opt.DriveType,
srv: rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.features = (&fs.Features{
CaseInsensitive: true,
@@ -710,7 +703,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
id := info.GetID()
f.dirCache.Put(remote, id)
d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
d.SetItems(folder.ChildCount)
if folder != nil {
d.SetItems(folder.ChildCount)
}
entries = append(entries, d)
} else {
o, err := f.newObjectWithInfo(remote, info)
@@ -824,6 +819,9 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
return err
}
f.dirCache.FlushDir(dir)
if err != nil {
return err
}
return nil
}
@@ -1342,12 +1340,12 @@ func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
opts = rest.Opts{
Method: "PATCH",
RootURL: rootURL,
Path: "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(leaf)),
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf),
}
} else {
opts = rest.Opts{
Method: "PATCH",
Path: "/root:/" + withTrailingColon(rest.URLPathEscape(o.srvPath())),
Path: "/root:/" + rest.URLPathEscape(o.srvPath()),
}
}
update := api.SetFileSystemInfo{
@@ -1493,40 +1491,23 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
return nil, errors.New("unknown-sized upload not supported")
}

uploadURLChan := make(chan string, 1)
gracefulCancel := func() {
uploadURL, ok := <-uploadURLChan
// Reading from uploadURLChan blocks the atexit process until
// we are able to use uploadURL to cancel the upload
if !ok { // createUploadSession failed - no need to cancel upload
return
}

fs.Debugf(o, "Cancelling multipart upload")
cancelErr := o.cancelUploadSession(uploadURL)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
}
}
cancelFuncHandle := atexit.Register(gracefulCancel)

// Create upload session
fs.Debugf(o, "Starting multipart upload")
session, err := o.createUploadSession(modTime)
if err != nil {
close(uploadURLChan)
atexit.Unregister(cancelFuncHandle)
return nil, err
}
uploadURL := session.UploadURL
uploadURLChan <- uploadURL

// Cancel the session if something went wrong
defer func() {
if err != nil {
fs.Debugf(o, "Error encountered during upload: %v", err)
gracefulCancel()
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.cancelUploadSession(uploadURL)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", err)
}
}
atexit.Unregister(cancelFuncHandle)
}()

// Upload the chunks
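Editor's note: the reworked cancellation above uses a one-slot channel as a handshake: the atexit handler blocks on uploadURLChan until the session URL exists, and a closed channel signals that session creation failed so there is nothing to cancel. A sketch of that pattern with the upload specifics stubbed out (createSession is a placeholder for createUploadSession):

package main

import "fmt"

// createSession is a stand-in for createUploadSession above.
func createSession() (string, error) { return "https://example.invalid/session", nil }

func main() {
	uploadURLChan := make(chan string, 1)

	// Blocks until a URL is available; a closed channel yields
	// ok == false, meaning there is no session to cancel.
	gracefulCancel := func() {
		uploadURL, ok := <-uploadURLChan
		if !ok {
			return
		}
		fmt.Println("cancelling upload session at", uploadURL)
	}

	url, err := createSession()
	if err != nil {
		close(uploadURLChan) // unblocks gracefulCancel with ok == false
		gracefulCancel()
		return
	}
	uploadURLChan <- url // hand the URL over exactly once

	uploadErr := fmt.Errorf("simulated chunk failure")
	if uploadErr != nil {
		gracefulCancel() // uses the stored URL to cancel the session
	}
}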
@@ -1687,21 +1668,6 @@ func getRelativePathInsideBase(base, target string) (string, bool) {
return "", false
}

// Adds a ":" at the end of `remotePath` in a proper manner.
// If `remotePath` already ends with "/", change it to ":/"
// If `remotePath` is "", return "".
// A workaround for #2720 and #3039
func withTrailingColon(remotePath string) string {
if remotePath == "" {
return ""
}

if strings.HasSuffix(remotePath, "/") {
return remotePath[:len(remotePath)-1] + ":/"
}
return remotePath + ":"
}

// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
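Editor's note: withTrailingColon is small enough to verify by eye; here are its three documented cases exercised directly (the function body is copied verbatim from the hunk above):

package main

import (
	"fmt"
	"strings"
)

func withTrailingColon(remotePath string) string {
	if remotePath == "" {
		return ""
	}
	if strings.HasSuffix(remotePath, "/") {
		return remotePath[:len(remotePath)-1] + ":/"
	}
	return remotePath + ":"
}

func main() {
	fmt.Println(withTrailingColon("docs/report")) // docs/report:
	fmt.Println(withTrailingColon("docs/"))       // docs:/
	fmt.Println(withTrailingColon(""))            // (empty string)
}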
@@ -65,7 +65,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
pacer *fs.Pacer // To pace and retry the API calls
pacer *pacer.Pacer // To pace and retry the API calls
session UserSessionInfo // contains the session data
dirCache *dircache.DirCache // Map of directory path to directory id
}
@@ -144,7 +144,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}

f.dirCache = dircache.New(root, "0", f)
@@ -287,6 +287,9 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
return err
}
f.dirCache.FlushDir(dir)
if err != nil {
return err
}
return nil
}
@@ -95,7 +95,7 @@ type Fs struct {
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
pacer *pacer.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
}

@@ -254,7 +254,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.features = (&fs.Features{
CaseInsensitive: false,
@@ -645,9 +645,6 @@ isn't set then "acl" is used instead.`,
|
||||
}, {
|
||||
Value: "GLACIER",
|
||||
Help: "Glacier storage class",
|
||||
}, {
|
||||
Value: "DEEP_ARCHIVE",
|
||||
Help: "Glacier Deep Archive storage class",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
|
||||
@@ -732,14 +729,6 @@ If it is set then rclone will use v2 authentication.
|
||||
Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_accelerate_endpoint",
|
||||
Provider: "AWS",
|
||||
Help: `If true use the AWS S3 accelerated endpoint.
|
||||
|
||||
See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -760,26 +749,25 @@ const (
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Provider string `config:"provider"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
AccessKeyID string `config:"access_key_id"`
|
||||
SecretAccessKey string `config:"secret_access_key"`
|
||||
Region string `config:"region"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
LocationConstraint string `config:"location_constraint"`
|
||||
ACL string `config:"acl"`
|
||||
BucketACL string `config:"bucket_acl"`
|
||||
ServerSideEncryption string `config:"server_side_encryption"`
|
||||
SSEKMSKeyID string `config:"sse_kms_key_id"`
|
||||
StorageClass string `config:"storage_class"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DisableChecksum bool `config:"disable_checksum"`
|
||||
SessionToken string `config:"session_token"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
ForcePathStyle bool `config:"force_path_style"`
|
||||
V2Auth bool `config:"v2_auth"`
|
||||
UseAccelerateEndpoint bool `config:"use_accelerate_endpoint"`
|
||||
Provider string `config:"provider"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
AccessKeyID string `config:"access_key_id"`
|
||||
SecretAccessKey string `config:"secret_access_key"`
|
||||
Region string `config:"region"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
LocationConstraint string `config:"location_constraint"`
|
||||
ACL string `config:"acl"`
|
||||
BucketACL string `config:"bucket_acl"`
|
||||
ServerSideEncryption string `config:"server_side_encryption"`
|
||||
SSEKMSKeyID string `config:"sse_kms_key_id"`
|
||||
StorageClass string `config:"storage_class"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DisableChecksum bool `config:"disable_checksum"`
|
||||
SessionToken string `config:"session_token"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
ForcePathStyle bool `config:"force_path_style"`
|
||||
V2Auth bool `config:"v2_auth"`
|
||||
}
|
||||
|
||||
// Fs represents a remote s3 server
|
||||
@@ -794,7 +782,7 @@ type Fs struct {
|
||||
bucketOKMu sync.Mutex // mutex to protect bucket OK
|
||||
bucketOK bool // true if we have created the bucket
|
||||
bucketDeleted bool // true if we have deleted the bucket
|
||||
pacer *fs.Pacer // To pace the API calls
|
||||
pacer *pacer.Pacer // To pace the API calls
|
||||
srv *http.Client // a plain http client
|
||||
}
|
||||
|
||||
@@ -953,15 +941,14 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
|
||||
if opt.Region == "" {
|
||||
opt.Region = "us-east-1"
|
||||
}
|
||||
if opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.UseAccelerateEndpoint {
|
||||
if opt.Provider == "Alibaba" || opt.Provider == "Netease" {
|
||||
opt.ForcePathStyle = false
|
||||
}
|
||||
awsConfig := aws.NewConfig().
|
||||
WithMaxRetries(maxRetries).
|
||||
WithCredentials(cred).
|
||||
WithHTTPClient(fshttp.NewClient(fs.Config)).
|
||||
WithS3ForcePathStyle(opt.ForcePathStyle).
|
||||
WithS3UseAccelerate(opt.UseAccelerateEndpoint)
|
||||
WithS3ForcePathStyle(opt.ForcePathStyle)
|
||||
if opt.Region != "" {
|
||||
awsConfig.WithRegion(opt.Region)
|
||||
}
|
||||
@@ -1068,7 +1055,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
c: c,
|
||||
bucket: bucket,
|
||||
ses: ses,
|
||||
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
|
||||
srv: fshttp.NewClient(fs.Config),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Package sftp provides a filesystem interface using github.com/pkg/sftp
|
||||
|
||||
// +build !plan9
|
||||
// +build !plan9,go1.9
|
||||
|
||||
package sftp
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
"os/user"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -428,12 +427,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
|
||||
}
|
||||
|
||||
return NewFsWithConnection(name, root, opt, sshConfig)
|
||||
}
|
||||
|
||||
// NewFsWithConnection creates a new Fs object from the name and root and a ssh.ClientConfig. It connects to
|
||||
// the host specified in the ssh.ClientConfig
|
||||
func NewFsWithConnection(name string, root string, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
@@ -814,47 +807,6 @@ func (f *Fs) Hashes() hash.Set {
|
||||
return set
|
||||
}
|
||||
|
||||
// About gets usage stats
|
||||
func (f *Fs) About() (*fs.Usage, error) {
|
||||
c, err := f.getSftpConnection()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "About get SFTP connection")
|
||||
}
|
||||
session, err := c.sshClient.NewSession()
|
||||
f.putSftpConnection(&c, err)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "About put SFTP connection")
|
||||
}
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
session.Stdout = &stdout
|
||||
session.Stderr = &stderr
|
||||
escapedPath := shellEscape(f.root)
|
||||
if f.opt.PathOverride != "" {
|
||||
escapedPath = shellEscape(path.Join(f.opt.PathOverride, f.root))
|
||||
}
|
||||
if len(escapedPath) == 0 {
|
||||
escapedPath = "/"
|
||||
}
|
||||
err = session.Run("df -k " + escapedPath)
|
||||
if err != nil {
|
||||
_ = session.Close()
|
||||
return nil, errors.Wrap(err, "About invocation of df failed. Your remote may not support about.")
|
||||
}
|
||||
_ = session.Close()
|
||||
|
||||
usageTotal, usageUsed, usageAvail := parseUsage(stdout.Bytes())
|
||||
if usageTotal < 0 || usageUsed < 0 || usageAvail < 0 {
|
||||
return nil, errors.Wrap(err, "About failed to parse information")
|
||||
}
|
||||
usage := &fs.Usage{
|
||||
Total: fs.NewUsageValue(usageTotal),
|
||||
Used: fs.NewUsageValue(usageUsed),
|
||||
Free: fs.NewUsageValue(usageAvail),
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// Fs is the filesystem this remote sftp file object is located within
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
@@ -945,34 +897,6 @@ func parseHash(bytes []byte) string {
|
||||
return strings.Split(string(bytes), " ")[0] // Split at hash / filename separator
|
||||
}
|
||||
|
||||
// Parses the byte array output from the SSH session
|
||||
// returned by an invocation of df into
|
||||
// the disk size, used space, and avaliable space on the disk, in that order.
|
||||
// Only works when `df` has output info on only one disk
|
||||
func parseUsage(bytes []byte) (int64, int64, int64) {
|
||||
lines := strings.Split(string(bytes), "\n")
|
||||
if len(lines) < 2 {
|
||||
return -1, -1, -1
|
||||
}
|
||||
split := strings.Fields(lines[1])
|
||||
if len(split) < 6 {
|
||||
return -1, -1, -1
|
||||
}
|
||||
spaceTotal, err := strconv.ParseInt(split[1], 10, 64)
|
||||
if err != nil {
|
||||
return -1, -1, -1
|
||||
}
|
||||
spaceUsed, err := strconv.ParseInt(split[2], 10, 64)
|
||||
if err != nil {
|
||||
return -1, -1, -1
|
||||
}
|
||||
spaceAvail, err := strconv.ParseInt(split[3], 10, 64)
|
||||
if err != nil {
|
||||
return -1, -1, -1
|
||||
}
|
||||
return spaceTotal * 1024, spaceUsed * 1024, spaceAvail * 1024
|
||||
}
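// Worked example for parseUsage: df -k reports 1K blocks, so each field
// is scaled by 1024 on the way out. For the first TestParseUsage case
// below ("/dev/root 91283092 81111888 10154820 89% /"):
//
//	total = 91283092 * 1024 = 93473886208 bytes
//	used  = 81111888 * 1024 = 83058573312 bytes
//	avail = 10154820 * 1024 = 10398535680 bytes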

// Size returns the size in bytes of the remote sftp file
func (o *Object) Size() int64 {
    return o.size

@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,go1.9

package sftp

@@ -35,17 +35,3 @@ func TestParseHash(t *testing.T) {
        assert.Equal(t, test.checksum, got, fmt.Sprintf("Test %d sshOutput = %q", i, test.sshOutput))
    }
}

func TestParseUsage(t *testing.T) {
    for i, test := range []struct {
        sshOutput string
        usage [3]int64
    }{
        {"Filesystem 1K-blocks Used Available Use% Mounted on\n/dev/root 91283092 81111888 10154820 89% /", [3]int64{93473886208, 83058573312, 10398535680}},
        {"Filesystem 1K-blocks Used Available Use% Mounted on\ntmpfs 818256 1636 816620 1% /run", [3]int64{837894144, 1675264, 836218880}},
        {"Filesystem 1024-blocks Used Available Capacity iused ifree %iused Mounted on\n/dev/disk0s2 244277768 94454848 149566920 39% 997820 4293969459 0% /", [3]int64{250140434432, 96721764352, 153156526080}},
    } {
        gotSpaceTotal, gotSpaceUsed, gotSpaceAvail := parseUsage([]byte(test.sshOutput))
        assert.Equal(t, test.usage, [3]int64{gotSpaceTotal, gotSpaceUsed, gotSpaceAvail}, fmt.Sprintf("Test %d sshOutput = %q", i, test.sshOutput))
    }
}

@@ -1,6 +1,6 @@
// Test Sftp filesystem interface

// +build !plan9
// +build !plan9,go1.9

package sftp_test

@@ -1,6 +1,6 @@
// Build for sftp for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9
// +build plan9 !go1.9

package sftp

@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,go1.9

package sftp

@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,go1.9

package sftp

@@ -2,7 +2,6 @@ package swift

import (
    "net/http"
    "time"

    "github.com/ncw/swift"
)
@@ -66,14 +65,6 @@ func (a *auth) Token() string {
    return a.parentAuth.Token()
}

// Expires returns the time the token expires if known or Zero if not.
func (a *auth) Expires() (t time.Time) {
    if do, ok := a.parentAuth.(swift.Expireser); ok {
        t = do.Expires()
    }
    return t
}

// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
    if a.parentAuth == nil {
@@ -83,7 +74,4 @@ func (a *auth) CdnUrl() string { // nolint
}

// Check the interfaces are satisfied
var (
    _ swift.Authenticator = (*auth)(nil)
    _ swift.Expireser = (*auth)(nil)
)
var _ swift.Authenticator = (*auth)(nil)

@@ -216,7 +216,7 @@ type Fs struct {
    containerOK bool // true if we have created the container
    segmentsContainer string // container to store the segments (if any) in
    noCheckContainer bool // don't check the container before creating it
    pacer *fs.Pacer // To pace the API calls
    pacer *pacer.Pacer // To pace the API calls
}

// Object describes a swift object
@@ -286,31 +286,6 @@ func shouldRetry(err error) (bool, error) {
    return fserrors.ShouldRetry(err), err
}

// shouldRetryHeaders returns a boolean as to whether this err
// deserves to be retried. It reads the headers passed in looking for
// `Retry-After`. It returns the err as a convenience
func shouldRetryHeaders(headers swift.Headers, err error) (bool, error) {
    if swiftError, ok := err.(*swift.Error); ok && swiftError.StatusCode == 429 {
        if value := headers["Retry-After"]; value != "" {
            retryAfter, parseErr := strconv.Atoi(value)
            if parseErr != nil {
                fs.Errorf(nil, "Failed to parse Retry-After: %q: %v", value, parseErr)
            } else {
                duration := time.Second * time.Duration(retryAfter)
                if duration <= 60*time.Second {
                    // Do a short sleep immediately
                    fs.Debugf(nil, "Sleeping for %v to obey Retry-After", duration)
                    time.Sleep(duration)
                    return true, err
                }
                // Delay a long sleep for a retry
                return false, fserrors.NewErrorRetryAfter(duration)
            }
        }
    }
    return shouldRetry(err)
}
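// In effect (see TestInternalShouldRetryHeaders below): a 429 carrying
// "Retry-After: 1" sleeps about a second and returns (true, err) so the
// pacer retries immediately, while "Retry-After: 3600" returns
// (false, fserrors.NewErrorRetryAfter(1h)) so the retry is deferred to a
// higher level instead of blocking a transfer for an hour.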

// Pattern to match a swift path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)

@@ -426,7 +401,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
        segmentsContainer: container + "_segments",
        root: directory,
        noCheckContainer: noCheckContainer,
        pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
        pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
    }
    f.features = (&fs.Features{
        ReadMimeType: true,
@@ -438,9 +413,8 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
    // Check to see if the object exists - ignoring directory markers
    var info swift.Object
    err = f.pacer.Call(func() (bool, error) {
        var rxHeaders swift.Headers
        info, rxHeaders, err = f.c.Object(container, directory)
        return shouldRetryHeaders(rxHeaders, err)
        info, _, err = f.c.Object(container, directory)
        return shouldRetry(err)
    })
    if err == nil && info.ContentType != directoryMarkerContentType {
        f.root = path.Dir(directory)
@@ -749,9 +723,8 @@ func (f *Fs) Mkdir(dir string) error {
    var err error = swift.ContainerNotFound
    if !f.noCheckContainer {
        err = f.pacer.Call(func() (bool, error) {
            var rxHeaders swift.Headers
            _, rxHeaders, err = f.c.Container(f.container)
            return shouldRetryHeaders(rxHeaders, err)
            _, _, err = f.c.Container(f.container)
            return shouldRetry(err)
        })
    }
    if err == swift.ContainerNotFound {
@@ -843,9 +816,8 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
    }
    srcFs := srcObj.fs
    err = f.pacer.Call(func() (bool, error) {
        var rxHeaders swift.Headers
        rxHeaders, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
        return shouldRetryHeaders(rxHeaders, err)
        _, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
        return shouldRetry(err)
    })
    if err != nil {
        return nil, err
@@ -955,7 +927,7 @@ func (o *Object) readMetaData() (err error) {
    var h swift.Headers
    err = o.fs.pacer.Call(func() (bool, error) {
        info, h, err = o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
        return shouldRetryHeaders(h, err)
        return shouldRetry(err)
    })
    if err != nil {
        if err == swift.ObjectNotFound {
@@ -1030,9 +1002,8 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
    headers := fs.OpenOptionHeaders(options)
    _, isRanging := headers["Range"]
    err = o.fs.pacer.Call(func() (bool, error) {
        var rxHeaders swift.Headers
        in, rxHeaders, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
        return shouldRetryHeaders(rxHeaders, err)
        in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
        return shouldRetry(err)
    })
    return
}
@@ -1103,9 +1074,8 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
    // Create the segmentsContainer if it doesn't exist
    var err error
    err = o.fs.pacer.Call(func() (bool, error) {
        var rxHeaders swift.Headers
        _, rxHeaders, err = o.fs.c.Container(o.fs.segmentsContainer)
        return shouldRetryHeaders(rxHeaders, err)
        _, _, err = o.fs.c.Container(o.fs.segmentsContainer)
        return shouldRetry(err)
    })
    if err == swift.ContainerNotFound {
        headers := swift.Headers{}
@@ -1145,9 +1115,8 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
        segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
        fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
        err = o.fs.pacer.CallNoRetry(func() (bool, error) {
            var rxHeaders swift.Headers
            rxHeaders, err = o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
            return shouldRetryHeaders(rxHeaders, err)
            _, err = o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
            return shouldRetry(err)
        })
        if err != nil {
            return "", err
@@ -1160,9 +1129,8 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
    emptyReader := bytes.NewReader(nil)
    manifestName := o.fs.root + o.remote
    err = o.fs.pacer.Call(func() (bool, error) {
        var rxHeaders swift.Headers
        rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
        return shouldRetryHeaders(rxHeaders, err)
        _, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
        return shouldRetry(err)
    })
    return uniquePrefix + "/", err
}
@@ -1206,7 +1174,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
    var rxHeaders swift.Headers
    err = o.fs.pacer.CallNoRetry(func() (bool, error) {
        rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
        return shouldRetryHeaders(rxHeaders, err)
        return shouldRetry(err)
    })
    if err != nil {
        return err

@@ -1,13 +1,6 @@
package swift

import (
    "testing"
    "time"

    "github.com/ncw/rclone/fs/fserrors"
    "github.com/ncw/swift"
    "github.com/stretchr/testify/assert"
)
import "testing"

func TestInternalUrlEncode(t *testing.T) {
    for _, test := range []struct {
@@ -30,37 +23,3 @@ func TestInternalUrlEncode(t *testing.T) {
        }
    }
}

func TestInternalShouldRetryHeaders(t *testing.T) {
    headers := swift.Headers{
        "Content-Length": "64",
        "Content-Type": "text/html; charset=UTF-8",
        "Date": "Mon: 18 Mar 2019 12:11:23 GMT",
        "Retry-After": "1",
    }
    err := &swift.Error{
        StatusCode: 429,
        Text: "Too Many Requests",
    }

    // Short sleep should just do the sleep
    start := time.Now()
    retry, gotErr := shouldRetryHeaders(headers, err)
    dt := time.Since(start)
    assert.True(t, retry)
    assert.Equal(t, err, gotErr)
    assert.True(t, dt > time.Second/2)

    // Long sleep should return RetryError
    headers["Retry-After"] = "3600"
    start = time.Now()
    retry, gotErr = shouldRetryHeaders(headers, err)
    dt = time.Since(start)
    assert.True(t, dt < time.Second)
    assert.False(t, retry)
    assert.Equal(t, true, fserrors.IsRetryAfterError(gotErr))
    after := gotErr.(fserrors.RetryAfter).RetryAfter()
    dt = after.Sub(start)
    assert.True(t, dt >= time.Hour-time.Second && dt <= time.Hour+time.Second)

}

@@ -101,7 +101,7 @@ type Fs struct {
    endpoint *url.URL // URL of the host
    endpointURL string // endpoint as a string
    srv *rest.Client // the connection to the one drive server
    pacer *fs.Pacer // pacer for API calls
    pacer *pacer.Pacer // pacer for API calls
    precision time.Duration // mod time precision
    canStream bool // set if can stream
    useOCMtime bool // set if can use X-OC-Mtime
@@ -318,7 +318,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        endpoint: u,
        endpointURL: u.String(),
        srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()),
        pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
        pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
        precision: fs.ModTimeNotSupported,
    }
    f.features = (&fs.Features{
@@ -644,18 +644,10 @@ func (f *Fs) _mkdir(dirPath string) error {
        Path: dirPath,
        NoResponse: true,
    }
    err := f.pacer.Call(func() (bool, error) {
    return f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.Call(&opts)
        return shouldRetry(resp, err)
    })
    if apiErr, ok := err.(*api.Error); ok {
        // already exists
        // owncloud returns 423/StatusLocked if the create is already in progress
        if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable || apiErr.StatusCode == http.StatusLocked {
            return nil
        }
    }
    return err
}

// mkdir makes the directory and parents using native paths
@@ -663,7 +655,12 @@ func (f *Fs) mkdir(dirPath string) error {
    // defer log.Trace(dirPath, "")("")
    err := f._mkdir(dirPath)
    if apiErr, ok := err.(*api.Error); ok {
        // parent does not exist so create it first then try again
        // already exists
        // owncloud returns 423/StatusLocked if the create is already in progress
        if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable || apiErr.StatusCode == http.StatusLocked {
            return nil
        }
        // parent does not exist
        if apiErr.StatusCode == http.StatusConflict {
            err = f.mkParentDir(dirPath)
            if err == nil {
@@ -916,13 +913,11 @@ func (f *Fs) About() (*fs.Usage, error) {
        return nil, errors.Wrap(err, "about call failed")
    }
    usage := &fs.Usage{}
    if q.Available != 0 || q.Used != 0 {
        if q.Available >= 0 && q.Used >= 0 {
            usage.Total = fs.NewUsageValue(q.Available + q.Used)
        }
        if q.Used >= 0 {
            usage.Used = fs.NewUsageValue(q.Used)
        }
    if q.Available >= 0 && q.Used >= 0 {
        usage.Total = fs.NewUsageValue(q.Available + q.Used)
    }
    if q.Used >= 0 {
        usage.Used = fs.NewUsageValue(q.Used)
    }
    return usage, nil
}

@@ -93,7 +93,7 @@ type Fs struct {
    opt Options // parsed options
    features *fs.Features // optional features
    srv *rest.Client // the connection to the yandex server
    pacer *fs.Pacer // pacer for API calls
    pacer *pacer.Pacer // pacer for API calls
    diskRoot string // root path with "disk:/" container name
}

@@ -269,7 +269,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        name: name,
        opt: *opt,
        srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
        pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
        pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
    }
    f.setRoot(root)
    f.features = (&fs.Features{

@@ -17,18 +17,14 @@ import (
    "io/ioutil"
    "log"
    "net/http"
    "net/url"
    "os"
    "os/exec"
    "path"
    "path/filepath"
    "regexp"
    "runtime"
    "strings"
    "time"

    "github.com/ncw/rclone/lib/rest"
    "golang.org/x/net/html"
    "golang.org/x/sys/unix"
)

@@ -37,7 +33,6 @@ var (
    install = flag.Bool("install", false, "Install the downloaded package using sudo dpkg -i.")
    extract = flag.String("extract", "", "Extract the named executable from the .tar.gz and install into bindir.")
    bindir = flag.String("bindir", defaultBinDir(), "Directory to install files downloaded with -extract.")
    useAPI = flag.Bool("use-api", false, "Use the API for finding the release instead of scraping the page.")
    // Globals
    matchProject = regexp.MustCompile(`^([\w-]+)/([\w-]+)$`)
    osAliases = map[string][]string{
@@ -214,57 +209,6 @@ func getAsset(project string, matchName *regexp.Regexp) (string, string) {
    return "", ""
}

// Get an asset URL and name by scraping the downloads page
//
// This doesn't use the API so isn't rate limited when not using GITHUB login details
func getAssetFromReleasesPage(project string, matchName *regexp.Regexp) (assetURL string, assetName string) {
    baseURL := "https://github.com/" + project + "/releases"
    log.Printf("Fetching asset info for %q from %q", project, baseURL)
    base, err := url.Parse(baseURL)
    if err != nil {
        log.Fatalf("URL Parse failed: %v", err)
    }
    resp, err := http.Get(baseURL)
    if err != nil {
        log.Fatalf("Failed to fetch release info %q: %v", baseURL, err)
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        log.Printf("Error: %s", readBody(resp.Body))
        log.Fatalf("Bad status %d when fetching %q release info: %s", resp.StatusCode, baseURL, resp.Status)
    }
    doc, err := html.Parse(resp.Body)
    if err != nil {
        log.Fatalf("Failed to parse web page: %v", err)
    }
    var walk func(*html.Node)
    walk = func(n *html.Node) {
        if n.Type == html.ElementNode && n.Data == "a" {
            for _, a := range n.Attr {
                if a.Key == "href" {
                    if name := path.Base(a.Val); matchName.MatchString(name) && isOurOsArch(name) {
                        if u, err := rest.URLJoin(base, a.Val); err == nil {
                            if assetName == "" {
                                assetName = name
                                assetURL = u.String()
                            }
                        }
                    }
                    break
                }
            }
        }
        for c := n.FirstChild; c != nil; c = c.NextSibling {
            walk(c)
        }
    }
    walk(doc)
    if assetName == "" || assetURL == "" {
        log.Fatalf("Didn't find URL in page")
    }
    return assetURL, assetName
}

// isOurOsArch returns true if s contains our OS and our Arch
func isOurOsArch(s string) bool {
    s = strings.ToLower(s)
@@ -402,12 +346,7 @@ func main() {
        log.Fatalf("Invalid regexp for name %q: %v", nameRe, err)
    }

    var assetURL, assetName string
    if *useAPI {
        assetURL, assetName = getAsset(project, matchName)
    } else {
        assetURL, assetName = getAssetFromReleasesPage(project, matchName)
    }
    assetURL, assetName := getAsset(project, matchName)
    fileName := filepath.Join(os.TempDir(), assetName)
    getFile(assetURL, fileName)

@@ -36,7 +36,6 @@ docs = [
    "http.md",
    "hubic.md",
    "jottacloud.md",
    "koofr.md",
    "mega.md",
    "azureblob.md",
    "onedrive.md",

cmd/cmd.go
@@ -230,31 +230,22 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
    SigInfoHandler()
    for try := 1; try <= *retries; try++ {
        err = f()
        fs.CountError(err)
        if !Retry || !accounting.Stats.Errored() {
        if !Retry || (err == nil && !accounting.Stats.Errored()) {
            if try > 1 {
                fs.Errorf(nil, "Attempt %d/%d succeeded", try, *retries)
            }
            break
        }
        if accounting.Stats.HadFatalError() {
        if fserrors.IsFatalError(err) || accounting.Stats.HadFatalError() {
            fs.Errorf(nil, "Fatal error received - not attempting retries")
            break
        }
        if accounting.Stats.Errored() && !accounting.Stats.HadRetryError() {
        if fserrors.IsNoRetryError(err) || (accounting.Stats.Errored() && !accounting.Stats.HadRetryError()) {
            fs.Errorf(nil, "Can't retry this error - not attempting retries")
            break
        }
        if retryAfter := accounting.Stats.RetryAfter(); !retryAfter.IsZero() {
            d := retryAfter.Sub(time.Now())
            if d > 0 {
                fs.Logf(nil, "Received retry after error - sleeping until %s (%v)", retryAfter.Format(time.RFC3339Nano), d)
                time.Sleep(d)
            }
        }
        lastErr := accounting.Stats.GetLastError()
        if lastErr != nil {
            fs.Errorf(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, accounting.Stats.GetErrors(), lastErr)
        if err != nil {
            fs.Errorf(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, accounting.Stats.GetErrors(), err)
        } else {
            fs.Errorf(nil, "Attempt %d/%d failed with %d errors", try, *retries, accounting.Stats.GetErrors())
        }
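// A sketch of the per-attempt classification the loop above now does,
// assuming the fserrors helpers named in this hunk:
//
//	switch {
//	case fserrors.IsFatalError(err):      // stop: no retries at all
//	case fserrors.IsNoRetryError(err):    // stop: this error can't be retried
//	case fserrors.IsRetryAfterError(err): // sleep until the given time, retry
//	default:                              // ordinary error: retry up to *retries
//	}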
@@ -350,7 +341,8 @@ func initConfig() {
    configflags.SetFlags()

    // Load filters
    err := filterflags.Reload()
    var err error
    filter.Active, err = filter.NewFilter(&filterflags.Opt)
    if err != nil {
        log.Fatalf("Failed to load filters: %v", err)
    }

@@ -388,11 +388,7 @@ func (fsys *FS) Release(path string, fh uint64) (errc int) {
        return errc
    }
    _ = fsys.closeHandle(fh)
    // Run the Release asynchronously, ignoring errors
    go func() {
        _ = handle.Release()
    }()
    return 0
    return translateError(handle.Release())
}

// Unlink removes a file.

@@ -127,7 +127,7 @@ func waitFor(fn func() bool) (ok bool) {
func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error) {
    fs.Debugf(f, "Mounting on %q", mountpoint)

    // Check the mountpoint - in Windows the mountpoint mustn't exist before the mount
    // Check the mountpoint - in Windows the mountpoint musn't exist before the mount
    if runtime.GOOS != "windows" {
        fi, err := os.Stat(mountpoint)
        if err != nil {

@@ -7,13 +7,8 @@ import (
    "github.com/spf13/cobra"
)

var (
    createEmptySrcDirs = false
)

func init() {
    cmd.Root.AddCommand(commandDefintion)
    commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after copy")
}

var commandDefintion = &cobra.Command{
@@ -74,7 +69,7 @@ changed recently very efficiently like this:
    fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
    cmd.Run(true, true, command, func() error {
        if srcFileName == "" {
            return sync.CopyDir(fdst, fsrc, createEmptySrcDirs)
            return sync.CopyDir(fdst, fsrc)
        }
        return operations.CopyFile(fdst, fsrc, srcFileName, srcFileName)
    })

@@ -48,7 +48,7 @@ destination.
    fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
    cmd.Run(true, true, command, func() error {
        if srcFileName == "" {
            return sync.CopyDir(fdst, fsrc, false)
            return sync.CopyDir(fdst, fsrc)
        }
        return operations.CopyFile(fdst, fsrc, dstFileName, srcFileName)
    })

@@ -45,7 +45,7 @@ __rclone_custom_func() {
    else
        __rclone_init_completion -n : || return
    fi
    if [[ $cur != *:* ]]; then
    if [[ $cur =~ ^[[:alnum:]]*$ ]]; then
        local remote
        while IFS= read -r remote; do
            [[ $remote != $cur* ]] || COMPREPLY+=("$remote")
@@ -54,10 +54,10 @@ __rclone_custom_func() {
            local paths=("$cur"*)
            [[ ! -f ${paths[0]} ]] || COMPREPLY+=("${paths[@]}")
        fi
    else
    elif [[ $cur =~ ^[[:alnum:]]+: ]]; then
        local path=${cur#*:}
        if [[ $path == */* ]]; then
            local prefix=$(eval printf '%s' "${path%/*}")
            local prefix=${path%/*}
        else
            local prefix=
        fi
@@ -66,7 +66,6 @@ __rclone_custom_func() {
            local reply=${prefix:+$prefix/}$line
            [[ $reply != $path* ]] || COMPREPLY+=("$reply")
        done < <(rclone lsf "${cur%%:*}:$prefix" 2>/dev/null)
        [[ ! ${COMPREPLY[@]} ]] || compopt -o filenames
    fi
    [[ ! ${COMPREPLY[@]} ]] || compopt -o nospace
    fi

@@ -2,7 +2,7 @@ package lshelp

// Help describes the common help for all the list commands
var Help = `
Any of the filtering options can be applied to this command.
Any of the filtering options can be applied to this commmand.

There are several related list commands

@@ -10,6 +10,7 @@ import (
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/hash"
    "github.com/ncw/rclone/fs/operations"
    "github.com/ncw/rclone/fs/walk"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)
@@ -66,11 +67,8 @@ output:
s - size
t - modification time
h - hash
i - ID of object
o - Original ID of underlying object
i - ID of object if known
m - MimeType of object if known
e - encrypted name
T - tier of storage if known, eg "Hot" or "Cool"

So if you wanted the path, size and modification time, you would use
--format "pst", or maybe --format "tsp" to put the path last.
@@ -163,12 +161,6 @@ func Lsf(fsrc fs.Fs, out io.Writer) error {
    list.SetCSV(csv)
    list.SetDirSlash(dirSlash)
    list.SetAbsolute(absolute)
    var opt = operations.ListJSONOpt{
        NoModTime: true,
        DirsOnly: dirsOnly,
        FilesOnly: filesOnly,
        Recurse: recurse,
    }

    for _, char := range format {
        switch char {
@@ -176,31 +168,38 @@ func Lsf(fsrc fs.Fs, out io.Writer) error {
            list.AddPath()
        case 't':
            list.AddModTime()
            opt.NoModTime = false
        case 's':
            list.AddSize()
        case 'h':
            list.AddHash(hashType)
            opt.ShowHash = true
        case 'i':
            list.AddID()
        case 'm':
            list.AddMimeType()
        case 'e':
            list.AddEncrypted()
            opt.ShowEncrypted = true
        case 'o':
            list.AddOrigID()
            opt.ShowOrigIDs = true
        case 'T':
            list.AddTier()
        default:
            return errors.Errorf("Unknown format character %q", char)
        }
    }

    return operations.ListJSON(fsrc, "", &opt, func(item *operations.ListJSONItem) error {
        _, _ = fmt.Fprintln(out, list.Format(item))
    return walk.Walk(fsrc, "", false, operations.ConfigMaxDepth(recurse), func(path string, entries fs.DirEntries, err error) error {
        if err != nil {
            fs.CountError(err)
            fs.Errorf(path, "error listing: %v", err)
            return nil
        }
        for _, entry := range entries {
            _, isDir := entry.(fs.Directory)
            if isDir {
                if filesOnly {
                    continue
                }
            } else {
                if dirsOnly {
                    continue
                }
            }
            _, _ = fmt.Fprintln(out, list.Format(entry))
        }
        return nil
    })
}
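// Example: --format "tsp" drives the switch above to call
// list.AddModTime(), list.AddSize() and list.AddPath() in that order,
// so each entry prints modification time, size and path joined by the
// configured separator, with a single listing pass doing the work.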

@@ -23,8 +23,6 @@ func init() {
    commandDefintion.Flags().BoolVarP(&opt.NoModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).")
    commandDefintion.Flags().BoolVarP(&opt.ShowEncrypted, "encrypted", "M", false, "Show the encrypted names.")
    commandDefintion.Flags().BoolVarP(&opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object.")
    commandDefintion.Flags().BoolVarP(&opt.FilesOnly, "files-only", "", false, "Show only files in the listing.")
    commandDefintion.Flags().BoolVarP(&opt.DirsOnly, "dirs-only", "", false, "Show only directories in the listing.")
}

var commandDefintion = &cobra.Command{
@@ -57,10 +55,6 @@ If --no-modtime is specified then ModTime will be blank.

If --encrypted is not specified the Encrypted won't be emitted.

If --dirs-only is not specified files in addition to directories are returned

If --files-only is not specified directories in addition to the files will be returned.

The Path field will only show folders below the remote path being listed.
If "remote:path" contains the file "subfolder/file.txt", the Path for "file.txt"
will be "subfolder/file.txt", not "remote:path/subfolder/file.txt".

@@ -3,7 +3,6 @@
package mount

import (
    "context"
    "os"
    "time"

@@ -13,6 +12,7 @@ import (
    "github.com/ncw/rclone/fs/log"
    "github.com/ncw/rclone/vfs"
    "github.com/pkg/errors"
    "golang.org/x/net/context" // switch to "context" when we stop supporting go1.8
)

// Dir represents a directory entry
@@ -20,7 +20,7 @@ type Dir struct {
    *vfs.Dir
}

// Check interface satisfied
// Check interface satsified
var _ fusefs.Node = (*Dir)(nil)

// Attr updates the attributes of a directory

@@ -3,7 +3,6 @@
package mount

import (
    "context"
    "io"
    "time"

@@ -12,6 +11,7 @@ import (
    "github.com/ncw/rclone/cmd/mountlib"
    "github.com/ncw/rclone/fs/log"
    "github.com/ncw/rclone/vfs"
    "golang.org/x/net/context" // switch to "context" when we stop supporting go1.8
)

// File represents a file

@@ -5,7 +5,6 @@
package mount

import (
    "context"
    "syscall"

    "bazil.org/fuse"
@@ -16,6 +15,7 @@ import (
    "github.com/ncw/rclone/vfs"
    "github.com/ncw/rclone/vfs/vfsflags"
    "github.com/pkg/errors"
    "golang.org/x/net/context" // switch to "context" when we stop supporting go1.8
)

// FS represents the top level filing system
@@ -24,7 +24,7 @@ type FS struct {
    f fs.Fs
}

// Check interface satisfied
// Check interface satistfied
var _ fusefs.FS = (*FS)(nil)

// NewFS makes a new FS
@@ -46,7 +46,7 @@ func (f *FS) Root() (node fusefs.Node, err error) {
    return &Dir{root}, nil
}

// Check interface satisfied
// Check interface satsified
var _ fusefs.FSStatfser = (*FS)(nil)

// Statfs is called to obtain file system metadata.

@@ -3,13 +3,13 @@
package mount

import (
    "context"
    "io"

    "bazil.org/fuse"
    fusefs "bazil.org/fuse/fs"
    "github.com/ncw/rclone/fs/log"
    "github.com/ncw/rclone/vfs"
    "golang.org/x/net/context" // switch to "context" when we stop supporting go1.8
)

// FileHandle is an open for read file handle on a File
@@ -80,9 +80,5 @@ var _ fusefs.HandleReleaser = (*FileHandle)(nil)
// the kernel
func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) (err error) {
    defer log.Trace(fh, "")("err=%v", &err)
    // Run the Release asynchronously, ignoring errors
    go func() {
        _ = fh.Handle.Release()
    }()
    return nil
    return translateError(fh.Handle.Release())
}

@@ -10,13 +10,11 @@ import (
// Globals
var (
    deleteEmptySrcDirs = false
    createEmptySrcDirs = false
)

func init() {
    cmd.Root.AddCommand(commandDefintion)
    commandDefintion.Flags().BoolVarP(&deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move")
    commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move")
}

var commandDefintion = &cobra.Command{
@@ -54,7 +52,7 @@ can speed transfers up greatly.
    fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
    cmd.Run(true, true, command, func() error {
        if srcFileName == "" {
            return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs, createEmptySrcDirs)
            return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs)
        }
        return operations.MoveFile(fdst, fsrc, srcFileName, srcFileName)
    })

@@ -19,7 +19,7 @@ If source:path is a file or directory then it moves it to a file or
directory named dest:path.

This can be used to rename files or upload single files to other than
their existing name. If the source is a directory then it acts exactly
their existing name. If the source is a directory then it acts exacty
like the move command.

So
@@ -52,7 +52,7 @@ transfer.

    cmd.Run(true, true, command, func() error {
        if srcFileName == "" {
            return sync.MoveDir(fdst, fsrc, false, false)
            return sync.MoveDir(fdst, fsrc, false)
        }
        return operations.MoveFile(fdst, fsrc, dstFileName, srcFileName)
    })

@@ -10,7 +10,6 @@ import (
    "sort"
    "strings"

    runewidth "github.com/mattn/go-runewidth"
    "github.com/ncw/rclone/cmd"
    "github.com/ncw/rclone/cmd/ncdu/scan"
    "github.com/ncw/rclone/fs"
@@ -123,7 +122,7 @@ func Printf(x, y int, fg, bg termbox.Attribute, format string, args ...interface
func Line(x, y, xmax int, fg, bg termbox.Attribute, spacer rune, msg string) {
    for _, c := range msg {
        termbox.SetCell(x, y, c, fg, bg)
        x += runewidth.RuneWidth(c)
        x++
        if x >= xmax {
            return
        }
@@ -361,7 +360,7 @@ func (u *UI) Draw() error {
        Linef(0, h-1, w, termbox.ColorBlack, termbox.ColorWhite, ' ', "Total usage: %v, Objects: %d%s", fs.SizeSuffix(size), count, message)
    }

    // Show the box on top if required
    // Show the box on top if requred
    if u.showBox {
        u.Box()
    }

@@ -82,7 +82,7 @@ var (
    progressMu sync.Mutex
)

// printProgress prints the progress with an optional log
// printProgress prings the progress with an optional log
func printProgress(logMessage string) {
    progressMu.Lock()
    defer progressMu.Unlock()

@@ -1,184 +0,0 @@
package dlna

const connectionManagerServiceDescription = `<?xml version="1.0" encoding="UTF-8"?>
<scpd xmlns="urn:schemas-upnp-org:service-1-0">
  <specVersion>
    <major>1</major>
    <minor>0</minor>
  </specVersion>
  <actionList>
    <action>
      <name>GetProtocolInfo</name>
      <argumentList>
        <argument>
          <name>Source</name>
          <direction>out</direction>
          <relatedStateVariable>SourceProtocolInfo</relatedStateVariable>
        </argument>
        <argument>
          <name>Sink</name>
          <direction>out</direction>
          <relatedStateVariable>SinkProtocolInfo</relatedStateVariable>
        </argument>
      </argumentList>
    </action>
    <action>
      <name>PrepareForConnection</name>
      <argumentList>
        <argument>
          <name>RemoteProtocolInfo</name>
          <direction>in</direction>
          <relatedStateVariable>A_ARG_TYPE_ProtocolInfo</relatedStateVariable>
        </argument>
        <argument>
          <name>PeerConnectionManager</name>
          <direction>in</direction>
          <relatedStateVariable>A_ARG_TYPE_ConnectionManager</relatedStateVariable>
        </argument>
        <argument>
          <name>PeerConnectionID</name>
          <direction>in</direction>
          <relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
        </argument>
        <argument>
          <name>Direction</name>
          <direction>in</direction>
          <relatedStateVariable>A_ARG_TYPE_Direction</relatedStateVariable>
        </argument>
        <argument>
          <name>ConnectionID</name>
          <direction>out</direction>
          <relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
        </argument>
        <argument>
          <name>AVTransportID</name>
          <direction>out</direction>
          <relatedStateVariable>A_ARG_TYPE_AVTransportID</relatedStateVariable>
        </argument>
        <argument>
          <name>RcsID</name>
          <direction>out</direction>
          <relatedStateVariable>A_ARG_TYPE_RcsID</relatedStateVariable>
        </argument>
      </argumentList>
    </action>
    <action>
      <name>ConnectionComplete</name>
      <argumentList>
        <argument>
          <name>ConnectionID</name>
          <direction>in</direction>
          <relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
        </argument>
      </argumentList>
    </action>
    <action>
      <name>GetCurrentConnectionIDs</name>
      <argumentList>
        <argument>
          <name>ConnectionIDs</name>
          <direction>out</direction>
          <relatedStateVariable>CurrentConnectionIDs</relatedStateVariable>
        </argument>
      </argumentList>
    </action>
    <action>
      <name>GetCurrentConnectionInfo</name>
      <argumentList>
        <argument>
          <name>ConnectionID</name>
          <direction>in</direction>
          <relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
        </argument>
        <argument>
          <name>RcsID</name>
          <direction>out</direction>
          <relatedStateVariable>A_ARG_TYPE_RcsID</relatedStateVariable>
        </argument>
        <argument>
          <name>AVTransportID</name>
          <direction>out</direction>
          <relatedStateVariable>A_ARG_TYPE_AVTransportID</relatedStateVariable>
        </argument>
        <argument>
          <name>ProtocolInfo</name>
          <direction>out</direction>
          <relatedStateVariable>A_ARG_TYPE_ProtocolInfo</relatedStateVariable>
        </argument>
        <argument>
          <name>PeerConnectionManager</name>
          <direction>out</direction>
          <relatedStateVariable>A_ARG_TYPE_ConnectionManager</relatedStateVariable>
        </argument>
        <argument>
          <name>PeerConnectionID</name>
          <direction>out</direction>
          <relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
        </argument>
        <argument>
          <name>Direction</name>
          <direction>out</direction>
          <relatedStateVariable>A_ARG_TYPE_Direction</relatedStateVariable>
        </argument>
        <argument>
          <name>Status</name>
          <direction>out</direction>
          <relatedStateVariable>A_ARG_TYPE_ConnectionStatus</relatedStateVariable>
        </argument>
      </argumentList>
    </action>
  </actionList>
  <serviceStateTable>
    <stateVariable sendEvents="yes">
      <name>SourceProtocolInfo</name>
      <dataType>string</dataType>
    </stateVariable>
    <stateVariable sendEvents="yes">
      <name>SinkProtocolInfo</name>
      <dataType>string</dataType>
    </stateVariable>
    <stateVariable sendEvents="yes">
      <name>CurrentConnectionIDs</name>
      <dataType>string</dataType>
    </stateVariable>
    <stateVariable sendEvents="no">
      <name>A_ARG_TYPE_ConnectionStatus</name>
      <dataType>string</dataType>
      <allowedValueList>
        <allowedValue>OK</allowedValue>
        <allowedValue>ContentFormatMismatch</allowedValue>
        <allowedValue>InsufficientBandwidth</allowedValue>
        <allowedValue>UnreliableChannel</allowedValue>
        <allowedValue>Unknown</allowedValue>
      </allowedValueList>
    </stateVariable>
    <stateVariable sendEvents="no">
      <name>A_ARG_TYPE_ConnectionManager</name>
      <dataType>string</dataType>
    </stateVariable>
    <stateVariable sendEvents="no">
      <name>A_ARG_TYPE_Direction</name>
      <dataType>string</dataType>
      <allowedValueList>
        <allowedValue>Input</allowedValue>
        <allowedValue>Output</allowedValue>
      </allowedValueList>
    </stateVariable>
    <stateVariable sendEvents="no">
      <name>A_ARG_TYPE_ProtocolInfo</name>
      <dataType>string</dataType>
    </stateVariable>
    <stateVariable sendEvents="no">
      <name>A_ARG_TYPE_ConnectionID</name>
      <dataType>i4</dataType>
    </stateVariable>
    <stateVariable sendEvents="no">
      <name>A_ARG_TYPE_AVTransportID</name>
      <dataType>i4</dataType>
    </stateVariable>
    <stateVariable sendEvents="no">
      <name>A_ARG_TYPE_RcsID</name>
      <dataType>i4</dataType>
    </stateVariable>
  </serviceStateTable>
</scpd>`
@@ -84,21 +84,6 @@ var services = []*service{
        },
        SCPD: contentDirectoryServiceDescription,
    },
    {
        Service: upnp.Service{
            ServiceType: "urn:schemas-upnp-org:service:ConnectionManager:1",
            ServiceId: "urn:upnp-org:serviceId:ConnectionManager",
            ControlURL: serviceControlURL,
        },
        SCPD: connectionManagerServiceDescription,
    },
}

func init() {
    for _, s := range services {
        p := path.Join("/scpd", s.ServiceId)
        s.SCPDURL = p
    }
}

func devices() []string {
@@ -265,6 +250,9 @@ func (s *server) initMux(mux *http.ServeMux) {

    // Install handlers to serve SCPD for each UPnP service.
    for _, s := range services {
        p := path.Join("/scpd", s.ServiceId)
        s.SCPDURL = p

        mux.HandleFunc(s.SCPDURL, func(serviceDesc string) http.HandlerFunc {
            return func(w http.ResponseWriter, r *http.Request) {
                w.Header().Set("content-type", `text/xml; charset="utf-8"`)

@@ -1,3 +1,5 @@
// +build go1.8

package dlna

import (
@@ -20,11 +22,11 @@ import (

var (
    dlnaServer *server
    testURL string
)

const (
    testBindAddress = "localhost:0"
    testBindAddress = "localhost:51777"
    testURL = "http://" + testBindAddress + "/"
)

func startServer(t *testing.T, f fs.Fs) {
@@ -32,7 +34,6 @@ func startServer(t *testing.T, f fs.Fs) {
    opt.ListenAddr = testBindAddress
    dlnaServer = newServer(f, &opt)
    assert.NoError(t, dlnaServer.Serve())
    testURL = "http://" + dlnaServer.HTTPConn.Addr().String() + "/"
}

func TestInit(t *testing.T) {
@@ -58,11 +59,6 @@ func TestRootSCPD(t *testing.T) {
    // Make sure that the SCPD contains a CDS service.
    require.Contains(t, string(body),
        "<serviceType>urn:schemas-upnp-org:service:ContentDirectory:1</serviceType>")
    // Make sure that the SCPD contains a CM service.
    require.Contains(t, string(body),
        "<serviceType>urn:schemas-upnp-org:service:ConnectionManager:1</serviceType>")
    // Ensure that the SCPD url is configured.
    require.Regexp(t, "<SCPDURL>/.*</SCPDURL>", string(body))
}

// Make sure that it serves content from the remote.

@@ -78,7 +78,6 @@ func newServer(f fs.Fs, opt *ftpopt.Options) (*server, error) {
        },
        Hostname: host,
        Port: portNum,
        PublicIp: opt.PublicIP,
        PassivePorts: opt.PassivePorts,
        Auth: &Auth{
            BasicUser: opt.BasicUser,
@@ -156,7 +155,7 @@ func (f *DriverFactory) NewDriver() (ftp.Driver, error) {
    }, nil
}

//Driver implementation of ftp server
//Driver impletation of ftp server
type Driver struct {
    vfs *vfs.VFS
    lock sync.Mutex
@@ -379,7 +378,7 @@ func (d *Driver) PutFile(path string, data io.Reader, appendData bool) (n int64,
    return bytes, nil
}

//FileInfo struct to hold file info for ftp server
//FileInfo struct ot hold file infor for ftp server
type FileInfo struct {
    os.FileInfo

@@ -388,7 +387,7 @@ type FileInfo struct {
    group uint32
}

//Mode return mode of file.
//Mode return êrm mode of file.
func (f *FileInfo) Mode() os.FileMode {
    return f.mode
}
@@ -408,7 +407,7 @@ func (f *FileInfo) Group() string {
    str := fmt.Sprint(f.group)
    g, err := user.LookupGroupId(str)
    if err != nil {
        return str //Group not found default to numerical value
        return str //Group not found default to numrical value
    }
    return g.Name
}

@@ -16,7 +16,6 @@ var (
func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *ftpopt.Options) {
    rc.AddOption("ftp", &Opt)
    flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.")
    flags.StringVarP(flagSet, &Opt.PublicIP, prefix+"public-ip", "", Opt.PublicIP, "Public IP address to advertise for passive connections.")
    flags.StringVarP(flagSet, &Opt.PassivePorts, prefix+"passive-port", "", Opt.PassivePorts, "Passive port range to use.")
    flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.")
    flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication. (empty value allow every password)")

@@ -24,7 +24,6 @@ You can set a single username and password with the --user and --pass flags.
type Options struct {
    //TODO add more options
    ListenAddr string // Port to listen on
    PublicIP string // Passive ports range
    PassivePorts string // Passive ports range
    BasicUser string // single username for basic auth if not using Htpasswd
    BasicPass string // password for BasicUser
@@ -33,7 +32,6 @@ type Options struct {
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
    ListenAddr: "localhost:2121",
    PublicIP: "",
    PassivePorts: "30000-32000",
    BasicUser: "anonymous",
    BasicPass: "",

@@ -1,8 +1,11 @@
// +build go1.8

package http

import (
    "flag"
    "io/ioutil"
    "net"
    "net/http"
    "strings"
    "testing"
@@ -20,11 +23,11 @@ import (
var (
    updateGolden = flag.Bool("updategolden", false, "update golden files for regression test")
    httpServer *server
    testURL string
)

const (
    testBindAddress = "localhost:0"
    testBindAddress = "localhost:51777"
    testURL = "http://" + testBindAddress + "/"
)

func startServer(t *testing.T, f fs.Fs) {
@@ -32,14 +35,13 @@ func startServer(t *testing.T, f fs.Fs) {
    opt.ListenAddr = testBindAddress
    httpServer = newServer(f, &opt)
    assert.NoError(t, httpServer.Serve())
    testURL = httpServer.Server.URL()

    // try to connect to the test server
    pause := time.Millisecond
    for i := 0; i < 10; i++ {
        resp, err := http.Head(testURL)
        conn, err := net.Dial("tcp", testBindAddress)
        if err == nil {
            _ = resp.Body.Close()
            _ = conn.Close()
            return
        }
        // t.Logf("couldn't connect, sleeping for %v: %v", pause, err)
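        // The "localhost:0" form above asks the kernel for a free
        // ephemeral port, avoiding clashes between parallel test runs;
        // the server must then report the address it actually bound.
        // A minimal sketch of the same idea with the standard library:
        //
        //	l, err := net.Listen("tcp", "localhost:0")
        //	if err != nil {
        //		t.Fatal(err)
        //	}
        //	testURL := "http://" + l.Addr().String() + "/"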
|
||||
|
||||
21
cmd/serve/httplib/http_new.go
Normal file
21
cmd/serve/httplib/http_new.go
Normal file
@@ -0,0 +1,21 @@
// HTTP parts go1.8+

//+build go1.8

package httplib

import (
	"net/http"
	"time"
)

// Initialise the http.Server for post go1.8
func initServer(s *http.Server) {
	s.ReadHeaderTimeout = 10 * time.Second // time to send the headers
	s.IdleTimeout = 60 * time.Second       // time to keep idle connections open
}

// closeServer closes the server in a non graceful way
func closeServer(s *http.Server) error {
	return s.Close()
}

18
cmd/serve/httplib/http_old.go
Normal file
@@ -0,0 +1,18 @@
// HTTP parts pre go1.8

//+build !go1.8

package httplib

import (
	"net/http"
)

// Initialise the http.Server for pre go1.8
func initServer(s *http.Server) {
}

// closeServer closes the server in a non graceful way
func closeServer(s *http.Server) error {
	return nil
}

@@ -180,17 +180,17 @@ func NewServer(handler http.Handler, opt *Options) *Server {

	// FIXME make a transport?
	s.httpServer = &http.Server{
		Addr:              s.Opt.ListenAddr,
		Handler:           handler,
		ReadTimeout:       s.Opt.ServerReadTimeout,
		WriteTimeout:      s.Opt.ServerWriteTimeout,
		MaxHeaderBytes:    s.Opt.MaxHeaderBytes,
		ReadHeaderTimeout: 10 * time.Second, // time to send the headers
		IdleTimeout:       60 * time.Second, // time to keep idle connections open
		Addr:           s.Opt.ListenAddr,
		Handler:        handler,
		ReadTimeout:    s.Opt.ServerReadTimeout,
		WriteTimeout:   s.Opt.ServerWriteTimeout,
		MaxHeaderBytes: s.Opt.MaxHeaderBytes,
		TLSConfig: &tls.Config{
			MinVersion: tls.VersionTLS10, // disable SSL v3.0 and earlier
		},
	}
	// go version specific initialisation
	initServer(s.httpServer)

	if s.Opt.ClientCA != "" {
		if !s.useSSL {

@@ -267,7 +267,7 @@ func (s *Server) Wait() {

// Close shuts the running server down
func (s *Server) Close() {
	err := s.httpServer.Close()
	err := closeServer(s.httpServer)
	if err != nil {
		log.Printf("Error on closing HTTP server: %v", err)
		return

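Taken together, the two new httplib files and these hunks route all go-version-specific behaviour through a small shim. A single-file sketch of the resulting call pattern (in the real code initServer/closeServer live in two files selected by build tags):

package main

import (
	"log"
	"net/http"
	"time"
)

// initServer applies the go1.8+ settings; the pre-go1.8 variant is empty.
func initServer(s *http.Server) {
	s.ReadHeaderTimeout = 10 * time.Second // time to send the headers
	s.IdleTimeout = 60 * time.Second       // time to keep idle connections open
}

// closeServer is the go1.8+ variant; the pre-go1.8 file returns nil.
func closeServer(s *http.Server) error {
	return s.Close()
}

func main() {
	s := &http.Server{Addr: "localhost:0", ReadTimeout: time.Minute}
	initServer(s) // go version specific initialisation
	if err := closeServer(s); err != nil {
		log.Printf("Error on closing HTTP server: %v", err)
	}
}
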
@@ -11,7 +11,7 @@ import (
	"github.com/pkg/errors"
)

// GetTemplate returns the HTML template for serving directories via HTTP
// GetTemplate eturns the HTML template for serving directories via HTTP
func GetTemplate() (tpl *template.Template, err error) {
	templateFile, err := Assets.Open("index.html")
	if err != nil {

@@ -330,12 +330,25 @@ func (s *server) listObjects(w http.ResponseWriter, r *http.Request, remote stri
	ls := listItems{}

	// if remote supports ListR use that directly, otherwise use recursive Walk
	err := walk.ListR(s.f, remote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
		for _, entry := range entries {
			ls.add(entry)
		}
		return nil
	})
	var err error
	if ListR := s.f.Features().ListR; ListR != nil {
		err = ListR(remote, func(entries fs.DirEntries) error {
			for _, entry := range entries {
				ls.add(entry)
			}
			return nil
		})
	} else {
		err = walk.Walk(s.f, remote, true, -1, func(path string, entries fs.DirEntries, err error) error {
			if err == nil {
				for _, entry := range entries {
					ls.add(entry)
				}
			}
			return err
		})
	}

	if err != nil {
		_, err = fserrors.Cause(err)
		if err != fs.ErrorDirNotFound {

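The hunk above is the classic optional-capability dispatch: use the backend's fast recursive lister when present, otherwise fall back to a generic walk. A runnable sketch of the same idea using a type assertion instead of rclone's Features() struct (all names below are illustrative, not rclone's API):

package main

import "fmt"

// Lister is the capability every backend has; RecursiveLister is the
// optional fast path (stand-ins for rclone's Fs and Features().ListR).
type Lister interface {
	List(dir string) []string
}

type RecursiveLister interface {
	Lister
	ListR(dir string) []string
}

type fastFs struct{ files []string }

func (f fastFs) List(dir string) []string  { return f.files }
func (f fastFs) ListR(dir string) []string { return f.files }

type slowFs struct{ files []string }

func (s slowFs) List(dir string) []string { return s.files }

// listAll uses ListR directly when the backend supports it, otherwise
// it falls back to the plain lister (rclone falls back to walk.Walk).
func listAll(l Lister, dir string) []string {
	if lr, ok := l.(RecursiveLister); ok {
		return lr.ListR(dir)
	}
	return l.List(dir)
}

func main() {
	fmt.Println(listAll(fastFs{files: []string{"a", "b/c"}}, ""))
	fmt.Println(listAll(slowFs{files: []string{"d"}}, ""))
}
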
@@ -17,7 +17,8 @@ import (
)

const (
	testBindAddress = "localhost:0"
	testBindAddress = "localhost:51779"
	testURL         = "http://" + testBindAddress + "/"
	resticSource    = "../../../../../restic/restic"
)

@@ -61,7 +62,7 @@ func TestRestic(t *testing.T) {
	}
	cmd := exec.Command("go", args...)
	cmd.Env = append(os.Environ(),
		"RESTIC_TEST_REST_REPOSITORY=rest:"+w.Server.URL()+path,
		"RESTIC_TEST_REST_REPOSITORY=rest:"+testURL+path,
	)
	out, err := cmd.CombinedOutput()
	if len(out) != 0 {

@@ -3,7 +3,6 @@
package webdav

import (
	"context"
	"net/http"
	"os"

@@ -16,6 +15,7 @@ import (
	"github.com/ncw/rclone/vfs"
	"github.com/ncw/rclone/vfs/vfsflags"
	"github.com/spf13/cobra"
	"golang.org/x/net/context" // switch to "context" when we stop supporting go1.8
	"golang.org/x/net/webdav"
)

@@ -20,7 +20,8 @@ import (
)

const (
	testBindAddress = "localhost:0"
	testBindAddress = "localhost:51778"
	testURL         = "http://" + testBindAddress + "/"
)

// check interfaces

@@ -69,7 +70,7 @@ func TestWebDav(t *testing.T) {
	cmd := exec.Command("go", args...)
	cmd.Env = append(os.Environ(),
		"RCLONE_CONFIG_WEBDAVTEST_TYPE=webdav",
		"RCLONE_CONFIG_WEBDAVTEST_URL="+w.Server.URL(),
		"RCLONE_CONFIG_WEBDAVTEST_URL="+testURL,
		"RCLONE_CONFIG_WEBDAVTEST_VENDOR=other",
	)
	out, err := cmd.CombinedOutput()

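The restic and webdav test hunks above share one pattern: drive an external binary against the test server with an extended environment. A minimal sketch, assuming only that `go` is on PATH:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("go", "version")
	// inherit the parent environment and add test-specific variables
	cmd.Env = append(os.Environ(),
		"RCLONE_CONFIG_WEBDAVTEST_TYPE=webdav",
		"RCLONE_CONFIG_WEBDAVTEST_URL=http://localhost:51778/",
	)
	out, err := cmd.CombinedOutput()
	if len(out) != 0 {
		fmt.Printf("%s", out)
	}
	if err != nil {
		fmt.Println("run failed:", err)
	}
}
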
@@ -20,7 +20,7 @@ A few cloud storage services provide different storage classes on objects,
for example AWS S3 and Glacier, Azure Blob storage - Hot, Cool and Archive,
Google Cloud Storage - Regional Storage, Nearline, Coldline etc.

Note that, certain tier changes make objects not available to access immediately.
Note that, certain tier chages make objects not available to access immediately.
For example tiering to archive in azure blob storage makes objects in frozen state,
user can restore by setting tier to Hot/Cool, similarly S3 to Glacier makes object
inaccessible.

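For reference, tiers are changed with the settier command; a representative invocation (tier names depend on the provider):

    rclone settier Cool remote:path/file
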
@@ -6,13 +6,8 @@ import (
	"github.com/spf13/cobra"
)

var (
	createEmptySrcDirs = false
)

func init() {
	cmd.Root.AddCommand(commandDefintion)
	commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after sync")
}

var commandDefintion = &cobra.Command{

@@ -44,7 +39,7 @@ go there.
	cmd.CheckArgs(2, 2, command, args)
	fsrc, fdst := cmd.NewFsSrcDst(args)
	cmd.Run(true, true, command, func() error {
		return sync.Sync(fdst, fsrc, createEmptySrcDirs)
		return sync.Sync(fdst, fsrc)
	})
},
}

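For context, the flag this diff adds on one side and removes on the other is used as (a representative invocation):

    rclone sync --create-empty-src-dirs source:path dest:path
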
@@ -29,7 +29,6 @@ Rclone is a command line program to sync files and directories to and from:
* {{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}}
* {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
* {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
* {{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}}
* {{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}}
* {{< provider name="Mega" home="https://mega.nz/" config="/mega/" >}}
* {{< provider name="Microsoft Azure Blob Storage" home="https://azure.microsoft.com/en-us/services/storage/blobs/" config="/azureblob/" >}}

@@ -71,4 +70,5 @@ Links
* <i class="fa fa-home"></i> [Home page](https://rclone.org/)
* <i class="fa fa-github"></i> [GitHub project page for source and bug tracker](https://github.com/ncw/rclone)
* <i class="fa fa-comments"></i> [Rclone Forum](https://forum.rclone.org)
* <i class="fa fa-google-plus"></i> <a href="https://google.com/+RcloneOrg" rel="publisher">Google+ page</a>
* <i class="fa fa-cloud-download"></i> [Downloads](/downloads/)

@@ -15,7 +15,7 @@ eg `remote:directory/subdirectory` or `/directory/subdirectory`.
During the initial setup with `rclone config` you will specify the target
remote. The target remote can either be a local path or another remote.

Subfolders can be used in target remote. Assume an alias remote named `backup`
Subfolders can be used in target remote. Asume an alias remote named `backup`
with the target `mydrive:private/backup`. Invoking `rclone mkdir backup:desktop`
is exactly the same as invoking `rclone mkdir mydrive:private/backup/desktop`.

@@ -237,18 +237,3 @@ Contributors
* Jonathan <vanillajonathan@users.noreply.github.com>
* James Carpenter <orbsmiv@users.noreply.github.com>
* Vince <vince0villamora@gmail.com>
* Nestar47 <47841759+Nestar47@users.noreply.github.com>
* Six <brbsix@gmail.com>
* Alexandru Bumbacea <alexandru.bumbacea@booking.com>
* calisro <robert.calistri@gmail.com>
* Dr.Rx <david.rey@nventive.com>
* marcintustin <marcintustin@users.noreply.github.com>
* jaKa Močnik <jaka@koofr.net>
* Fionera <fionera@fionera.de>
* Dan Walters <dan@walters.io>
* Danil Semelenov <sgtpep@users.noreply.github.com>
* xopez <28950736+xopez@users.noreply.github.com>
* Ben Boeckel <mathstuf@gmail.com>
* Manu <manu@snapdragon.cc>
* Kyle E. Mitchell <kyle@kemitchell.com>
* Gary Kim <gary@garykim.dev>

@@ -270,7 +270,7 @@ start and finish the upload) and another 2 requests for each chunk:

#### Versions ####

Versions can be viewed with the `--b2-versions` flag. When it is set
Versions can be viewd with the `--b2-versions` flag. When it is set
rclone will show and act on older versions of files. For example

Listing without `--b2-versions`

@@ -409,18 +409,5 @@ Disable checksums for large (> upload cutoff) files
- Type: bool
- Default: false

#### --b2-download-url

Custom endpoint for downloads.

This is usually set to a Cloudflare CDN URL as Backblaze offers
free egress for data downloaded through the Cloudflare network.
Leave blank if you want to use the endpoint provided by Backblaze.

- Config: download_url
- Env Var: RCLONE_B2_DOWNLOAD_URL
- Type: string
- Default: ""

<!--- autogenerated options stop -->

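As a usage sketch (cdn.example.com is a placeholder hostname, not a real endpoint), the option can also be supplied for a single run through the environment variable listed above:

    RCLONE_B2_DOWNLOAD_URL=https://cdn.example.com rclone copy b2:bucket/path /tmp/path
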
@@ -262,7 +262,7 @@ There is an issue with wrapping the remotes in this order:
During testing, I experienced a lot of bans with the remotes in this order.
I suspect it might be related to how crypt opens files on the cloud provider
which makes it think we're downloading the full file instead of small chunks.
Organizing the remotes in this order yields better results:
Organizing the remotes in this order yelds better results:
<span style="color:green">**cloud remote** -> **cache** -> **crypt**</span>

#### absolute remote paths ####

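A sketch of that recommended ordering as an rclone.conf chain (the remote names and the drive backend are placeholder assumptions): crypt points at cache, and cache points at the cloud remote:

    [mycloud]
    type = drive

    [mycache]
    type = cache
    remote = mycloud:

    [mycrypt]
    type = crypt
    remote = mycache:secret
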
@@ -1,97 +1,11 @@
---
title: "Documentation"
description: "Rclone Changelog"
date: "2019-04-13"
date: "2019-02-09"
---

# Changelog

## v1.47.0 - 2019-04-13

* New backends
    * Backend for Koofr cloud storage service. (jaKa)
* New Features
    * Resume downloads if the reader fails in copy (Nick Craig-Wood)
        * this means rclone will restart transfers if the source has an error
        * this is most useful for downloads or cloud to cloud copies
    * Use `--fast-list` for listing operations where it won't use more memory (Nick Craig-Wood)
        * this should speed up the following operations on remotes which support `ListR`
        * `dedupe`, `serve restic`, `lsf`, `ls`, `lsl`, `lsjson`, `lsd`, `md5sum`, `sha1sum`, `hashsum`, `size`, `delete`, `cat`, `settier`
        * use `--disable ListR` to get old behaviour if required
    * Make `--files-from` traverse the destination unless `--no-traverse` is set (Nick Craig-Wood)
        * this fixes `--files-from` with Google drive and excessive API use in general.
    * Make server side copy account bytes and obey `--max-transfer` (Nick Craig-Wood)
    * Add `--create-empty-src-dirs` flag and default to not creating empty dirs (ishuah)
    * Add client side TLS/SSL flags `--ca-cert`/`--client-cert`/`--client-key` (Nick Craig-Wood)
    * Implement `--suffix-keep-extension` for use with `--suffix` (Nick Craig-Wood)
    * build:
        * Switch to semver compliant version tags to be go modules compliant (Nick Craig-Wood)
        * Update to use go1.12.x for the build (Nick Craig-Wood)
    * serve dlna: Add connection manager service description to improve compatibility (Dan Walters)
    * lsf: Add 'e' format to show encrypted names and 'o' for original IDs (Nick Craig-Wood)
    * lsjson: Added `--files-only` and `--dirs-only` flags (calistri)
    * rc: Implement operations/publiclink the equivalent of `rclone link` (Nick Craig-Wood)
* Bug Fixes
    * accounting: Fix total ETA when `--stats-unit bits` is in effect (Nick Craig-Wood)
    * Bash TAB completion
        * Use private custom func to fix clash between rclone and kubectl (Nick Craig-Wood)
        * Fix for remotes with underscores in their names (Six)
        * Fix completion of remotes (Florian Gamböck)
        * Fix autocompletion of remote paths with spaces (Danil Semelenov)
    * serve dlna: Fix root XML service descriptor (Dan Walters)
    * ncdu: Fix display corruption with Chinese characters (Nick Craig-Wood)
    * Add SIGTERM to signals which run the exit handlers on unix (Nick Craig-Wood)
    * rc: Reload filter when the options are set via the rc (Nick Craig-Wood)
* VFS / Mount
    * Fix FreeBSD: Ignore Truncate if called with no readers and already the correct size (Nick Craig-Wood)
    * Read directory and check for a file before mkdir (Nick Craig-Wood)
    * Shorten the locking window for vfs/refresh (Nick Craig-Wood)
* Azure Blob
    * Enable MD5 checksums when uploading files bigger than the "Cutoff" (Dr.Rx)
    * Fix SAS URL support (Nick Craig-Wood)
* B2
    * Allow manual configuration of backblaze downloadUrl (Vince)
    * Ignore already_hidden error on remove (Nick Craig-Wood)
    * Ignore malformed `src_last_modified_millis` (Nick Craig-Wood)
* Drive
    * Add `--skip-checksum-gphotos` to ignore incorrect checksums on Google Photos (Nick Craig-Wood)
    * Allow server side move/copy between different remotes. (Fionera)
    * Add docs on team drives and `--fast-list` eventual consistency (Nestar47)
    * Fix imports of text files (Nick Craig-Wood)
    * Fix range requests on 0 length files (Nick Craig-Wood)
    * Fix creation of duplicates with server side copy (Nick Craig-Wood)
* Dropbox
    * Retry blank errors to fix long listings (Nick Craig-Wood)
* FTP
    * Add `--ftp-concurrency` to limit maximum number of connections (Nick Craig-Wood)
* Google Cloud Storage
    * Fall back to default application credentials (marcintustin)
    * Allow bucket policy only buckets (Nick Craig-Wood)
* HTTP
    * Add `--http-no-slash` for websites with directories with no slashes (Nick Craig-Wood)
    * Remove duplicates from listings (Nick Craig-Wood)
    * Fix socket leak on 404 errors (Nick Craig-Wood)
* Jottacloud
    * Fix token refresh (Sebastian Bünger)
    * Add device registration (Oliver Heyme)
* Onedrive
    * Implement graceful cancel of multipart uploads if rclone is interrupted (Cnly)
    * Always add trailing colon to path when addressing items (Cnly)
    * Return errors instead of panic for invalid uploads (Fabian Möller)
* S3
    * Add support for "Glacier Deep Archive" storage class (Manu)
    * Update Dreamhost endpoint (Nick Craig-Wood)
    * Note incompatibility with CEPH Jewel (Nick Craig-Wood)
* SFTP
    * Allow custom ssh client config (Alexandru Bumbacea)
* Swift
    * Obey Retry-After to enable OVH restore from cold storage (Nick Craig-Wood)
    * Work around token expiry on CEPH (Nick Craig-Wood)
* WebDAV
    * Allow IsCollection property to be integer or boolean (Nick Craig-Wood)
    * Fix race when creating directories (Nick Craig-Wood)
    * Fix About/df when reading the available/total returns 0 (Nick Craig-Wood)

## v1.46 - 2019-02-09

* New backends

Some files were not shown because too many files have changed in this diff.