mirror of https://github.com/rclone/rclone.git synced 2026-01-04 09:33:36 +00:00

Compare commits


77 Commits

Author SHA1 Message Date
Henning Surmeier
ecb3f04370 webdav/sharepoint: renew cookies after 12hrs 2018-09-17 22:49:02 +02:00
Nick Craig-Wood
a25875170b webdav: Add another time format to fix #2574 2018-09-15 21:46:56 +01:00
Alex Chen
a288646419 onedrive: fix sometimes special chars in filenames not replaced 2018-09-14 21:38:55 +08:00
Nick Craig-Wood
b3d8bc61ac build: add example scripts for bisecting rclone and go 2018-09-14 11:17:51 +01:00
sandeepkru
7accd30da8 cmd and fs: Added new command settier which performs storage tier changes on
supported remotes
2018-09-12 21:09:08 +01:00
sandeepkru
9594fd0a0c fstests: Added integration tests on SetTier operation 2018-09-12 21:09:08 +01:00
sandeepkru
aac84c554a azureblob: Implemented settier command support on the azureblob remote, which allows the tier
to be changed on objects. Added an internal test to check that the feature flags are set correctly
2018-09-12 21:09:08 +01:00
sandeepkru
5716a58413 fs: Added new optional interfaces SetTierer, GetTierer and ListTierer, which are used to
perform object tier changes on supported remotes
2018-09-12 21:09:08 +01:00
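
A plausible shape for these optional interfaces, as a sketch only: the method names and signatures are inferred from the azureblob Object methods appearing later in this changeset (SetTier, GetTier, ListTiers) and may not match the fs package definitions exactly.

package example

// Sketch of the optional tier interfaces; signatures inferred from the
// azureblob implementation in this changeset, not copied from the fs package.
type SetTierer interface {
	// SetTier changes the storage tier of an object, e.g. "Hot", "Cool" or "Archive".
	SetTier(tier string) error
}

type GetTierer interface {
	// GetTier returns the object's current storage tier as a string.
	GetTier() string
}

type ListTierer interface {
	// ListTiers returns the storage tiers supported by the remote.
	ListTiers() []string
}

azureblob sets the corresponding SetTier, GetTier and ListTiers feature flags, which is what the new internal test below checks.
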
sandeepkru
233507bfe0 vendor: Update AzureSDK version to latest one, fixes failing integration tests 2018-09-12 08:14:38 +01:00
sandeepkru
5b27702b61 AzureBlob new sdk changes 2018-09-12 08:14:38 +01:00
Nick Craig-Wood
b2a6a97443 Add Craig Miskell to contributors 2018-09-11 07:57:16 +01:00
Craig Miskell
2543278c3f S3: Use (custom) pacer, to retry operations when reasonable - fixes #2503 2018-09-11 07:57:03 +01:00
Jon Craton
19cf3bb9e7 Fixed typo (duplicate word) (#2563) 2018-09-10 19:09:48 -07:00
Fabian Möller
3c44ef788a cache: add plex_insecure option to skip certificate validation
Fixes #2215
2018-09-10 21:19:25 +01:00
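
Skipping certificate verification in Go generally comes down to an http.Client whose TLS config has InsecureSkipVerify set; the sketch below shows how the new plex_insecure option plausibly feeds into the Plex connector's client (the function and field names here are illustrative assumptions, not the actual cache backend code).

package example

import (
	"crypto/tls"
	"net/http"
)

// newPlexHTTPClient sketches how a plex_insecure style option is typically
// honoured: when insecure is true, the client accepts any certificate the
// Plex server presents (useful when the server's certificate cannot be
// verified against a known CA).
func newPlexHTTPClient(insecure bool) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},
		},
	}
}
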
Fabian Möller
e5663de09e docs: add section for .plex.direct urls to cache 2018-09-10 21:15:18 +01:00
Nick Craig-Wood
7cfd4e56f8 Add Santiago Rodríguez to contributors
Another email address for Sandeep.
2018-09-10 20:49:55 +01:00
Santiago Rodríguez
282540c2d4 azureblob: add --azureblob-list-chunk parameter - Fixes #2390
This parameter can be used to adjust the size of the listing chunks
which can be used to workaround problems listing large buckets.
2018-09-10 20:45:06 +01:00
albertony
1e7a7d756f jottacloud: fix for --fast-list 2018-09-10 20:38:20 +01:00
Fabian Möller
f6ee0795ac cache: preserve leading / in wrapped remote path
When combining the remote value and the root path, preserve the absence
or presence of the / at the beginning of the wrapped remote path.

e.g. a remote "cloud:" and root path "dir" becomes "cloud:dir" instead
of "cloud:/dir".

Fixes #2553
2018-09-10 20:35:50 +01:00
Fabian Möller
eb5a95e7de crypt: preserve leading / in wrapped remote path
When combining the remote value and the root path, preserve the absence
or presence of the / at the beginning of the wrapped remote path.

e.g. a remote "cloud:" and root path "dir" becomes "cloud:dir" instead
of "cloud:/dir".
2018-09-10 20:35:50 +01:00
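
A small sketch of the joining rule both commits describe, under the assumption of a helper like the one below (the actual crypt and cache code paths differ in detail):

package example

import "strings"

// joinRemotePath appends the root path to the wrapped remote while keeping
// the leading "/" of the wrapped path exactly as the user wrote it:
// "cloud:" + "dir" gives "cloud:dir", "cloud:/" + "dir" gives "cloud:/dir",
// and "cloud:path" + "dir" gives "cloud:path/dir".
func joinRemotePath(remote, root string) string {
	if root == "" {
		return remote
	}
	if remote == "" || strings.HasSuffix(remote, ":") || strings.HasSuffix(remote, "/") {
		return remote + root
	}
	return remote + "/" + root
}
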
Sandeep
5b1c162fb2 Added sandeepkru to maintainers list (#2560) 2018-09-08 20:58:23 -07:00
albertony
d51501938a jottacloud: add link sharing support 2018-09-08 09:38:57 +01:00
Nick Craig-Wood
a823d518ac docs: changelog from v1.43.1 branch 2018-09-07 17:16:11 +01:00
Nick Craig-Wood
87d4e32a6b build: instructions on how to make a point release 2018-09-07 17:10:29 +01:00
Nick Craig-Wood
820d2a7149 Add Felix Brucker to contributors 2018-09-07 15:14:28 +01:00
Nick Craig-Wood
9fe39f25e1 union: add missing docs 2018-09-07 15:14:08 +01:00
Nick Craig-Wood
1b2cc781e5 union: fix so all integration tests pass
* Fix error handling in List and NewObject
* Fix Precision in case we have precision > time.Second
* Fix Features - all binary features are possible
* Fix integration tests using new test facilities
2018-09-07 15:14:08 +01:00
Nick Craig-Wood
e05ec2b77e fstests: Allow object name and fs check to be skipped 2018-09-07 15:14:08 +01:00
Felix Brucker
9e3ea3c6ac union: Implement union backend which reads from multiple backends 2018-09-07 15:14:08 +01:00
Anagh Kumar Baranwal
0fb12112f5 docs: display changes
- Reduced size of the social menu and increased the size of the content
- Added scrollable property to the index menus
- Fixed code wrapping issue

Fixes #2103

Signed-off-by: Anagh Kumar Baranwal <anaghk.dos@gmail.com>
2018-09-07 14:54:22 +01:00
Cédric Connes
1b95ca2852 stats: handle FatalError and NoRetryError when reported to stats 2018-09-07 14:44:50 +01:00
albertony
e07a850be3 jottacloud: add permanent delete support: --jottacloud-hard-delete 2018-09-07 12:58:18 +01:00
albertony
3fccce625c jottacloud: add --fast-list support - fixes #2532 2018-09-07 12:49:39 +01:00
albertony
a1f935e815 jottacloud: minor improvement in quota info (omit if unlimited) 2018-09-07 12:49:39 +01:00
Alex Chen
22ee4151fd build: make CIs available for forks
This makes it possible to run CI on a fork of rclone which is useful for contributors.
2018-09-07 10:17:26 +01:00
Fabian Möller
cc23ad71ce ncdu: return error instead of log.Fatal in Show 2018-09-07 10:06:37 +01:00
sandeepkru
57b9fff904 azureblob - BugFix - Incorrect StageBlock invocation in multi-part uploads
fixes #2518. Incorrect formation of block list.
2018-09-06 22:24:40 +01:00
Alex Chen
692ad482dc onedrive: fix new fields not saved when editing old config - fixes #2527 2018-09-07 00:07:16 +08:00
Sebastian Bünger
c6f1c3c7f6 box: Implement link sharing. #2178 2018-09-04 22:01:22 +01:00
Nick Craig-Wood
164d1e05ca hubic: retry auth fetching if it fails to make hubic more reliable 2018-09-04 21:00:36 +01:00
Nick Craig-Wood
c644241392 hubic: fix uploads - fixes #2524
Uploads were broken because chunk size was set to zero.  This was a
consequence of the backend config re-organization which meant that
chunk size had lost its default.

Sharing some backend config between swift and hubic fixes the problem
and means hubic gains its own --hubic-chunk-size flag.
2018-09-04 20:27:48 +01:00
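
As a sketch of the kind of default that was lost and is restored by sharing the swift config (the 5 GiB value is the swift chunking default and the helper name is an assumption):

package example

// defaultChunkSize stands in for the shared swift/hubic default; 5 GiB is
// the swift chunking default and is assumed here for illustration only.
const defaultChunkSize int64 = 5 * 1024 * 1024 * 1024

// effectiveChunkSize sketches the guard that matters: a zero chunk size
// coming out of the re-organised backend config must fall back to a sane
// default rather than being used directly, since a zero chunk size is what
// broke the uploads.
func effectiveChunkSize(configured int64) int64 {
	if configured <= 0 {
		return defaultChunkSize
	}
	return configured
}
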
Cnly
89be5cadaa onedrive: fix make check 2018-09-05 00:37:52 +08:00
Cnly
f326f94b97 onedrive: use single-part upload for empty files - fixes #2520 2018-09-05 00:08:00 +08:00
Nick Craig-Wood
3d2117887d Add Anagh Kumar Baranwal to contributors 2018-09-04 16:16:46 +01:00
Anagh Kumar Baranwal
5a6750e1cd cache: documentation fix for cache-chunk-total-size - Fixes #2519
Signed-off-by: Anagh Kumar Baranwal <anaghk.dos@gmail.com>
2018-09-04 16:16:35 +01:00
Fabian Möller
6b8b9d19f3 googlecloudstorage: fix service_account_file been ignored - Fixes #2523 2018-09-04 15:31:20 +01:00
Nick Craig-Wood
4ca26eb38c cmd: fix crash with --progress and --stats 0 #2501 2018-09-04 14:39:48 +01:00
Alex Chen
37b2754f37 Add Alex Chen (Cnly) to MAINTAINERS.md 2018-09-04 08:40:01 +08:00
Nick Craig-Wood
172beb2ae3 Add cron410 to contributors 2018-09-03 20:28:15 +01:00
cron410
5eba392a04 cache: clarify docs 2018-09-03 20:28:15 +01:00
Fabian Möller
deda093637 cache: fix error return value of cache/fetch rc method 2018-09-03 17:32:11 +02:00
dcpu
a4c4019032 cache: improve performance by not sending info requests for cached chunks 2018-09-03 15:41:06 +01:00
Nick Craig-Wood
2e37942592 Add albertony to contributors 2018-09-03 15:31:25 +01:00
albertony
09d7bd2d40 config: don't create default config dir when user supplies --config
Avoid creating empty default configuration directory when user supplies path to config file.

Fixes #2514.
2018-09-03 15:30:53 +01:00
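
A behavioural sketch of the change, assuming a helper along these lines (the real config package code is structured differently):

package example

import "os"

// makeConfigDirIfNeeded only creates the default configuration directory
// when rclone will actually use it; if the user supplied --config with an
// explicit path, nothing is created.
func makeConfigDirIfNeeded(userSuppliedConfig, defaultDir string) error {
	if userSuppliedConfig != "" {
		// --config points elsewhere, so don't create the default directory.
		return nil
	}
	return os.MkdirAll(defaultDir, 0700)
}
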
Nick Craig-Wood
ff0efb1501 Add Sheldon Rupp to contributors 2018-09-03 15:04:37 +01:00
Sheldon Rupp
0f1d4a7ca8 docs: fix typo 2018-09-03 15:03:49 +01:00
Fabian Möller
a0b3fd3a33 cache: fix worker scale down
Ensure that calling scaleWorkers will create/destroy the right amount of
workers.
2018-09-03 12:29:35 +02:00
Fabian Möller
cdbe3691b7 cache: add cache/fetch rc function 2018-09-03 12:29:35 +02:00
Fabian Möller
3a0b3b0f6e drive: reformat long API call lines 2018-09-03 12:22:05 +02:00
Nick Craig-Wood
d3afef3e1b Add dcpu to contributors 2018-09-02 18:11:42 +01:00
dcpu
f4aaec9ce5 log: Add --log-format flag - fixes #2424 2018-09-02 18:11:09 +01:00
Nick Craig-Wood
bd5d326160 build: enable caching of the go build cache for Travis and Appveyor 2018-09-02 17:43:09 +01:00
Cnly
5b9b9f1572 onedrive: graph: clarify option for root Sharepoint site 2018-09-02 16:06:25 +01:00
Cnly
571c8754de onedrive: graph: update docs 2018-09-02 16:06:25 +01:00
Cnly
fb9a95e68e onedrive: graph: Remove unnecessary error checks 2018-09-02 16:06:25 +01:00
Cnly
85e0839c8b onedrive: graph: Refine config handling 2018-09-02 16:06:25 +01:00
Cnly
1749fb8ebf onedrive: graph: Refine config keys naming 2018-09-02 16:06:25 +01:00
Oliver Heyme
e114be11ec onedrive: Removed upload cutoff and always do session uploads
Set modtime on copy

Added versioning issue to OneDrive documentation

(cherry picked from commit 7f74403)
2018-09-02 16:06:25 +01:00
Cnly
b709f73aab onedrive: rework to support Microsoft Graph
The initial work on this was done by Oliver Heyme with updates from
Cnly.

Oliver Heyme:

* Changed to Microsoft graph
* Enable writing
* Added more options for adding a OneDrive Remote
* Better error handling
* Send modDate at create upload session and fix list children

Cnly:

* Simple upload API only supports max 4MB files
* Fix supported hash types for different drive types
* Fix unchecked err

Co-authored-by: Oliver Heyme <olihey@googlemail.com>
Co-authored-by: Cnly <minecnly@gmail.com>
2018-09-02 16:06:25 +01:00
Nick Craig-Wood
05a615ef22 Add Dr. Tobias Quathamer to contributors 2018-09-02 15:51:47 +01:00
Dr. Tobias Quathamer
76450c01f3 cache: remove accidentally committed files
* Delete cache_upload_test.go.orig
* Delete cache_upload_test.go.rej
2018-09-02 15:51:22 +01:00
Nick Craig-Wood
86e64c626c Add Cédric Connes to contributors 2018-09-02 15:08:38 +01:00
Cédric Connes
9b827be418 local: skip bad symlinks in dir listing with -L enabled - fixes #1509 2018-09-02 14:47:54 +01:00
Nick Craig-Wood
7e5c6725c1 docs: Fix version in changelog 2018-09-01 18:40:34 +01:00
Nick Craig-Wood
543d75723b Start v1.43-DEV development 2018-09-01 18:37:48 +01:00
Nick Craig-Wood
b4d94f255a build: server side copy the current release files in 2018-09-01 18:22:19 +01:00
Nick Craig-Wood
b0dd218fea build: make tidy-beta for removing old beta releases 2018-09-01 18:21:54 +01:00
107 changed files with 3171 additions and 1393 deletions

View File

@@ -4,6 +4,9 @@ os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\ncw\rclone
cache:
- '%LocalAppData%\go-build'
environment:
GOPATH: C:\gopath
CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
@@ -43,4 +46,4 @@ artifacts:
- path: build/*-v*.zip
deploy_script:
- IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
- IF "%APPVEYOR_REPO_NAME%" == "ncw/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload

View File

@@ -10,6 +10,7 @@ go:
- 1.10.x
- 1.11.x
- tip
go_import_path: github.com/ncw/rclone
before_install:
- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
- if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
@@ -34,6 +35,9 @@ addons:
- libfuse-dev
- rpm
- pkg-config
cache:
directories:
- $HOME/.cache/go-build
matrix:
allow_failures:
- go: tip
@@ -41,11 +45,15 @@ matrix:
- os: osx
go: 1.11.x
env: GOTAGS=""
cache:
directories:
- $HOME/Library/Caches/go-build
deploy:
provider: script
script: make travis_beta
skip_cleanup: true
on:
repo: ncw/rclone
all_branches: true
go: 1.11.x
condition: $TRAVIS_PULL_REQUEST == false

View File

@@ -71,7 +71,7 @@ Make sure you
When you are done with that
git push origin my-new-feature
git push origin my-new-feature
Go to the Github website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).
@@ -81,6 +81,14 @@ You patch will get reviewed and you might get asked to fix some stuff.
If so, then make the changes in the same branch, squash the commits,
rebase it to master then push it to Github with `--force`.
## Enabling CI for your fork ##
The CI config files for rclone have taken care of forks of the project, so you can enable CI for your fork repo easily.
rclone currently uses [Travis CI](https://travis-ci.org/), [AppVeyor](https://ci.appveyor.com/), and
[Circle CI](https://circleci.com/) to build the project. To enable them for your fork, simply go into their
websites, find your fork of rclone, and enable building there.
## Testing ##
rclone's tests are run from the go testing framework, so at the top

View File

@@ -7,6 +7,8 @@ Current active maintainers of rclone are
* Ishuah Kariuki @ishuah
* Remus Bunduc @remusb - cache subsystem maintainer
* Fabian Möller @B4dM4n
* Alex Chen @Cnly
* Sandeep Ummadi @sandeepkru
**This is a work in progress Draft**

View File

@@ -152,8 +152,8 @@ check_sign:
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
upload:
rclone -v copy --exclude '*current*' build/ memstore:downloads-rclone-org/$(TAG)
rclone -v copy --include '*current*' --include version.txt build/ memstore:downloads-rclone-org
rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'
upload_github:
./bin/upload-github $(TAG)
@@ -202,7 +202,7 @@ endif
# Fetch the binary builds from travis and appveyor
fetch_binaries:
rclone -v sync $(BETA_UPLOAD) build/
rclone -P sync $(BETA_UPLOAD) build/
serve: website
cd docs && hugo server -v -w

View File

@@ -31,3 +31,25 @@ Early in the next release cycle update the vendored dependencies
* git status
* git add new files
* git commit -a -v
Making a point release. If rclone needs a point release due to some
horrendous bug, then
* git branch v1.XX v1.XX-fixes
* git cherry-pick any fixes
* Test (see above)
* make NEW_TAG=v1.XX.1 tag
* edit docs/content/changelog.md
* make TAG=v1.43.1 doc
* git commit -a -v -m "Version v1.XX.1"
* git tag -d -v1.XX.1
* git tag -s -m "Version v1.XX.1" v1.XX.1
* git push --tags -u origin v1.XX-fixes
* make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
* make TAG=v1.43.1 tarball
* make TAG=v1.43.1 sign_upload
* make TAG=v1.43.1 check_sign
* make TAG=v1.43.1 upload
* make TAG=v1.43.1 upload_website
* make TAG=v1.43.1 upload_github
* NB this overwrites the current beta so after the release, rebuild the last travis build
* Announce!

View File

@@ -25,6 +25,7 @@ import (
_ "github.com/ncw/rclone/backend/s3"
_ "github.com/ncw/rclone/backend/sftp"
_ "github.com/ncw/rclone/backend/swift"
_ "github.com/ncw/rclone/backend/union"
_ "github.com/ncw/rclone/backend/webdav"
_ "github.com/ncw/rclone/backend/yandex"
)

View File

@@ -7,6 +7,7 @@ package azureblob
import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/binary"
"encoding/hex"
@@ -37,7 +38,7 @@ const (
minSleep = 10 * time.Millisecond
maxSleep = 10 * time.Second
decayConstant = 1 // bigger for slower decay, exponential
listChunkSize = 5000 // number of items to read at once
maxListChunkSize = 5000 // number of items to read at once
modTimeKey = "mtime"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
@@ -80,6 +81,11 @@ func init() {
Help: "Upload chunk size. Must fit in memory.",
Default: fs.SizeSuffix(defaultChunkSize),
Advanced: true,
}, {
Name: "list_chunk",
Help: "Size of blob list.",
Default: maxListChunkSize,
Advanced: true,
}, {
Name: "access_tier",
Help: "Access tier of blob, supports hot, cool and archive tiers.\nArchived blobs can be restored by setting access tier to hot or cool." +
@@ -91,13 +97,14 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
AccessTier string `config:"access_tier"`
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
}
// Fs represents a remote azure server
@@ -171,6 +178,32 @@ func parsePath(path string) (container, directory string, err error) {
return
}
// validateAccessTier checks if azureblob supports user supplied tier
func validateAccessTier(tier string) bool {
switch tier {
case string(azblob.AccessTierHot),
string(azblob.AccessTierCool),
string(azblob.AccessTierArchive):
// valid cases
return true
default:
return false
}
}
// validAccessTiers returns list of supported storage tiers on azureblob fs
func validAccessTiers() []string {
validTiers := [...]azblob.AccessTierType{azblob.AccessTierHot, azblob.AccessTierCool,
azblob.AccessTierArchive}
var tiers [len(validTiers)]string
for i, tier := range validTiers {
tiers[i] = string(tier)
}
return tiers[:]
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (eg "Token has expired")
@@ -211,6 +244,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.ChunkSize > maxChunkSize {
return nil, errors.Errorf("azure: chunk size can't be greater than %v - was %v", maxChunkSize, opt.ChunkSize)
}
if opt.ListChunkSize > maxListChunkSize {
return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
}
container, directory, err := parsePath(root)
if err != nil {
return nil, err
@@ -221,15 +257,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.AccessTier == "" {
opt.AccessTier = string(defaultAccessTier)
} else {
switch opt.AccessTier {
case string(azblob.AccessTierHot):
case string(azblob.AccessTierCool):
case string(azblob.AccessTierArchive):
// valid cases
default:
return nil, errors.Errorf("azure: Supported access tiers are %s, %s and %s", string(azblob.AccessTierHot), string(azblob.AccessTierCool), azblob.AccessTierArchive)
}
} else if !validateAccessTier(opt.AccessTier) {
return nil, errors.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
}
var (
@@ -239,7 +269,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
)
switch {
case opt.Account != "" && opt.Key != "":
credential := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
if err != nil {
return nil, errors.Wrapf(err, "Failed to parse credentials")
}
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
if err != nil {
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
@@ -285,6 +319,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
SetTier: true,
GetTier: true,
ListTiers: true,
}).Fill(f)
if f.root != "" {
f.root += "/"
@@ -474,7 +511,7 @@ func (f *Fs) markContainerOK() {
// listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
err = f.list(dir, false, listChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
err = f.list(dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
@@ -545,7 +582,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
err = f.list(dir, true, listChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
err = f.list(dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
@@ -566,11 +603,11 @@ type listContainerFn func(*azblob.ContainerItem) error
// listContainersToFn lists the containers to the function supplied
func (f *Fs) listContainersToFn(fn listContainerFn) error {
params := azblob.ListContainersSegmentOptions{
MaxResults: int32(listChunkSize),
MaxResults: int32(f.opt.ListChunkSize),
}
ctx := context.Background()
for marker := (azblob.Marker{}); marker.NotDone(); {
var response *azblob.ListContainersResponse
var response *azblob.ListContainersSegmentResponse
err := f.pacer.Call(func() (bool, error) {
var err error
response, err = f.svcURL.ListContainersSegment(ctx, marker, params)
@@ -752,7 +789,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
var startCopy *azblob.BlobStartCopyFromURLResponse
err = f.pacer.Call(func() (bool, error) {
startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, options, options)
startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, azblob.ModifiedAccessConditions{}, options)
return f.shouldRetry(err)
})
if err != nil {
@@ -837,9 +874,8 @@ func (o *Object) setMetadata(metadata azblob.Metadata) {
// o.md5
// o.meta
func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetPropertiesResponse) (err error) {
// NOTE - In BlobGetPropertiesResponse, Client library returns MD5 as base64 decoded string
// unlike BlobProperties in BlobItem (used in decodeMetadataFromBlob) which returns base64
// encoded bytes. Object needs to maintain this as base64 encoded string.
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
// this as base64 encoded string.
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
o.mimeType = info.ContentType()
o.size = info.ContentLength()
@@ -851,7 +887,9 @@ func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetProper
}
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
o.md5 = string(info.Properties.ContentMD5)
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
// this as base64 encoded string.
o.md5 = base64.StdEncoding.EncodeToString(info.Properties.ContentMD5)
o.mimeType = *info.Properties.ContentType
o.size = *info.Properties.ContentLength
o.modTime = info.Properties.LastModified
@@ -1068,7 +1106,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, blob *azblob.BlobURL,
var (
rawID uint64
blockID = "" // id in base64 encoded form
blocks = make([]string, totalParts)
blocks []string
)
// increment the blockID
@@ -1125,11 +1163,14 @@ outer:
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
// Upload the block, with MD5 for check
md5sum := md5.Sum(buf)
transactionalMD5 := md5sum[:]
err = o.fs.pacer.Call(func() (bool, error) {
bufferReader := bytes.NewReader(buf)
wrappedReader := wrap(bufferReader)
rs := readSeeker{wrappedReader, bufferReader}
_, err = blockBlobURL.StageBlock(ctx, blockID, rs, ac)
_, err = blockBlobURL.StageBlock(ctx, blockID, &rs, ac, transactionalMD5)
return o.fs.shouldRetry(err)
})
@@ -1236,16 +1277,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
// Now, set blob tier based on configured access tier
desiredAccessTier := azblob.AccessTierType(o.fs.opt.AccessTier)
err = o.fs.pacer.Call(func() (bool, error) {
_, err := blob.SetTier(ctx, desiredAccessTier)
return o.fs.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Failed to set Blob Tier")
}
return nil
return o.SetTier(o.fs.opt.AccessTier)
}
// Remove an object
@@ -1270,6 +1302,46 @@ func (o *Object) AccessTier() azblob.AccessTierType {
return o.accessTier
}
// SetTier performs changing object tier
func (o *Object) SetTier(tier string) error {
if !validateAccessTier(tier) {
return errors.Errorf("Tier %s not supported by Azure Blob Storage", tier)
}
// Check if current tier already matches with desired tier
if o.GetTier() == tier {
return nil
}
desiredAccessTier := azblob.AccessTierType(tier)
blob := o.getBlobReference()
ctx := context.Background()
err := o.fs.pacer.Call(func() (bool, error) {
_, err := blob.SetTier(ctx, desiredAccessTier)
return o.fs.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Failed to set Blob Tier")
}
// Set access tier on local object also, this typically
// gets updated on get blob properties
o.accessTier = desiredAccessTier
fs.Debugf(o, "Successfully changed object tier to %s", tier)
return nil
}
// GetTier returns object tier in azure as string
func (o *Object) GetTier() string {
return string(o.accessTier)
}
// ListTiers returns list of storage tiers supported on this object
func (o *Object) ListTiers() []string {
return validAccessTiers()
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}

View File

@@ -0,0 +1,20 @@
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
package azureblob
import (
"testing"
"github.com/stretchr/testify/assert"
)
func (f *Fs) InternalTest(t *testing.T) {
// Check first feature flags are set on this
// remote
enabled := f.Features().ListTiers
assert.True(t, enabled)
enabled = f.Features().SetTier
assert.True(t, enabled)
enabled = f.Features().GetTier
assert.True(t, enabled)
}

View File

@@ -61,7 +61,7 @@ func (e *Error) Error() string {
var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status"
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"
// Types of things in Item
const (
@@ -86,6 +86,10 @@ type Item struct {
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
SharedLink struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
} `json:"shared_link"`
}
// ModTime returns the modification time of the item
@@ -145,6 +149,14 @@ type CopyFile struct {
Parent Parent `json:"parent"`
}
// CreateSharedLink is the request for Public Link
type CreateSharedLink struct {
SharedLink struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
} `json:"shared_link"`
}
// UploadSessionRequest is uses in Create Upload Session
type UploadSessionRequest struct {
FolderID string `json:"folder_id,omitempty"` // don't pass for update

View File

@@ -126,6 +126,7 @@ type Object struct {
size int64 // size of the object
modTime time.Time // modification time of the object
id string // ID of the object
publicLink string // Public Link for the object
sha1 string // SHA-1 of the object content
}
@@ -299,6 +300,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
f.features.Fill(&newF)
// return an error with an fs which points to the parent
return &newF, fs.ErrorIsFile
}
@@ -844,6 +846,46 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
return nil
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(remote string) (string, error) {
id, err := f.dirCache.FindDir(remote, false)
var opts rest.Opts
if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote)
opts = rest.Opts{
Method: "PUT",
Path: "/folders/" + id,
Parameters: fieldsValue(),
}
} else {
fs.Debugf(f, "attempting to share single file '%s'", remote)
o, err := f.NewObject(remote)
if err != nil {
return "", err
}
if o.(*Object).publicLink != "" {
return o.(*Object).publicLink, nil
}
opts = rest.Opts{
Method: "PUT",
Path: "/files/" + o.(*Object).id,
Parameters: fieldsValue(),
}
}
shareLink := api.CreateSharedLink{}
var info api.Item
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &shareLink, &info)
return shouldRetry(resp, err)
})
return info.SharedLink.URL, err
}
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
@@ -908,6 +950,7 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
o.sha1 = info.SHA1
o.modTime = info.ModTime()
o.id = info.ID
o.publicLink = info.SharedLink.URL
return nil
}
@@ -1087,6 +1130,7 @@ var (
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)

backend/cache/cache.go (vendored, 225 lines changed)
View File

@@ -6,10 +6,12 @@ import (
"context"
"fmt"
"io"
"math"
"os"
"os/signal"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
@@ -79,6 +81,10 @@ func init() {
Help: "The plex token for authentication - auto set normally",
Hide: fs.OptionHideBoth,
Advanced: true,
}, {
Name: "plex_insecure",
Help: "Skip all certificate verifications when connecting to the Plex server",
Advanced: true,
}, {
Name: "chunk_size",
Help: "The size of a chunk. Lower value good for slow connections but can affect seamless reading.",
@@ -193,6 +199,7 @@ type Options struct {
PlexUsername string `config:"plex_username"`
PlexPassword string `config:"plex_password"`
PlexToken string `config:"plex_token"`
PlexInsecure bool `config:"plex_insecure"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
InfoAge fs.Duration `config:"info_age"`
ChunkTotalSize fs.SizeSuffix `config:"chunk_total_size"`
@@ -248,7 +255,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
return nil, errors.Errorf("don't set cache-total-chunk-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
}
@@ -261,10 +268,15 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
}
remotePath := path.Join(opt.Remote, rpath)
wrappedFs, wrapErr := fs.NewFs(remotePath)
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(opt.Remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", opt.Remote)
}
remotePath := path.Join(wPath, rootPath)
wrappedFs, wrapErr := wInfo.NewFs(wName, remotePath, wConfig)
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
return nil, errors.Wrapf(wrapErr, "failed to make remote %s:%s to wrap", wName, remotePath)
}
var fsErr error
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -290,7 +302,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
f.plexConnector = &plexConnector{}
if opt.PlexURL != "" {
if opt.PlexToken != "" {
f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken)
f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
if err != nil {
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
}
@@ -300,7 +312,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
decPass = opt.PlexPassword
}
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, func(token string) {
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
m.Set("plex_token", token)
})
if err != nil {
@@ -455,6 +467,39 @@ Eg
Title: "Get cache stats",
Help: `
Show statistics for the cache remote.
`,
})
rc.Add(rc.Call{
Path: "cache/fetch",
Fn: f.rcFetch,
Title: "Fetch file chunks",
Help: `
Ensure the specified file chunks are cached on disk.
The chunks= parameter specifies the file chunks to check.
It takes a comma separated list of array slice indices.
The slice indices are similar to Python slices: start[:end]
start is the 0 based chunk number from the beginning of the file
to fetch inclusive. end is 0 based chunk number from the beginning
of the file to fetch exclisive.
Both values can be negative, in which case they count from the back
of the file. The value "-5:" represents the last 5 chunks of a file.
Some valid examples are:
":5,-5:" -> the first and last five chunks
"0,-2" -> the first and the second last chunk
"0:10" -> the first ten chunks
Any parameter with a key that starts with "file" can be used to
specify files to fetch, eg
rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye
File names will automatically be encrypted when the a crypt remote
is used on top of the cache.
`,
})
@@ -472,6 +517,22 @@ func (f *Fs) httpStats(in rc.Params) (out rc.Params, err error) {
return out, nil
}
func (f *Fs) unwrapRemote(remote string) string {
remote = cleanPath(remote)
if remote != "" {
// if it's wrapped by crypt we need to check what format we got
if cryptFs, yes := f.isWrappedByCrypt(); yes {
_, err := cryptFs.DecryptFileName(remote)
// if it failed to decrypt then it is a decrypted format and we need to encrypt it
if err != nil {
return cryptFs.EncryptFileName(remote)
}
// else it's an encrypted format and we can use it as it is
}
}
return remote
}
func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
out = make(rc.Params)
remoteInt, ok := in["remote"]
@@ -485,20 +546,9 @@ func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
withData = true
}
if cleanPath(remote) != "" {
// if it's wrapped by crypt we need to check what format we got
if cryptFs, yes := f.isWrappedByCrypt(); yes {
_, err := cryptFs.DecryptFileName(remote)
// if it failed to decrypt then it is a decrypted format and we need to encrypt it
if err != nil {
remote = cryptFs.EncryptFileName(remote)
}
// else it's an encrypted format and we can use it as it is
}
if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
return out, errors.Errorf("%s doesn't exist in cache", remote)
}
remote = f.unwrapRemote(remote)
if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
return out, errors.Errorf("%s doesn't exist in cache", remote)
}
co := NewObject(f, remote)
@@ -528,6 +578,141 @@ func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
return out, nil
}
func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
type chunkRange struct {
start, end int64
}
parseChunks := func(ranges string) (crs []chunkRange, err error) {
for _, part := range strings.Split(ranges, ",") {
var start, end int64 = 0, math.MaxInt64
switch ints := strings.Split(part, ":"); len(ints) {
case 1:
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
}
end = start + 1
case 2:
if ints[0] != "" {
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
}
}
if ints[1] != "" {
end, err = strconv.ParseInt(ints[1], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
}
}
default:
return nil, errors.Errorf("invalid range: %q", part)
}
crs = append(crs, chunkRange{start: start, end: end})
}
return
}
walkChunkRange := func(cr chunkRange, size int64, cb func(chunk int64)) {
if size <= 0 {
return
}
chunks := (size-1)/f.ChunkSize() + 1
start, end := cr.start, cr.end
if start < 0 {
start += chunks
}
if end <= 0 {
end += chunks
}
if end <= start {
return
}
switch {
case start < 0:
start = 0
case start >= chunks:
return
}
switch {
case end <= start:
end = start + 1
case end >= chunks:
end = chunks
}
for i := start; i < end; i++ {
cb(i)
}
}
walkChunkRanges := func(crs []chunkRange, size int64, cb func(chunk int64)) {
for _, cr := range crs {
walkChunkRange(cr, size, cb)
}
}
v, ok := in["chunks"]
if !ok {
return nil, errors.New("missing chunks parameter")
}
s, ok := v.(string)
if !ok {
return nil, errors.New("invalid chunks parameter")
}
delete(in, "chunks")
crs, err := parseChunks(s)
if err != nil {
return nil, errors.Wrap(err, "invalid chunks parameter")
}
var files [][2]string
for k, v := range in {
if !strings.HasPrefix(k, "file") {
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
}
switch v := v.(type) {
case string:
files = append(files, [2]string{v, f.unwrapRemote(v)})
default:
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
}
}
type fileStatus struct {
Error string
FetchedChunks int
}
fetchedChunks := make(map[string]fileStatus, len(files))
for _, pair := range files {
file, remote := pair[0], pair[1]
var status fileStatus
o, err := f.NewObject(remote)
if err != nil {
fetchedChunks[file] = fileStatus{Error: err.Error()}
continue
}
co := o.(*Object)
err = co.refreshFromSource(true)
if err != nil {
fetchedChunks[file] = fileStatus{Error: err.Error()}
continue
}
handle := NewObjectHandle(co, f)
handle.UseMemory = false
handle.scaleWorkers(1)
walkChunkRanges(crs, co.Size(), func(chunk int64) {
_, err := handle.getChunk(chunk * f.ChunkSize())
if err != nil {
if status.Error == "" {
status.Error = err.Error()
}
} else {
status.FetchedChunks++
}
})
fetchedChunks[file] = status
}
return rc.Params{"status": fetchedChunks}, nil
}
// receiveChangeNotify is a wrapper to notifications sent from the wrapped FS about changed files
func (f *Fs) receiveChangeNotify(forgetPath string, entryType fs.EntryType) {
if crypt, yes := f.isWrappedByCrypt(); yes {

View File

@@ -1,455 +0,0 @@
// +build !plan9
package cache_test
import (
"math/rand"
"os"
"path"
"strconv"
"testing"
"time"
"fmt"
"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require"
)
func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err)
}
func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
// create some rand test data
testSize := int64(524288000)
testReader := runInstance.randomReader(t, testSize)
bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
runInstance.writeRemoteReader(t, rootFs, "one", testReader)
// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.NoError(t, err)
if runInstance.rootIsCrypt {
require.Equal(t, int64(524416032), ti.Size())
} else {
require.Equal(t, testSize, ti.Size())
}
de1, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, de1, 1)
runInstance.completeBackgroundUpload(t, "one", bu)
// check if it was removed from temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.True(t, os.IsNotExist(err))
// check if it can be read
data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
require.NoError(t, err)
require.Len(t, data2, 1024)
}
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
testSize := int64(10485760)
testReader := runInstance.randomReader(t, testSize)
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
de1, err := runInstance.list(t, rootFs, "one/test")
require.NoError(t, err)
require.Len(t, de1, 1)
time.Sleep(time.Second * 5)
//_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
//require.NoError(t, err)
err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
require.NoError(t, err)
// check if it can be read
de1, err = runInstance.list(t, rootFs, "second/test")
require.NoError(t, err)
require.Len(t, de1, 1)
}
func TestInternalUploadTempPathCleaned(t *testing.T) {
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
testSize := int64(1048576)
testReader := runInstance.randomReader(t, testSize)
testReader2 := runInstance.randomReader(t, testSize)
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2)
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
require.True(t, os.IsNotExist(err))
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.True(t, os.IsNotExist(err))
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second")))
require.False(t, os.IsNotExist(err))
runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin")
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin")))
require.True(t, os.IsNotExist(err))
de1, err := runInstance.list(t, rootFs, "one/test")
require.NoError(t, err)
require.Len(t, de1, 1)
// check if it can be read
de1, err = runInstance.list(t, rootFs, "second")
require.NoError(t, err)
require.Len(t, de1, 1)
}
func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("test")
require.NoError(t, err)
minSize := 5242880
maxSize := 10485760
totalFiles := 10
rand.Seed(time.Now().Unix())
lastFile := ""
for i := 0; i < totalFiles; i++ {
size := int64(rand.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
require.NoError(t, err)
require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))
if runInstance.wrappedIsExternal && i < totalFiles-1 {
time.Sleep(time.Second * 3)
}
lastFile = remote
}
// check if cache lists all files, likely temp upload didn't finish yet
de1, err := runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)
// wait for background uploader to do its thing
runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)
// retry until we have no more temp files and fail if they don't go down to 0
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
require.True(t, os.IsNotExist(err))
// check if cache lists all files
de1, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)
}
func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
// create some rand test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
// test DirMove - allowed
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("second/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.NoError(t, err)
_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.Error(t, err)
var started bool
started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
require.NoError(t, err)
require.False(t, started)
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}
// test Rmdir - allowed
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
require.Contains(t, err.Error(), "directory not empty")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.False(t, started)
require.NoError(t, err)
// test Move/Rename -- allowed
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.NoError(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/second")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}
// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}
// test Remove -- allowed
err = runInstance.rm(t, rootFs, "test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// test Update -- allowed
firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
require.NoError(t, err)
obj2, err := rootFs.NewObject("test/one")
require.NoError(t, err)
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
require.Equal(t, "one content updated", string(data2))
tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
if runInstance.rootIsCrypt {
require.Equal(t, int64(67), tmpInfo.Size())
} else {
require.Equal(t, int64(len(data2)), tmpInfo.Size())
}
// test SetModTime -- allowed
secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
require.NotEqual(t, secondModTime, firstModTime)
require.NotEqual(t, time.Time{}, firstModTime)
require.NotEqual(t, time.Time{}, secondModTime)
}
func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
// create some rand test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
require.NoError(t, err)
// test DirMove
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.Error(t, err)
}
// test Rmdir
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
// test Move/Rename
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.Error(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/second")
require.Error(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.Error(t, err)
}
// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}
// test Remove
err = runInstance.rm(t, rootFs, "test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// test Update - this seems to work. Why? FIXME
//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
// data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
// require.Equal(t, "one content", string(data2))
//
// tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
// require.NoError(t, err)
// if runInstance.rootIsCrypt {
// require.Equal(t, int64(67), tmpInfo.Size())
// } else {
// require.Equal(t, int64(len(data2)), tmpInfo.Size())
// }
//})
//require.Error(t, err)
// test SetModTime -- seems to work cause of previous
//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//require.Equal(t, secondModTime, firstModTime)
//require.NotEqual(t, time.Time{}, firstModTime)
//require.NotEqual(t, time.Time{}, secondModTime)
}

View File

@@ -1,12 +0,0 @@
--- cache_upload_test.go
+++ cache_upload_test.go
@@ -1500,9 +1469,6 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
}
r.tempFiles = nil
debug.FreeOSMemory()
- for k, v := range r.runDefaultFlagMap {
- _ = flag.Set(k, v)
- }
}
func (r *run) randomBytes(t *testing.T, size int64) []byte {

View File

@@ -49,12 +49,13 @@ type Handle struct {
offset int64
seenOffsets map[int64]bool
mu sync.Mutex
workersWg sync.WaitGroup
confirmReading chan bool
UseMemory bool
workers []*worker
closed bool
reading bool
workers int
maxWorkerID int
UseMemory bool
closed bool
reading bool
}
// NewObjectHandle returns a new Handle for an existing Object
@@ -95,7 +96,7 @@ func (r *Handle) String() string {
// startReadWorkers will start the worker pool
func (r *Handle) startReadWorkers() {
if r.hasAtLeastOneWorker() {
if r.workers > 0 {
return
}
totalWorkers := r.cacheFs().opt.TotalWorkers
@@ -117,26 +118,27 @@ func (r *Handle) startReadWorkers() {
// scaleOutWorkers will increase the worker pool count by the provided amount
func (r *Handle) scaleWorkers(desired int) {
current := len(r.workers)
current := r.workers
if current == desired {
return
}
if current > desired {
// scale in gracefully
for i := 0; i < current-desired; i++ {
for r.workers > desired {
r.preloadQueue <- -1
r.workers--
}
} else {
// scale out
for i := 0; i < desired-current; i++ {
for r.workers < desired {
w := &worker{
r: r,
ch: r.preloadQueue,
id: current + i,
id: r.maxWorkerID,
}
r.workersWg.Add(1)
r.workers++
r.maxWorkerID++
go w.run()
r.workers = append(r.workers, w)
}
}
// ignore first scale out from 0
@@ -148,7 +150,7 @@ func (r *Handle) scaleWorkers(desired int) {
func (r *Handle) confirmExternalReading() {
// if we have a max value of workers
// then we skip this step
if len(r.workers) > 1 ||
if r.workers > 1 ||
!r.cacheFs().plexConnector.isConfigured() {
return
}
@@ -178,7 +180,7 @@ func (r *Handle) queueOffset(offset int64) {
}
}
for i := 0; i < len(r.workers); i++ {
for i := 0; i < r.workers; i++ {
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
if o < 0 || o >= r.cachedObject.Size() {
continue
@@ -193,16 +195,6 @@ func (r *Handle) queueOffset(offset int64) {
}
}
func (r *Handle) hasAtLeastOneWorker() bool {
oneWorker := false
for i := 0; i < len(r.workers); i++ {
if r.workers[i].isRunning() {
oneWorker = true
}
}
return oneWorker
}
// getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it
// it can be from transient or persistent cache
// it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer
@@ -243,7 +235,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
// not found in ram or
// the worker didn't manage to download the chunk in time so we abort and close the stream
if err != nil || len(data) == 0 || !found {
if !r.hasAtLeastOneWorker() {
if r.workers == 0 {
fs.Errorf(r, "out of workers")
return nil, io.ErrUnexpectedEOF
}
@@ -304,14 +296,7 @@ func (r *Handle) Close() error {
close(r.preloadQueue)
r.closed = true
// wait for workers to complete their jobs before returning
waitCount := 3
for i := 0; i < len(r.workers); i++ {
waitIdx := 0
for r.workers[i].isRunning() && waitIdx < waitCount {
time.Sleep(time.Second)
waitIdx++
}
}
r.workersWg.Wait()
r.memory.db.Flush()
fs.Debugf(r, "cache reader closed %v", r.offset)
@@ -348,12 +333,9 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
}
type worker struct {
r *Handle
ch <-chan int64
rc io.ReadCloser
id int
running bool
mu sync.Mutex
r *Handle
rc io.ReadCloser
id int
}
// String is a representation of this worker
@@ -398,33 +380,19 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
})
}
func (w *worker) isRunning() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.running
}
func (w *worker) setRunning(f bool) {
w.mu.Lock()
defer w.mu.Unlock()
w.running = f
}
// run is the main loop for the worker which receives offsets to preload
func (w *worker) run() {
var err error
var data []byte
defer w.setRunning(false)
defer func() {
if w.rc != nil {
_ = w.rc.Close()
w.setRunning(false)
}
w.r.workersWg.Done()
}()
for {
chunkStart, open := <-w.ch
w.setRunning(true)
chunkStart, open := <-w.r.preloadQueue
if chunkStart < 0 || !open {
break
}

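The handle.go changes above drop the []*worker slice and the per-worker running flag in favour of a plain worker counter, a maxWorkerID used only for naming, and a sync.WaitGroup: scaling in pushes a -1 sentinel onto the preload queue, and Close simply waits on the group instead of polling. A minimal, self-contained sketch of that pattern, with illustrative names (pool, queue) that are not rclone code:

package main

import (
	"fmt"
	"sync"
)

// pool tracks workers only by a counter; scale-in sends a sentinel on the
// shared queue, and shutdown waits on a WaitGroup rather than per-worker flags.
type pool struct {
	queue   chan int64
	wg      sync.WaitGroup
	workers int
	nextID  int
}

func (p *pool) scale(desired int) {
	for p.workers > desired { // scale in: one sentinel per surplus worker
		p.queue <- -1
		p.workers--
	}
	for p.workers < desired { // scale out: new workers always get fresh IDs
		id := p.nextID
		p.nextID++
		p.workers++
		p.wg.Add(1)
		go func() {
			defer p.wg.Done()
			for offset := range p.queue {
				if offset < 0 {
					return // sentinel: this worker retires
				}
				fmt.Printf("worker %d handles offset %d\n", id, offset)
			}
		}()
	}
}

func main() {
	p := &pool{queue: make(chan int64)}
	p.scale(4)
	for i := int64(0); i < 8; i++ {
		p.queue <- i * 1024
	}
	p.scale(0)     // retire every worker via sentinels
	close(p.queue) // then close the queue
	p.wg.Wait()    // and wait for them all, as Close now does
}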

@@ -208,11 +208,17 @@ func (o *Object) SetModTime(t time.Time) error {
// Open is used to request a specific part of the file using fs.RangeOption
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
if err := o.refreshFromSource(true); err != nil {
var err error
if o.Object == nil {
err = o.refreshFromSource(true)
} else {
err = o.refresh()
}
if err != nil {
return nil, err
}
var err error
cacheReader := NewObjectHandle(o, o.CacheFs)
var offset, limit int64 = 0, -1
for _, option := range options {

backend/cache/plex.go (vendored)

@@ -3,6 +3,7 @@
package cache
import (
"crypto/tls"
"encoding/json"
"fmt"
"net/http"
@@ -54,6 +55,7 @@ type plexConnector struct {
username string
password string
token string
insecure bool
f *Fs
mu sync.Mutex
running bool
@@ -63,7 +65,7 @@ type plexConnector struct {
}
// newPlexConnector connects to a Plex server and generates a token
func newPlexConnector(f *Fs, plexURL, username, password string, saveToken func(string)) (*plexConnector, error) {
func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool, saveToken func(string)) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil {
return nil, err
@@ -75,6 +77,7 @@ func newPlexConnector(f *Fs, plexURL, username, password string, saveToken func(
username: username,
password: password,
token: "",
insecure: insecure,
stateCache: cache.New(time.Hour, time.Minute),
saveToken: saveToken,
}
@@ -83,7 +86,7 @@ func newPlexConnector(f *Fs, plexURL, username, password string, saveToken func(
}
// newPlexConnector connects to a Plex server and generates a token
func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, error) {
func newPlexConnectorWithToken(f *Fs, plexURL, token string, insecure bool) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil {
return nil, err
@@ -93,6 +96,7 @@ func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, er
f: f,
url: u,
token: token,
insecure: insecure,
stateCache: cache.New(time.Hour, time.Minute),
}
pc.listenWebsocket()
@@ -107,14 +111,26 @@ func (p *plexConnector) closeWebsocket() {
p.running = false
}
func (p *plexConnector) websocketDial() (*websocket.Conn, error) {
u := strings.TrimRight(strings.Replace(strings.Replace(
p.url.String(), "http://", "ws://", 1), "https://", "wss://", 1), "/")
url := fmt.Sprintf(defPlexNotificationURL, u, p.token)
config, err := websocket.NewConfig(url, "http://localhost")
if err != nil {
return nil, err
}
if p.insecure {
config.TlsConfig = &tls.Config{InsecureSkipVerify: true}
}
return websocket.DialConfig(config)
}
func (p *plexConnector) listenWebsocket() {
p.runningMu.Lock()
defer p.runningMu.Unlock()
u := strings.Replace(p.url.String(), "http://", "ws://", 1)
u = strings.Replace(u, "https://", "wss://", 1)
conn, err := websocket.Dial(fmt.Sprintf(defPlexNotificationURL, strings.TrimRight(u, "/"), p.token),
"", "http://localhost")
conn, err := p.websocketDial()
if err != nil {
fs.Errorf("plex", "%v", err)
return

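For reference, a standalone sketch of the technique websocketDial uses above: dialling a wss:// endpoint with golang.org/x/net/websocket while skipping certificate verification. The URL and origin below are placeholders, not values taken from rclone or Plex:

package main

import (
	"crypto/tls"
	"fmt"
	"log"

	"golang.org/x/net/websocket"
)

func main() {
	// Placeholder endpoint; a real Plex notification URL would also carry a token.
	config, err := websocket.NewConfig("wss://server.example:32400/:/websockets/notifications", "http://localhost")
	if err != nil {
		log.Fatal(err)
	}
	// Equivalent of the insecure flag above: accept the server certificate without verification.
	config.TlsConfig = &tls.Config{InsecureSkipVerify: true}
	conn, err := websocket.DialConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = conn.Close() }()
	fmt.Println("websocket connected")
}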

@@ -130,16 +130,20 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
// Look for a file first
remotePath := path.Join(remote, cipher.EncryptFileName(rpath))
wrappedFs, err := fs.NewFs(remotePath)
remotePath := path.Join(wPath, cipher.EncryptFileName(rpath))
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = path.Join(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = fs.NewFs(remotePath)
remotePath = path.Join(wPath, cipher.EncryptDirName(rpath))
wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
}
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remotePath)
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
}
f := &Fs{
Fs: wrappedFs,

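The crypt change above parses the wrapped remote with fs.ConfigFs and joins the encrypted leaf onto the parsed path rather than onto the whole "remote:path" string. A small illustration of why that matters, using only the standard library (the remote names are made up):

package main

import (
	"fmt"
	"path"
)

func main() {
	// Old approach: joining onto the full remote string inserts a "/" right
	// after the colon, so a relative wrapped path becomes an absolute one.
	fmt.Println(path.Join("cloud:", "encrypted-dir")) // cloud:/encrypted-dir

	// New approach: join onto the parsed path only, then hand name and path
	// to NewFs separately, so the wrapped path stays relative.
	fmt.Println(path.Join("", "encrypted-dir")) // encrypted-dir

	// A wrapped path that already starts with "/" keeps it either way.
	fmt.Println(path.Join("/data", "encrypted-dir")) // /data/encrypted-dir
}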

@@ -779,7 +779,10 @@ func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
}
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Create(createInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
info, err = f.svc.Files.Create(createInfo).
Fields(googleapi.Field(partialFields)).
SupportsTeamDrives(f.isTeamDrive).
Do()
return shouldRetry(err)
})
if err != nil {
@@ -807,7 +810,9 @@ func (f *Fs) exportFormats() map[string][]string {
var about *drive.About
var err error
err = f.pacer.Call(func() (bool, error) {
about, err = f.svc.About.Get().Fields("exportFormats").Do()
about, err = f.svc.About.Get().
Fields("exportFormats").
Do()
return shouldRetry(err)
})
if err != nil {
@@ -1178,7 +1183,12 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt
// Make the API request to upload metadata and file data.
// Don't retry, return a retry error instead
err = f.pacer.CallNoRetry(func() (bool, error) {
info, err = f.svc.Files.Create(createInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(f.opt.KeepRevisionForever).Do()
info, err = f.svc.Files.Create(createInfo).
Media(in, googleapi.ContentType("")).
Fields(googleapi.Field(partialFields)).
SupportsTeamDrives(f.isTeamDrive).
KeepRevisionForever(f.opt.KeepRevisionForever).
Do()
return shouldRetry(err)
})
if err != nil {
@@ -1217,7 +1227,12 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error {
fs.Infof(srcDir, "merging %q", info.Name)
// Move the file into the destination
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.Update(info.Id, nil).RemoveParents(srcDir.ID()).AddParents(dstDir.ID()).Fields("").SupportsTeamDrives(f.isTeamDrive).Do()
_, err = f.svc.Files.Update(info.Id, nil).
RemoveParents(srcDir.ID()).
AddParents(dstDir.ID()).
Fields("").
SupportsTeamDrives(f.isTeamDrive).
Do()
return shouldRetry(err)
})
if err != nil {
@@ -1254,9 +1269,15 @@ func (f *Fs) rmdir(directoryID string, useTrash bool) error {
info := drive.File{
Trashed: true,
}
_, err = f.svc.Files.Update(directoryID, &info).Fields("").SupportsTeamDrives(f.isTeamDrive).Do()
_, err = f.svc.Files.Update(directoryID, &info).
Fields("").
SupportsTeamDrives(f.isTeamDrive).
Do()
} else {
err = f.svc.Files.Delete(directoryID).Fields("").SupportsTeamDrives(f.isTeamDrive).Do()
err = f.svc.Files.Delete(directoryID).
Fields("").
SupportsTeamDrives(f.isTeamDrive).
Do()
}
return shouldRetry(err)
})
@@ -1335,7 +1356,11 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
var info *drive.File
err = o.fs.pacer.Call(func() (bool, error) {
info, err = o.fs.svc.Files.Copy(srcObj.id, createInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(f.opt.KeepRevisionForever).Do()
info, err = o.fs.svc.Files.Copy(srcObj.id, createInfo).
Fields(googleapi.Field(partialFields)).
SupportsTeamDrives(f.isTeamDrive).
KeepRevisionForever(f.opt.KeepRevisionForever).
Do()
return shouldRetry(err)
})
if err != nil {
@@ -1364,9 +1389,15 @@ func (f *Fs) Purge() error {
info := drive.File{
Trashed: true,
}
_, err = f.svc.Files.Update(f.dirCache.RootID(), &info).Fields("").SupportsTeamDrives(f.isTeamDrive).Do()
_, err = f.svc.Files.Update(f.dirCache.RootID(), &info).
Fields("").
SupportsTeamDrives(f.isTeamDrive).
Do()
} else {
err = f.svc.Files.Delete(f.dirCache.RootID()).Fields("").SupportsTeamDrives(f.isTeamDrive).Do()
err = f.svc.Files.Delete(f.dirCache.RootID()).
Fields("").
SupportsTeamDrives(f.isTeamDrive).
Do()
}
return shouldRetry(err)
})
@@ -1452,7 +1483,12 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// Do the move
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Update(srcObj.id, dstInfo).RemoveParents(srcParentID).AddParents(dstParents).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
info, err = f.svc.Files.Update(srcObj.id, dstInfo).
RemoveParents(srcParentID).
AddParents(dstParents).
Fields(googleapi.Field(partialFields)).
SupportsTeamDrives(f.isTeamDrive).
Do()
return shouldRetry(err)
})
if err != nil {
@@ -1489,7 +1525,10 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
err = f.pacer.Call(func() (bool, error) {
// TODO: On TeamDrives this might fail if lacking permissions to change ACLs.
// Need to either check `canShare` attribute on the object or see if a sufficient permission is already present.
_, err = f.svc.Permissions.Create(id, permission).Fields(googleapi.Field("id")).SupportsTeamDrives(f.isTeamDrive).Do()
_, err = f.svc.Permissions.Create(id, permission).
Fields(googleapi.Field("id")).
SupportsTeamDrives(f.isTeamDrive).
Do()
return shouldRetry(err)
})
if err != nil {
@@ -1584,7 +1623,12 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
Name: leaf,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.Update(srcID, &patch).RemoveParents(srcDirectoryID).AddParents(dstDirectoryID).Fields("").SupportsTeamDrives(f.isTeamDrive).Do()
_, err = f.svc.Files.Update(srcID, &patch).
RemoveParents(srcDirectoryID).
AddParents(dstDirectoryID).
Fields("").
SupportsTeamDrives(f.isTeamDrive).
Do()
return shouldRetry(err)
})
if err != nil {
@@ -1621,7 +1665,9 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), pollInter
var err error
var startPageToken *drive.StartPageToken
err = f.pacer.Call(func() (bool, error) {
startPageToken, err = f.svc.Changes.GetStartPageToken().SupportsTeamDrives(f.isTeamDrive).Do()
startPageToken, err = f.svc.Changes.GetStartPageToken().
SupportsTeamDrives(f.isTeamDrive).
Do()
return shouldRetry(err)
})
if err != nil {
@@ -1635,7 +1681,8 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), pollInter
var changeList *drive.ChangeList
err = f.pacer.Call(func() (bool, error) {
changesCall := f.svc.Changes.List(pageToken).Fields("nextPageToken,newStartPageToken,changes(fileId,file(name,parents,mimeType))")
changesCall := f.svc.Changes.List(pageToken).
Fields("nextPageToken,newStartPageToken,changes(fileId,file(name,parents,mimeType))")
if f.opt.ListChunk > 0 {
changesCall.PageSize(f.opt.ListChunk)
}
@@ -1862,7 +1909,10 @@ func (o *Object) SetModTime(modTime time.Time) error {
// Set modified date
var info *drive.File
err = o.fs.pacer.Call(func() (bool, error) {
info, err = o.fs.svc.Files.Update(o.id, updateInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).Do()
info, err = o.fs.svc.Files.Update(o.id, updateInfo).
Fields(googleapi.Field(partialFields)).
SupportsTeamDrives(o.fs.isTeamDrive).
Do()
return shouldRetry(err)
})
if err != nil {
@@ -2014,7 +2064,12 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
if size == 0 || size < int64(o.fs.opt.UploadCutoff) {
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
info, err = o.fs.svc.Files.Update(o.id, updateInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).KeepRevisionForever(o.fs.opt.KeepRevisionForever).Do()
info, err = o.fs.svc.Files.Update(o.id, updateInfo).
Media(in, googleapi.ContentType("")).
Fields(googleapi.Field(partialFields)).
SupportsTeamDrives(o.fs.isTeamDrive).
KeepRevisionForever(o.fs.opt.KeepRevisionForever).
Do()
return shouldRetry(err)
})
if err != nil {
@@ -2042,9 +2097,15 @@ func (o *Object) Remove() error {
info := drive.File{
Trashed: true,
}
_, err = o.fs.svc.Files.Update(o.id, &info).Fields("").SupportsTeamDrives(o.fs.isTeamDrive).Do()
_, err = o.fs.svc.Files.Update(o.id, &info).
Fields("").
SupportsTeamDrives(o.fs.isTeamDrive).
Do()
} else {
err = o.fs.svc.Files.Delete(o.id).Fields("").SupportsTeamDrives(o.fs.isTeamDrive).Do()
err = o.fs.svc.Files.Delete(o.id).
Fields("").
SupportsTeamDrives(o.fs.isTeamDrive).
Do()
}
return shouldRetry(err)
})


@@ -345,7 +345,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
// try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials != "" && opt.ServiceAccountFile != "" {
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file")


@@ -2,7 +2,9 @@ package hubic
import (
"net/http"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/swift"
)
@@ -21,12 +23,17 @@ func newAuth(f *Fs) *auth {
// Request constructs a http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(*swift.Connection) (*http.Request, error) {
err := a.f.getCredentials()
if err != nil {
return nil, err
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials()
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
}
return nil, nil
return nil, err
}
// Response parses the result of an http request


@@ -60,13 +60,13 @@ func init() {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Options: append([]fs.Option{{
Name: config.ConfigClientID,
Help: "Hubic Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Hubic Client Secret\nLeave blank normally.",
}},
}}, swift.SharedOptions...),
})
}


@@ -233,16 +233,17 @@ GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file>
// JottaFile represents a Jottacloud file
type JottaFile struct {
XMLName xml.Name
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
State string `xml:"currentRevision>state"`
CreatedAt Time `xml:"currentRevision>created"`
ModifiedAt Time `xml:"currentRevision>modified"`
Updated Time `xml:"currentRevision>updated"`
Size int64 `xml:"currentRevision>size"`
MimeType string `xml:"currentRevision>mime"`
MD5 string `xml:"currentRevision>md5"`
XMLName xml.Name
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
PublicSharePath string `xml:"publicSharePath"`
State string `xml:"currentRevision>state"`
CreatedAt Time `xml:"currentRevision>created"`
ModifiedAt Time `xml:"currentRevision>modified"`
Updated Time `xml:"currentRevision>updated"`
Size int64 `xml:"currentRevision>size"`
MimeType string `xml:"currentRevision>mime"`
MD5 string `xml:"currentRevision>md5"`
}
// Error is a custom Error for wrapping Jottacloud error responses

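The new PublicSharePath field is what the link support added elsewhere in this changeset reads back from the API. A quick sketch of how such a field decodes with encoding/xml; the struct is a local mirror of the relevant JottaFile fields and the XML fragment is hypothetical, not a captured Jottacloud response:

package main

import (
	"encoding/xml"
	"fmt"
	"log"
)

// jottaFile mirrors the Name and PublicSharePath tags from the struct above.
type jottaFile struct {
	XMLName         xml.Name
	Name            string `xml:"name,attr"`
	PublicSharePath string `xml:"publicSharePath"`
}

func main() {
	data := []byte(`<file name="report.txt"><publicSharePath>/share/abcdef</publicSharePath></file>`)
	var f jottaFile
	if err := xml.Unmarshal(data, &f); err != nil {
		log.Fatal(err)
	}
	fmt.Println(f.Name, "->", f.PublicSharePath) // report.txt -> /share/abcdef
}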

@@ -25,6 +25,7 @@ import (
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
@@ -39,6 +40,7 @@ const (
defaultMountpoint = "Sync"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com"
shareURL = "https://www.jottacloud.com/"
cachePrefix = "rclone-jcmd5-"
)
@@ -71,6 +73,16 @@ func init() {
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
Advanced: true,
}, {
Name: "hard_delete",
Help: "Delete files permanently rather than putting them into the trash.",
Default: false,
Advanced: true,
}, {
Name: "unlink",
Help: "Remove existing public link to file/folder with link command rather than creating.",
Default: false,
Advanced: true,
}},
})
}
@@ -81,6 +93,8 @@ type Options struct {
Pass string `config:"pass"`
Mountpoint string `config:"mountpoint"`
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
HardDelete bool `config:"hard_delete"`
Unlink bool `config:"unlink"`
}
// Fs represents a remote jottacloud
@@ -227,9 +241,14 @@ func errorHandler(resp *http.Response) error {
return errResponse
}
// filePathRaw returns an unescaped file path (f.root, file)
func (f *Fs) filePathRaw(file string) string {
return path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, file)))
}
// filePath returns an escaped file path (f.root, file)
func (f *Fs) filePath(file string) string {
return rest.URLPathEscape(path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, file))))
return rest.URLPathEscape(f.filePathRaw(file))
}
// filePath returns an escaped file path (f.root, remote)
@@ -425,6 +444,102 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
return entries, nil
}
// listFileDirFn is called from listFileDir to handle an object.
type listFileDirFn func(fs.DirEntry) error
// List the objects and directories into entries, from a
// special kind of JottaFolder representing a FileDirList
func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
pathPrefix := "/" + f.filePathRaw("") // Non-escaped prefix of API paths to be cut off, to be left with the remote path including the remoteStartPath
pathPrefixLength := len(pathPrefix)
startPath := path.Join(pathPrefix, remoteStartPath) // Non-escaped API path up to and including remoteStartPath, to decide if it should be created as a new dir object
startPathLength := len(startPath)
for i := range startFolder.Folders {
folder := &startFolder.Folders[i]
if folder.Deleted {
return nil
}
folderPath := path.Join(folder.Path, folder.Name)
remoteDirLength := len(folderPath) - pathPrefixLength
var remoteDir string
if remoteDirLength > 0 {
remoteDir = restoreReservedChars(folderPath[pathPrefixLength+1:])
if remoteDirLength > startPathLength {
d := fs.NewDir(remoteDir, time.Time(folder.ModifiedAt))
err := fn(d)
if err != nil {
return err
}
}
}
for i := range folder.Files {
file := &folder.Files[i]
if file.Deleted || file.State != "COMPLETED" {
continue
}
remoteFile := path.Join(remoteDir, restoreReservedChars(file.Name))
o, err := f.newObjectWithInfo(remoteFile, file)
if err != nil {
return err
}
err = fn(o)
if err != nil {
return err
}
}
}
return nil
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
opts := rest.Opts{
Method: "GET",
Path: f.filePath(dir),
Parameters: url.Values{},
}
opts.Parameters.Set("mode", "list")
var resp *http.Response
var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
// does not exist
if apiErr.StatusCode == http.StatusNotFound {
return fs.ErrorDirNotFound
}
}
return errors.Wrap(err, "couldn't list files")
}
list := walk.NewListRHelper(callback)
err = f.listFileDir(dir, &result, func(entry fs.DirEntry) error {
return list.Add(entry)
})
if err != nil {
return err
}
return list.Flush()
}
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
@@ -498,7 +613,11 @@ func (f *Fs) purgeCheck(dir string, check bool) (err error) {
NoResponse: true,
}
opts.Parameters.Set("dlDir", "true")
if f.opt.HardDelete {
opts.Parameters.Set("rmDir", "true")
} else {
opts.Parameters.Set("dlDir", "true")
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
@@ -506,7 +625,7 @@ func (f *Fs) purgeCheck(dir string, check bool) (err error) {
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "rmdir failed")
return errors.Wrap(err, "couldn't purge directory")
}
// TODO: Parse response?
@@ -579,7 +698,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
info, err := f.copyOrMove("cp", srcObj.filePath(), remote)
if err != nil {
return nil, errors.Wrap(err, "copy failed")
return nil, errors.Wrap(err, "couldn't copy file")
}
return f.newObjectWithInfo(remote, info)
@@ -609,7 +728,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
info, err := f.copyOrMove("mv", srcObj.filePath(), remote)
if err != nil {
return nil, errors.Wrap(err, "move failed")
return nil, errors.Wrap(err, "couldn't move file")
}
return f.newObjectWithInfo(remote, info)
@@ -653,11 +772,57 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
_, err = f.copyOrMove("mvDir", path.Join(f.endpointURL, replaceReservedChars(srcPath))+"/", dstRemote)
if err != nil {
return errors.Wrap(err, "moveDir failed")
return errors.Wrap(err, "couldn't move directory")
}
return nil
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(remote string) (link string, err error) {
opts := rest.Opts{
Method: "GET",
Path: f.filePath(remote),
Parameters: url.Values{},
}
if f.opt.Unlink {
opts.Parameters.Set("mode", "disableShare")
} else {
opts.Parameters.Set("mode", "enableShare")
}
var resp *http.Response
var result api.JottaFile
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(&opts, nil, &result)
return shouldRetry(resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
// does not exist
if apiErr.StatusCode == http.StatusNotFound {
return "", fs.ErrorObjectNotFound
}
}
if err != nil {
if f.opt.Unlink {
return "", errors.Wrap(err, "couldn't remove public link")
}
return "", errors.Wrap(err, "couldn't create public link")
}
if f.opt.Unlink {
if result.PublicSharePath != "" {
return "", errors.Errorf("couldn't remove public link - %q", result.PublicSharePath)
}
return "", nil
}
if result.PublicSharePath == "" {
return "", errors.New("couldn't create public link - no link path received")
}
link = path.Join(shareURL, result.PublicSharePath)
return link, nil
}
// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
info, err := f.getAccountInfo()
@@ -666,8 +831,11 @@ func (f *Fs) About() (*fs.Usage, error) {
}
usage := &fs.Usage{
Total: fs.NewUsageValue(info.Capacity),
Used: fs.NewUsageValue(info.Usage),
Used: fs.NewUsageValue(info.Usage),
}
if info.Capacity > 0 {
usage.Total = fs.NewUsageValue(info.Capacity)
usage.Free = fs.NewUsageValue(info.Capacity - info.Usage)
}
return usage, nil
}
@@ -914,7 +1082,11 @@ func (o *Object) Remove() error {
Parameters: url.Values{},
}
opts.Parameters.Set("dl", "true")
if o.fs.opt.HardDelete {
opts.Parameters.Set("rm", "true")
} else {
opts.Parameters.Set("dl", "true")
}
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallXML(&opts, nil, nil)
@@ -924,12 +1096,14 @@ func (o *Object) Remove() error {
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)


@@ -16,8 +16,10 @@ import (
"unicode/utf8"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
@@ -280,6 +282,13 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
fi, err = os.Stat(newPath)
if os.IsNotExist(err) {
// Skip bad symlinks
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
fs.Errorf(newRemote, "Listing error: %v", err)
accounting.Stats.Error(err)
continue
}
if err != nil {
return nil, err
}


@@ -91,9 +91,10 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
// ItemReference groups data needed to reference a OneDrive item
// across the service into a single structure.
type ItemReference struct {
DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only.
ID string `json:"id"` // Unique identifier for the item. Read/Write.
Path string `json:"path"` // Path that used to navigate to the item. Read/Write.
DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only.
ID string `json:"id"` // Unique identifier for the item. Read/Write.
Path string `json:"path"` // Path that used to navigate to the item. Read/Write.
DriveType string `json:"driveType"` // Type of the drive, Read-Only
}
// RemoteItemFacet groups data needed to reference a OneDrive remote item
@@ -244,7 +245,6 @@ type MoveItemRequest struct {
// Copy Item
// Upload From URL
type AsyncOperationStatus struct {
Operation string `json:"operation"` // The type of job being run.
PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete.
Status string `json:"status"` // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
}


@@ -10,7 +10,6 @@ import (
"io"
"log"
"net/http"
"net/url"
"path"
"strings"
"time"
@@ -33,48 +32,32 @@ import (
)
const (
rclonePersonalClientID = "0000000044165769"
rclonePersonalEncryptedClientSecret = "ugVWLNhKkVT1-cbTRO-6z1MlzwdW6aMwpKgNaFG-qXjEn_WfDnG9TVyRA5yuoliU"
rcloneBusinessClientID = "52857fec-4bc2-483f-9f1b-5fe28e97532c"
rcloneBusinessEncryptedClientSecret = "6t4pC8l6L66SFYVIi8PgECDyjXy_ABo1nsTaE-Lr9LpzC6yT4vNOwHsakwwdEui0O6B0kX8_xbBLj91J"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
rootURLPersonal = "https://api.onedrive.com/v1.0/drive" // root URL for requests
discoveryServiceURL = "https://api.office.com/discovery/"
configResourceURL = "resource_url"
rcloneClientID = "b15665d9-eda6-4092-8539-0eec376afd59"
rcloneEncryptedClientSecret = "_JUdzh3LnKNqSPcf4Wu5fgMFIQOI8glZu_akYgR8yf6egowNBg-R"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
graphURL = "https://graph.microsoft.com/v1.0"
configDriveID = "drive_id"
configDriveType = "drive_type"
driveTypePersonal = "personal"
driveTypeBusiness = "business"
driveTypeSharepoint = "documentLibrary"
)
// Globals
var (
// Description of how to auth for this app for a personal account
oauthPersonalConfig = &oauth2.Config{
Scopes: []string{
"wl.signin", // Allow single sign-on capabilities
"wl.offline_access", // Allow receiving a refresh token
"onedrive.readwrite", // r/w perms to all of a user's OneDrive files
},
Endpoint: oauth2.Endpoint{
AuthURL: "https://login.live.com/oauth20_authorize.srf",
TokenURL: "https://login.live.com/oauth20_token.srf",
},
ClientID: rclonePersonalClientID,
ClientSecret: obscure.MustReveal(rclonePersonalEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
// Description of how to auth for this app for a business account
oauthBusinessConfig = &oauth2.Config{
oauthConfig = &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: "https://login.microsoftonline.com/common/oauth2/authorize",
TokenURL: "https://login.microsoftonline.com/common/oauth2/token",
AuthURL: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
},
ClientID: rcloneBusinessClientID,
ClientSecret: obscure.MustReveal(rcloneBusinessEncryptedClientSecret),
Scopes: []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
oauthBusinessResource = oauth2.SetAuthURLParam("resource", discoveryServiceURL)
sharedURL = "https://api.onedrive.com/v1.0/drives" // root URL for remote shared resources
)
// Register with Fs
@@ -84,147 +67,143 @@ func init() {
Description: "Microsoft OneDrive",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
// choose account type
fmt.Printf("Choose OneDrive account type?\n")
fmt.Printf(" * Say b for a OneDrive business account\n")
fmt.Printf(" * Say p for a personal OneDrive account\n")
isPersonal := config.Command([]string{"bBusiness", "pPersonal"}) == 'p'
err := oauthutil.Config("onedrive", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return
}
if isPersonal {
// for personal accounts we don't safe a field about the account
err := oauthutil.Config("onedrive", name, m, oauthPersonalConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
} else {
err := oauthutil.ConfigErrorCheck("onedrive", name, m, func(req *http.Request) oauthutil.AuthError {
var resp oauthutil.AuthError
// Are we running headless?
if automatic, _ := m.Get(config.ConfigAutomatic); automatic != "" {
// Yes, okay we are done
return
}
resp.Name = req.URL.Query().Get("error")
resp.Code = strings.Split(req.URL.Query().Get("error_description"), ":")[0] // error_description begins with XXXXXXXXXXXX:
resp.Description = strings.Join(strings.Split(req.URL.Query().Get("error_description"), ":")[1:], ":")
resp.HelpURL = "https://rclone.org/onedrive/#troubleshooting"
return resp
}, oauthBusinessConfig, oauthBusinessResource)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return
}
type driveResource struct {
DriveID string `json:"id"`
DriveName string `json:"name"`
DriveType string `json:"driveType"`
}
type drivesResponse struct {
Drives []driveResource `json:"value"`
}
// Are we running headless?
if automatic, _ := m.Get(config.ConfigAutomatic); automatic != "" {
// Yes, okay we are done
return
}
type siteResource struct {
SiteID string `json:"id"`
SiteName string `json:"displayName"`
SiteURL string `json:"webUrl"`
}
type siteResponse struct {
Sites []siteResource `json:"value"`
}
type serviceResource struct {
ServiceAPIVersion string `json:"serviceApiVersion"`
ServiceEndpointURI string `json:"serviceEndpointUri"`
ServiceResourceID string `json:"serviceResourceId"`
}
type serviceResponse struct {
Services []serviceResource `json:"value"`
}
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure OneDrive: %v", err)
}
srv := rest.NewClient(oAuthClient)
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthBusinessConfig)
if err != nil {
log.Fatalf("Failed to configure OneDrive: %v", err)
return
}
srv := rest.NewClient(oAuthClient)
var opts rest.Opts
var finalDriveID string
var siteID string
switch config.Choose("Your choice",
[]string{"onedrive", "sharepoint", "driveid", "siteid", "search"},
[]string{"OneDrive Personal or Business", "Root Sharepoint site", "Type in driveID", "Type in SiteID", "Search a Sharepoint site"},
false) {
opts := rest.Opts{
Method: "GET",
RootURL: discoveryServiceURL,
Path: "/v2.0/me/services",
}
services := serviceResponse{}
resp, err := srv.CallJSON(&opts, nil, &services)
if err != nil {
fs.Errorf(nil, "Failed to query available services: %v", err)
return
}
if resp.StatusCode != 200 {
fs.Errorf(nil, "Failed to query available services: Got HTTP error code %d", resp.StatusCode)
return
}
var resourcesURL []string
var resourcesID []string
for _, service := range services.Services {
if service.ServiceAPIVersion == "v2.0" {
resourcesID = append(resourcesID, service.ServiceResourceID)
resourcesURL = append(resourcesURL, service.ServiceEndpointURI)
}
// we only support 2.0 API
fs.Infof(nil, "Skipping API %s endpoint %s", service.ServiceAPIVersion, service.ServiceEndpointURI)
}
var foundService string
if len(resourcesID) == 0 {
fs.Errorf(nil, "No Service found")
return
} else if len(resourcesID) == 1 {
foundService = resourcesID[0]
} else {
foundService = config.Choose("Choose resource URL", resourcesID, resourcesURL, false)
}
m.Set(configResourceURL, foundService)
oauthBusinessResource = oauth2.SetAuthURLParam("resource", foundService)
// get the token from the inital config
// we need to update the token with a resource
// specific token we will query now
token, err := oauthutil.GetToken(name, m)
if err != nil {
fs.Errorf(nil, "Error while getting token: %s", err)
return
}
// values for the token query
values := url.Values{}
values.Set("refresh_token", token.RefreshToken)
values.Set("grant_type", "refresh_token")
values.Set("resource", foundService)
values.Set("client_id", oauthBusinessConfig.ClientID)
values.Set("client_secret", oauthBusinessConfig.ClientSecret)
case "onedrive":
opts = rest.Opts{
Method: "POST",
RootURL: oauthBusinessConfig.Endpoint.TokenURL,
ContentType: "application/x-www-form-urlencoded",
Body: strings.NewReader(values.Encode()),
Method: "GET",
RootURL: graphURL,
Path: "/me/drives",
}
case "sharepoint":
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/root/drives",
}
case "driveid":
fmt.Printf("Paste your Drive ID here> ")
finalDriveID = config.ReadLine()
case "siteid":
fmt.Printf("Paste your Site ID here> ")
siteID = config.ReadLine()
case "search":
fmt.Printf("What to search for> ")
searchTerm := config.ReadLine()
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites?search=" + searchTerm,
}
// tokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
// we are only interested in the new tokens, all other fields we don't care
type tokenJSON struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
}
jsonToken := tokenJSON{}
resp, err = srv.CallJSON(&opts, nil, &jsonToken)
sites := siteResponse{}
_, err := srv.CallJSON(&opts, nil, &sites)
if err != nil {
fs.Errorf(nil, "Failed to get resource token: %v", err)
return
}
if resp.StatusCode != 200 {
fs.Errorf(nil, "Failed to get resource token: Got HTTP error code %d", resp.StatusCode)
return
log.Fatalf("Failed to query available sites: %v", err)
}
// update the tokens
token.AccessToken = jsonToken.AccessToken
token.RefreshToken = jsonToken.RefreshToken
// finally save them in the config
err = oauthutil.PutToken(name, m, token, true)
if err != nil {
fs.Errorf(nil, "Error while setting token: %s", err)
if len(sites.Sites) == 0 {
log.Fatalf("Search for '%s' returned no results", searchTerm)
} else {
fmt.Printf("Found %d sites, please select the one you want to use:\n", len(sites.Sites))
for index, site := range sites.Sites {
fmt.Printf("%d: %s (%s) id=%s\n", index, site.SiteName, site.SiteURL, site.SiteID)
}
siteID = sites.Sites[config.ChooseNumber("Choose drive to use:", 0, len(sites.Sites)-1)].SiteID
}
}
// if we have a siteID we need to ask for the drives
if siteID != "" {
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/" + siteID + "/drives",
}
}
// We don't have the final ID yet?
// query Microsoft Graph
if finalDriveID == "" {
drives := drivesResponse{}
_, err := srv.CallJSON(&opts, nil, &drives)
if err != nil {
log.Fatalf("Failed to query available drives: %v", err)
}
if len(drives.Drives) == 0 {
log.Fatalf("No drives found")
} else {
fmt.Printf("Found %d drives, please select the one you want to use:\n", len(drives.Drives))
for index, drive := range drives.Drives {
fmt.Printf("%d: %s (%s) id=%s\n", index, drive.DriveName, drive.DriveType, drive.DriveID)
}
finalDriveID = drives.Drives[config.ChooseNumber("Choose drive to use:", 0, len(drives.Drives)-1)].DriveID
}
}
// Test the driveID and get drive type
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/drives/" + finalDriveID + "/root"}
var rootItem api.Item
_, err = srv.CallJSON(&opts, nil, &rootItem)
if err != nil {
log.Fatalf("Failed to query root for drive %s: %v", finalDriveID, err)
}
fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
// This does not work, YET :)
if !config.Confirm() {
log.Fatalf("Cancelled by user")
}
m.Set(configDriveID, finalDriveID)
m.Set(configDriveType, rootItem.ParentReference.DriveType)
config.SaveConfig()
},
Options: []fs.Option{{
Name: config.ConfigClientID,
@@ -237,14 +216,25 @@ func init() {
Help: "Chunk size to upload files with - must be multiple of 320k.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
Advanced: true,
}, {
Name: "drive_id",
Help: "The ID of the drive to use",
Default: "",
Advanced: true,
}, {
Name: "drive_type",
Help: "The type of the drive ( personal | business | documentLibrary )",
Default: "",
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
ResourceURL string `config:"resource_url"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
}
// Fs represents a remote one drive
@@ -257,7 +247,8 @@ type Fs struct {
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *pacer.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
isBusiness bool // true if this is an OneDrive Business account
driveID string // ID to use for querying Microsoft Graph
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
}
// Object describes a one drive object
@@ -327,9 +318,17 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
var opts rest.Opts
if len(path) == 0 {
opts = rest.Opts{
Method: "GET",
Path: "/root",
}
} else {
opts = rest.Opts{
Method: "GET",
Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
}
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &info)
@@ -364,23 +363,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.ChunkSize%(320*1024) != 0 {
return nil, errors.Errorf("chunk size %d is not a multiple of 320k", opt.ChunkSize)
}
// if we have a resource URL it's a business account otherwise a personal one
isBusiness := opt.ResourceURL != ""
var rootURL string
var oauthConfig *oauth2.Config
if !isBusiness {
// personal account setup
oauthConfig = oauthPersonalConfig
rootURL = rootURLPersonal
} else {
// business account setup
oauthConfig = oauthBusinessConfig
rootURL = opt.ResourceURL + "_api/v2.0/drives/me"
sharedURL = opt.ResourceURL + "_api/v2.0/drives"
// update the URL in the AuthOptions
oauthBusinessResource = oauth2.SetAuthURLParam("resource", opt.ResourceURL)
if opt.DriveID == "" || opt.DriveType == "" {
log.Fatalf("Unable to get drive_id and drive_type. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend.")
}
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
@@ -388,19 +375,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
isBusiness: isBusiness,
name: name,
root: root,
opt: *opt,
driveID: opt.DriveID,
driveType: opt.DriveType,
srv: rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.features = (&fs.Features{
CaseInsensitive: true,
// OneDrive for business doesn't support mime types properly
// so we disable it until resolved
// https://github.com/OneDrive/onedrive-api-docs/issues/643
ReadMimeType: !f.isBusiness,
CaseInsensitive: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
@@ -546,8 +531,7 @@ type listAllFn func(*api.Item) bool
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
// Top parameter asks for bigger pages of data
// https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := newOptsCall(dirID, "GET", "/children?top=1000")
opts := newOptsCall(dirID, "GET", "/children?$top=1000")
OUTER:
for {
var result api.ListChildrenResponse
@@ -757,16 +741,11 @@ func (f *Fs) Precision() time.Duration {
func (f *Fs) waitForJob(location string, o *Object) error {
deadline := time.Now().Add(fs.Config.Timeout)
for time.Now().Before(deadline) {
opts := rest.Opts{
Method: "GET",
RootURL: location,
IgnoreStatus: true, // Ignore the http status response since it seems to return valid info on 500 errors
}
var resp *http.Response
var err error
var body []byte
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(&opts)
resp, err = http.Get(location)
if err != nil {
return fserrors.ShouldRetry(err), err
}
@@ -782,19 +761,18 @@ func (f *Fs) waitForJob(location string, o *Object) error {
if err != nil {
return errors.Wrapf(err, "async status result not JSON: %q", body)
}
// See if we decoded anything...
if !(status.Operation == "" && status.PercentageComplete == 0 && status.Status == "") {
if status.Status == "failed" || status.Status == "deleteFailed" {
return errors.Errorf("%s: async operation %q returned %q", o.remote, status.Operation, status.Status)
switch status.Status {
case "failed":
case "deleteFailed":
{
return errors.Errorf("%s: async operation returned %q", o.remote, status.Status)
}
} else if resp.StatusCode == 200 {
var info api.Item
err = json.Unmarshal(body, &info)
if err != nil {
return errors.Wrapf(err, "async item result not JSON: %q", body)
}
return o.setMetaData(&info)
case "completed":
err = o.readMetaData()
return errors.Wrapf(err, "async operation completed but readMetaData failed")
}
time.Sleep(1 * time.Second)
}
return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout)
@@ -833,7 +811,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}
// Copy the object
opts := newOptsCall(srcObj.id, "POST", "/action.copy")
opts := newOptsCall(srcObj.id, "POST", "/copy")
opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
opts.NoResponse = true
@@ -843,7 +821,8 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
copyReq := api.CopyItemRequest{
Name: &replacedLeaf,
ParentReference: api.ItemReference{
ID: id,
DriveID: f.driveID,
ID: id,
},
}
var resp *http.Response
@@ -1079,10 +1058,10 @@ func (f *Fs) About() (usage *fs.Usage, err error) {
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
if f.isBusiness {
return hash.Set(hash.QuickXorHash)
if f.driveType == driveTypePersonal {
return hash.Set(hash.SHA1)
}
return hash.Set(hash.SHA1)
return hash.Set(hash.QuickXorHash)
}
// ------------------------------------------------------------
@@ -1112,16 +1091,16 @@ func (o *Object) srvPath() string {
// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
if o.fs.isBusiness {
if t != hash.QuickXorHash {
return "", hash.ErrUnsupported
if o.fs.driveType == driveTypePersonal {
if t == hash.SHA1 {
return o.sha1, nil
}
} else {
if t == hash.QuickXorHash {
return o.quickxorhash, nil
}
return o.quickxorhash, nil
}
if t != hash.SHA1 {
return "", hash.ErrUnsupported
}
return o.sha1, nil
return "", hash.ErrUnsupported
}
// Size returns the size of an object in bytes
@@ -1282,12 +1261,12 @@ func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUpl
opts = rest.Opts{
Method: "POST",
RootURL: rootURL,
Path: "/" + drive + "/items/" + id + ":/" + rest.URLPathEscape(leaf) + ":/upload.createSession",
Path: "/" + drive + "/items/" + id + ":/" + rest.URLPathEscape(replaceReservedChars(leaf)) + ":/createUploadSession",
}
} else {
opts = rest.Opts{
Method: "POST",
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/upload.createSession",
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/createUploadSession",
}
}
createRequest := api.CreateUploadRequest{}
@@ -1349,6 +1328,10 @@ func (o *Object) cancelUploadSession(url string) (err error) {
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
if size <= 0 {
panic("size passed into uploadMultipart must be > 0")
}
// Create upload session
fs.Debugf(o, "Starting multipart upload")
session, err := o.createUploadSession(modTime)
@@ -1389,8 +1372,14 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
return info, nil
}
// uploadSinglepart uploads a file as a single part
// Update the content of a remote file of up to 4 MB in a single request
// This sets the modtime after uploading, which creates a new version of the remote file
func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
panic("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
}
fs.Debugf(o, "Starting singlepart upload")
var resp *http.Response
var opts rest.Opts
_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
@@ -1411,12 +1400,12 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (
Body: in,
}
}
// for go1.8 (see release notes) we must nil the Body if we want a
// "Content-Length: 0" header which onedrive requires for all files.
if size == 0 {
opts.Body = nil
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
})
@@ -1443,15 +1432,17 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
modTime := src.ModTime()
var info *api.Item
if size <= 0 {
// This is for 0 length files, or files with an unknown size
if size > 0 {
info, err = o.uploadMultipart(in, size, modTime)
} else if size == 0 {
info, err = o.uploadSinglepart(in, size, modTime)
} else {
info, err = o.uploadMultipart(in, size, modTime)
panic("src file size must be >= 0")
}
if err != nil {
return err
}
return o.setMetaData(info)
}
@@ -1489,7 +1480,7 @@ func newOptsCall(id string, method string, route string) (opts rest.Opts) {
func parseDirID(ID string) (string, string, string) {
if strings.Index(ID, "#") >= 0 {
s := strings.Split(ID, "#")
return s[1], s[0], sharedURL
return s[1], s[0], graphURL + "/drives"
}
return ID, "", ""
}

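After this change NewFs refuses to start without drive_id and drive_type, so existing OneDrive remotes have to be re-created via rclone config. For orientation, a sketch of what the resulting config section looks like; the section name and all values are placeholders, and drive_type is one of personal, business or documentLibrary as listed in the options above:

[onedrive]
type = onedrive
token = {"access_token":"...","refresh_token":"...","expiry":"..."}
drive_id = <id chosen during rclone config>
drive_type = business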

@@ -39,9 +39,11 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/ncw/swift"
"github.com/pkg/errors"
@@ -570,6 +572,7 @@ const (
maxRetries = 10 // number of retries to make of operations
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
)
// Options defines the configuration for this backend
@@ -604,6 +607,7 @@ type Fs struct {
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
pacer *pacer.Pacer // To pace the API calls
}
// Object describes a s3 object
@@ -649,6 +653,37 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// retryErrorCodes is a slice of error codes that we will retry
// See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var retryErrorCodes = []int{
409, // Conflict - various states that could be resolved on a retry
503, // Service Unavailable/Slow Down - "Reduce your request rate"
}
// S3 is pretty resilient, and the built-in retry handling is probably sufficient
// as it should notice closed connections and timeouts, which are the most likely
// sorts of failure modes
func shouldRetry(err error) (bool, error) {
// If this is an awserr object, try and extract more useful information to determine if we should retry
if awsError, ok := err.(awserr.Error); ok {
// Simple case, check the original embedded error in case it's generically retriable
if fserrors.ShouldRetry(awsError.OrigErr()) {
return true, err
}
// Failing that, if it's a RequestFailure it's probably got an HTTP status code we can check
if reqErr, ok := err.(awserr.RequestFailure); ok {
for _, e := range retryErrorCodes {
if reqErr.StatusCode() == e {
return true, err
}
}
}
}
// OK, not an awserr, check for generic failure conditions
return fserrors.ShouldRetry(err), err
}
// Pattern to match a s3 path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
@@ -774,6 +809,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
c: c,
bucket: bucket,
ses: ses,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.S3Pacer),
}
f.features = (&fs.Features{
ReadMimeType: true,
@@ -787,7 +823,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
Bucket: &f.bucket,
Key: &directory,
}
_, err = f.c.HeadObject(&req)
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.HeadObject(&req)
return shouldRetry(err)
})
if err == nil {
f.root = path.Dir(directory)
if f.root == "." {
@@ -864,7 +903,12 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
MaxKeys: &maxKeys,
Marker: marker,
}
resp, err := f.c.ListObjects(&req)
var resp *s3.ListObjectsOutput
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListObjects(&req)
return shouldRetry(err)
})
if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok {
if awsErr.StatusCode() == http.StatusNotFound {
@@ -989,7 +1033,11 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
return nil, fs.ErrorListBucketRequired
}
req := s3.ListBucketsInput{}
resp, err := f.c.ListBuckets(&req)
var resp *s3.ListBucketsOutput
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListBuckets(&req)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
@@ -1074,7 +1122,10 @@ func (f *Fs) dirExists() (bool, error) {
req := s3.HeadBucketInput{
Bucket: &f.bucket,
}
_, err := f.c.HeadBucket(&req)
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.HeadBucket(&req)
return shouldRetry(err)
})
if err == nil {
return true, nil
}
@@ -1111,7 +1162,10 @@ func (f *Fs) Mkdir(dir string) error {
LocationConstraint: &f.opt.LocationConstraint,
}
}
_, err := f.c.CreateBucket(&req)
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.CreateBucket(&req)
return shouldRetry(err)
})
if err, ok := err.(awserr.Error); ok {
if err.Code() == "BucketAlreadyOwnedByYou" {
err = nil
@@ -1136,7 +1190,10 @@ func (f *Fs) Rmdir(dir string) error {
req := s3.DeleteBucketInput{
Bucket: &f.bucket,
}
_, err := f.c.DeleteBucket(&req)
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.DeleteBucket(&req)
return shouldRetry(err)
})
if err == nil {
f.bucketOK = false
f.bucketDeleted = true
@@ -1183,7 +1240,10 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
CopySource: &source,
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
}
_, err = f.c.CopyObject(&req)
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.CopyObject(&req)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
@@ -1260,7 +1320,12 @@ func (o *Object) readMetaData() (err error) {
Bucket: &o.fs.bucket,
Key: &key,
}
resp, err := o.fs.c.HeadObject(&req)
var resp *s3.HeadObjectOutput
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.c.HeadObject(&req)
return shouldRetry(err)
})
if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok {
if awsErr.StatusCode() == http.StatusNotFound {
@@ -1344,7 +1409,10 @@ func (o *Object) SetModTime(modTime time.Time) error {
Metadata: o.meta,
MetadataDirective: &directive,
}
_, err = o.fs.c.CopyObject(&req)
err = o.fs.pacer.Call(func() (bool, error) {
_, err := o.fs.c.CopyObject(&req)
return shouldRetry(err)
})
return err
}
@@ -1371,7 +1439,12 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
}
}
}
resp, err := o.fs.c.GetObject(&req)
var resp *s3.GetObjectOutput
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.c.GetObject(&req)
return shouldRetry(err)
})
if err, ok := err.(awserr.RequestFailure); ok {
if err.Code() == "InvalidObjectState" {
return nil, errors.Errorf("Object in GLACIER, restore first: %v", key)
@@ -1450,7 +1523,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
if o.fs.opt.StorageClass != "" {
req.StorageClass = &o.fs.opt.StorageClass
}
_, err = uploader.Upload(&req)
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
_, err = uploader.Upload(&req)
return shouldRetry(err)
})
if err != nil {
return err
}
@@ -1468,7 +1544,10 @@ func (o *Object) Remove() error {
Bucket: &o.fs.bucket,
Key: &key,
}
_, err := o.fs.c.DeleteObject(&req)
err := o.fs.pacer.Call(func() (bool, error) {
_, err := o.fs.c.DeleteObject(&req)
return shouldRetry(err)
})
return err
}


@@ -31,13 +31,21 @@ const (
listChunks = 1000 // chunk size to read directory listings
)
// SharedOptions are shared between swift and hubic
var SharedOptions = []fs.Option{{
Name: "chunk_size",
Help: "Above this size files will be chunked into a _segments container.",
Default: fs.SizeSuffix(5 * 1024 * 1024 * 1024),
Advanced: true,
}}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "swift",
Description: "Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)",
NewFs: NewFs,
Options: []fs.Option{{
Options: append([]fs.Option{{
Name: "env_auth",
Help: "Get swift credentials from environment variables in standard OpenStack form.",
Default: false,
@@ -134,12 +142,7 @@ func init() {
Help: "OVH Public Cloud Archive",
Value: "pca",
}},
}, {
Name: "chunk_size",
Help: "Above this size files will be chunked into a _segments container.",
Default: fs.SizeSuffix(5 * 1024 * 1024 * 1024),
Advanced: true,
}},
}}, SharedOptions...),
})
}
@@ -291,7 +294,7 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
return c, nil
}
// NewFsWithConnection contstructs an Fs from the path, container:path
// NewFsWithConnection constructs an Fs from the path, container:path
// and authenticated connection.
//
// if noCheckContainer is set then the Fs won't check the container

227
backend/union/union.go Normal file
View File

@@ -0,0 +1,227 @@
package union
import (
"fmt"
"io"
"path"
"path/filepath"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
)
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "union",
Description: "Builds a stackable unification remote, which can appear to merge the contents of several remotes",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remotes",
Help: "List of space separated remotes.\nCan be 'remotea:test/dir remoteb:', '\"remotea:test/space dir\" remoteb:', etc.\nThe last remote is used to write to.",
Required: true,
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
Remotes fs.SpaceSepList `config:"remotes"`
}
// Fs represents a union of remotes
type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
root string // the path we are working on
remotes []fs.Fs // slice of remotes
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("union root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(dir string) error {
return f.remotes[len(f.remotes)-1].Rmdir(dir)
}
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
func (f *Fs) Hashes() hash.Set {
// This could probably be set if all remotes share the same hashing algorithm
return hash.Set(hash.None)
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(dir string) error {
return f.remotes[len(f.remotes)-1].Mkdir(dir)
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.remotes[len(f.remotes)-1].Put(in, src, options...)
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
set := make(map[string]fs.DirEntry)
found := false
for _, remote := range f.remotes {
var remoteEntries, err = remote.List(dir)
if err == fs.ErrorDirNotFound {
continue
}
if err != nil {
return nil, errors.Wrapf(err, "List failed on %v", remote)
}
found = true
for _, remoteEntry := range remoteEntries {
set[remoteEntry.Remote()] = remoteEntry
}
}
if !found {
return nil, fs.ErrorDirNotFound
}
for key := range set {
entries = append(entries, set[key])
}
return entries, nil
}
// NewObject creates a new remote union file object based on the first Object it finds (reverse remote order)
func (f *Fs) NewObject(path string) (fs.Object, error) {
for i := range f.remotes {
var remote = f.remotes[len(f.remotes)-i-1]
var obj, err = remote.NewObject(path)
if err == fs.ErrorObjectNotFound {
continue
}
if err != nil {
return nil, errors.Wrapf(err, "NewObject failed on %v", remote)
}
return obj, nil
}
return nil, fs.ErrorObjectNotFound
}
// Precision is the greatest Precision of all remotes
func (f *Fs) Precision() time.Duration {
var greatestPrecision time.Duration
for _, remote := range f.remotes {
if remote.Precision() > greatestPrecision {
greatestPrecision = remote.Precision()
}
}
return greatestPrecision
}
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if len(opt.Remotes) == 0 {
return nil, errors.New("union can't point to an empty remote - check the value of the remotes setting")
}
if len(opt.Remotes) == 1 {
return nil, errors.New("union can't point to a single remote - check the value of the remotes setting")
}
for _, remote := range opt.Remotes {
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point union remote at itself - check the value of the remote setting")
}
}
var remotes []fs.Fs
for i := range opt.Remotes {
// Last remote first so we return the correct (last) matching fs in case of fs.ErrorIsFile
var remote = opt.Remotes[len(opt.Remotes)-i-1]
_, configName, fsPath, err := fs.ParseRemote(remote)
if err != nil {
return nil, err
}
var rootString = path.Join(fsPath, filepath.ToSlash(root))
if configName != "local" {
rootString = configName + ":" + rootString
}
myFs, err := fs.NewFs(rootString)
if err != nil {
if err == fs.ErrorIsFile {
return myFs, err
}
return nil, err
}
remotes = append(remotes, myFs)
}
// Reverse the remotes again so they are in the order as before
for i, j := 0, len(remotes)-1; i < j; i, j = i+1, j-1 {
remotes[i], remotes[j] = remotes[j], remotes[i]
}
f := &Fs{
name: name,
root: root,
opt: *opt,
remotes: remotes,
}
var features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
BucketBased: true,
}).Fill(f)
for _, remote := range f.remotes {
features = features.Mask(remote)
}
f.features = features
return f, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
)

View File

@@ -0,0 +1,18 @@
// Test Union filesystem interface
package union_test
import (
"testing"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestUnion:",
NilObject: nil,
SkipFsMatch: true,
})
}

View File

@@ -12,6 +12,9 @@ import (
const (
// Wed, 27 Sep 2017 14:28:34 GMT
timeFormat = time.RFC1123
// The same as time.RFC1123 with optional leading zeros on the date
// see https://github.com/ncw/rclone/issues/2574
noZerosRFC1123 = "Mon, _2 Jan 2006 15:04:05 MST"
)
// Multistatus contains responses returned from an HTTP 207 return code
@@ -138,9 +141,10 @@ func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
// Possible time formats to parse the time with
var timeFormats = []string{
timeFormat, // Wed, 27 Sep 2017 14:28:34 GMT (as per RFC)
time.RFC1123Z, // Fri, 05 Jan 2018 14:14:38 +0000 (as used by mydrive.ch)
time.UnixDate, // Wed May 17 15:31:58 UTC 2017 (as used in an internal server)
timeFormat, // Wed, 27 Sep 2017 14:28:34 GMT (as per RFC)
time.RFC1123Z, // Fri, 05 Jan 2018 14:14:38 +0000 (as used by mydrive.ch)
time.UnixDate, // Wed May 17 15:31:58 UTC 2017 (as used in an internal server)
noZerosRFC1123, // Fri, 7 Sep 2018 08:49:58 GMT (as used by server in #2574)
}
// UnmarshalXML turns XML into a Time

View File

@@ -0,0 +1,32 @@
package odrvcookie
import (
"time"
"github.com/ncw/rclone/lib/rest"
)
// CookieRenew holds information for the renew
type CookieRenew struct {
srv *rest.Client
timer *time.Ticker
renewFn func()
}
// NewRenew returns and starts a CookieRenew
func NewRenew(interval time.Duration, renewFn func()) *CookieRenew {
renew := CookieRenew{
timer: time.NewTicker(interval),
renewFn: renewFn,
}
go renew.Renew()
return &renew
}
// Renew calls the renewFn for every tick
func (c *CookieRenew) Renew() {
for {
<-c.timer.C // wait for tick
c.renewFn()
}
}

View File

@@ -372,6 +372,17 @@ func (f *Fs) setQuirks(vendor string) error {
if err != nil {
return err
}
odrvcookie.NewRenew(12*time.Hour, func() {
spCookies, err := spCk.Cookies()
if err != nil {
fs.Errorf("could not renew cookies: %s", err.Error())
return
}
f.srv.SetCookie(&spCookies.FedAuth, &spCookies.RtFa)
fs.Debugf(spCookies, "successfully renewed sharepoint cookies")
})
f.srv.SetCookie(&spCookies.FedAuth, &spCookies.RtFa)
// sharepoint, unlike the other vendors, only lists files if the depth header is set to 0

23
bin/bisect-go-rclone.sh Executable file
View File

@@ -0,0 +1,23 @@
#!/bin/bash
# An example script to run when bisecting go with git bisect -run when
# looking for an rclone regression
# Run this from the go root
set -e
# Compile the go version
cd src
./make.bash
# Make sure we are using it
source ~/bin/use-go1.11
go version
# Compile rclone
cd ~/go/src/github.com/ncw/rclone
make
# run the failing test
go run -race race.go

15
bin/bisect-rclone.sh Executable file
View File

@@ -0,0 +1,15 @@
#!/bin/bash
# Example script for git-bisect -run
# Run from the project root
set -e
# Compile
make
rclone version
# Test whatever it is that is going wrong
truncate -s 10M /tmp/10M
rclone delete azure:rclone-test1/10M || true
rclone --retries 1 copyto -vv /tmp/10M azure:rclone-test1/10M --azureblob-upload-cutoff 1M

View File

@@ -44,6 +44,7 @@ docs = [
"swift.md",
"pcloud.md",
"sftp.md",
"union.md",
"webdav.md",
"yandex.md",

17
bin/tidy-beta Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/sh
# Use this script after a release to tidy the betas
version="$1"
if [ "$version" = "" ]; then
echo "Syntax: $0 <version> [delete]"
exit 1
fi
dry_run="--dry-run"
if [ "$2" = "delete" ]; then
dry_run=""
else
echo "Running in --dry-run mode"
echo "Use '$0 $version delete' to actually delete files"
fi
rclone ${dry_run} --fast-list -P --checkers 16 --transfers 16 delete --include "**/${version}**" memstore:beta-rclone-org

View File

@@ -47,6 +47,7 @@ import (
_ "github.com/ncw/rclone/cmd/rmdir"
_ "github.com/ncw/rclone/cmd/rmdirs"
_ "github.com/ncw/rclone/cmd/serve"
_ "github.com/ncw/rclone/cmd/settier"
_ "github.com/ncw/rclone/cmd/sha1sum"
_ "github.com/ncw/rclone/cmd/size"
_ "github.com/ncw/rclone/cmd/sync"

View File

@@ -311,11 +311,11 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
}
break
}
if fserrors.IsFatalError(err) {
if fserrors.IsFatalError(err) || accounting.Stats.HadFatalError() {
fs.Errorf(nil, "Fatal error received - not attempting retries")
break
}
if fserrors.IsNoRetryError(err) {
if fserrors.IsNoRetryError(err) || (accounting.Stats.Errored() && !accounting.Stats.HadRetryError()) {
fs.Errorf(nil, "Can't retry this error - not attempting retries")
break
}

View File

@@ -34,7 +34,9 @@ without account.
if err != nil {
return err
}
fmt.Println(link)
if link != "" {
fmt.Println(link)
}
return nil
})
},

View File

@@ -6,7 +6,6 @@ package ncdu
import (
"fmt"
"log"
"path"
"sort"
"strings"
@@ -466,7 +465,7 @@ func NewUI(f fs.Fs) *UI {
func (u *UI) Show() error {
err := termbox.Init()
if err != nil {
log.Fatal(err)
return errors.Wrap(err, "termbox init")
}
defer termbox.Close()

View File

@@ -38,7 +38,7 @@ func startProgress() chan struct{} {
}
go func() {
progressInterval := defaultProgressInterval
if ShowStats() {
if ShowStats() && *statsInterval > 0 {
progressInterval = *statsInterval
}
ticker := time.NewTicker(progressInterval)

54
cmd/settier/settier.go Normal file
View File

@@ -0,0 +1,54 @@
package settier
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "settier tier remote:path",
Short: `Changes storage class/tier of objects in remote.`,
Long: `
rclone settier changes storage tier or class at remote if supported.
A few cloud storage services provide different storage classes on objects,
for example AWS S3 and Glacier, Azure Blob storage - Hot, Cool and Archive,
Google Cloud Storage - Regional Storage, Nearline, Coldline etc.
Note that certain tier changes make objects unavailable to access immediately.
For example, tiering to archive in Azure Blob storage puts objects into a frozen state;
the user can restore them by setting the tier to Hot/Cool. Similarly, tiering S3 objects to Glacier makes them
inaccessible.
You can use it to tier a single object
rclone settier Cool remote:path/file
Or use rclone filters to set tier on only specific files
rclone --include "*.txt" settier Hot remote:path/dir
Or just provide remote directory and all files in directory will be tiered
rclone settier tier remote:path/dir
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
tier := args[0]
input := args[1:]
fsrc := cmd.NewFsSrc(input)
cmd.Run(false, false, command, func() error {
isSupported := fsrc.Features().SetTier
if !isSupported {
return errors.Errorf("Remote %s does not support settier", fsrc.Name())
}
return operations.SetTier(fsrc, tier)
})
},
}

View File

@@ -58,8 +58,9 @@ Features
* [Sync](/commands/rclone_sync/) (one way) mode to make a directory identical
* [Check](/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts
* Optional encryption ([Crypt](/crypt/))
* Optional cache ([Cache](/cache/))
* ([Encryption](/crypt/)) backend
* ([Cache](/cache/)) backend
* ([Union](/union/)) backend
* Optional FUSE mount ([rclone mount](/commands/rclone_mount/))
Links

View File

@@ -171,7 +171,7 @@ Contributors
* themylogin <themylogin@gmail.com>
* Onno Zweers <onno.zweers@surfsara.nl>
* Jasper Lievisse Adriaanse <jasper@humppa.nl>
* sandeepkru <sandeep.ummadi@gmail.com>
* sandeepkru <sandeep.ummadi@gmail.com> <sandeepkru@users.noreply.github.com>
* HerrH <atomtigerzoo@users.noreply.github.com>
* Andrew <4030760+sparkyman215@users.noreply.github.com>
* dan smith <XX1011@gmail.com>
@@ -186,3 +186,13 @@ Contributors
* Alex Chen <Cnly@users.noreply.github.com>
* Denis <deniskovpen@gmail.com>
* bsteiss <35940619+bsteiss@users.noreply.github.com>
* Cédric Connes <cedric.connes@gmail.com>
* Dr. Tobias Quathamer <toddy15@users.noreply.github.com>
* dcpu <42736967+dcpu@users.noreply.github.com>
* Sheldon Rupp <me@shel.io>
* albertony <12441419+albertony@users.noreply.github.com>
* cron410 <cron410@gmail.com>
* Anagh Kumar Baranwal <anaghk.dos@gmail.com>
* Felix Brucker <felix@felixbrucker.com>
* Santiago Rodríguez <scollazo@users.noreply.github.com>
* Craig Miskell <craig.miskell@fluxfederation.com>

View File

@@ -184,6 +184,13 @@ Upload chunk size. Default 4MB. Note that this is stored in memory
and there may be up to `--transfers` chunks stored at once in memory.
This can be at most 100MB.
#### --azureblob-list-chunk=SIZE ####
List blob limit. Default is the maximum, 5000. `List blobs` requests
are permitted 2 minutes per megabyte to complete. If an operation is
taking longer than 2 minutes per megabyte on average, it will time out ([source](https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations#exceptions-to-default-timeout-interval)). This limits the number of blob items returned per request, to avoid the timeout.
#### --azureblob-access-tier=Hot/Cool/Archive ####
Azure storage supports blob tiering, you can configure tier in advanced

View File

@@ -1,4 +1,4 @@
---
-----
title: "Cache"
description: "Rclone docs for cache remote"
date: "2017-09-03"
@@ -133,7 +133,8 @@ to the cloud provider without interrupting the reading (small blip can happen th
Files are uploaded in sequence and only one file is uploaded at a time.
Uploads will be stored in a queue and be processed based on the order they were added.
The queue and the temporary storage is persistent across restarts and even purges of the cache.
The queue and the temporary storage is persistent across restarts but
can be cleared on startup with the `--cache-db-purge` flag.
### Write Support ###
@@ -181,6 +182,28 @@ and password) in your remote and it will be automatically enabled.
Affected settings:
- `cache-workers`: _Configured value_ during confirmed playback or _1_ all the other times
##### Certificate Validation #####
When the Plex server is configured to only accept secure connections, it is
possible to use `.plex.direct` URLs to ensure certificate validation succeeds.
These URLs are used by Plex internally to connect to the Plex server securely.
The format of these URLs is the following:
https://ip-with-dots-replaced.server-hash.plex.direct:32400/
The `ip-with-dots-replaced` part can be any IPv4 address, where the dots
have been replaced with dashes, e.g. `127.0.0.1` becomes `127-0-0-1`.
To get the `server-hash` part, the easiest way is to visit
https://plex.tv/api/resources?includeHttps=1&X-Plex-Token=your-plex-token
This page will list all the available Plex servers for your account
with at least one `.plex.direct` link for each. Copy one URL and replace
the IP address with the desired address. This can be used as the
`plex_url` value.
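
As a quick illustration of the URL construction described above, here is a small Go sketch that turns a server IP and hash into a `.plex.direct` URL; the hash used in `main` is made up for the example.

```
package main

import (
	"fmt"
	"strings"
)

// plexDirectURL builds a .plex.direct URL from a server IP and the
// server-hash taken from the plex.tv resources page.
func plexDirectURL(ip, serverHash string) string {
	return fmt.Sprintf("https://%s.%s.plex.direct:32400/",
		strings.Replace(ip, ".", "-", -1), serverHash)
}

func main() {
	// "abcdef0123456789" is a placeholder hash, not a real server.
	fmt.Println(plexDirectURL("127.0.0.1", "abcdef0123456789"))
	// Output: https://127-0-0-1.abcdef0123456789.plex.direct:32400/
}
```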
### Known issues ###
#### Mount and --dir-cache-time ####
@@ -242,6 +265,19 @@ which makes it think we're downloading the full file instead of small chunks.
Organizing the remotes in this order yields better results:
<span style="color:green">**cloud remote** -> **cache** -> **crypt**</span>
#### absolute remote paths ####
`cache` can not differentiate between relative and absolute paths for the wrapped remote.
Any path given in the `remote` config setting and on the command line will be passed to
the wrapped remote as is, but for storing the chunks on disk the path will be made
relative by removing any leading `/` character.
This behavior is irrelevant for most backend types, but there are backends where a leading `/`
changes the effective directory, e.g. in the `sftp` backend paths starting with a `/` are
relative to the root of the SSH server and paths without are relative to the user home directory.
As a result `sftp:bin` and `sftp:/bin` will share the same cache folder, even if they represent
different directories on the SSH server.
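
To make the consequence concrete, here is an illustrative Go sketch (not the cache backend's actual code) showing how stripping the leading `/` makes both spellings of the path map to the same on-disk chunk folder:

```
package main

import (
	"fmt"
	"strings"
)

// cacheFolder shows why "sftp:bin" and "sftp:/bin" end up sharing a
// chunk folder: the path part is made relative before it is used on disk.
func cacheFolder(remote string) string {
	name, path := remote, ""
	if i := strings.Index(remote, ":"); i >= 0 {
		name, path = remote[:i], remote[i+1:]
	}
	return name + "/" + strings.TrimLeft(path, "/")
}

func main() {
	fmt.Println(cacheFolder("sftp:bin"))  // sftp/bin
	fmt.Println(cacheFolder("sftp:/bin")) // sftp/bin
}
```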
### Cache and Remote Control (--rc) ###
Cache supports the new `--rc` mode in rclone and can be remote controlled through the following end points:
By default, the listener is disabled if you do not add the flag.
@@ -281,7 +317,7 @@ then `--cache-chunk-path` will use the same path as `--cache-db-path`.
#### --cache-db-purge ####
Flag to clear all the cached data for this remote before.
Flag to clear all the cached data for this remote on start.
**Default**: not set
@@ -292,7 +328,7 @@ connections. If the chunk size is changed, any downloaded chunks will be invalid
**Default**: 5M
#### --cache-total-chunk-size=SIZE ####
#### --cache-chunk-total-size=SIZE ####
The total size that the chunks can take up on the local disk. If `cache`
exceeds this value then it will start to the delete the oldest chunks until
@@ -303,7 +339,7 @@ it goes under this value.
#### --cache-chunk-clean-interval=DURATION ####
How often should `cache` perform cleanups of the chunk storage. The default value
should be ok for most people. If you find that `cache` goes over `cache-total-chunk-size`
should be ok for most people. If you find that `cache` goes over `cache-chunk-total-size`
too often then try to lower this value to force it to perform cleanups more often.
**Default**: 1m

View File

@@ -6,7 +6,21 @@ date: "2018-09-01"
# Changelog
## v1.42 - 2018-09-01
## v1.43.1 - 2018-09-07
Point release to fix hubic and azureblob backends.
* Bug Fixes
* ncdu: Return error instead of log.Fatal in Show (Fabian Möller)
* cmd: Fix crash with --progress and --stats 0 (Nick Craig-Wood)
* docs: Tidy website display (Anagh Kumar Baranwal)
* Azure Blob:
* Fix multi-part uploads. (sandeepkru)
* Hubic
* Fix uploads (Nick Craig-Wood)
* Retry auth fetching if it fails to make hubic more reliable (Nick Craig-Wood)
## v1.43 - 2018-09-01
* New backends
* Jottacloud (Sebastian Bünger)

View File

@@ -92,7 +92,7 @@ systems are a long way from 100% reliable. The rclone sync/copy
commands cope with this with lots of retries. However rclone mount
can't use retries in the same way without making local copies of the
uploads. Look at the **EXPERIMENTAL** [file caching](#file-caching)
for solutions to make mount mount more reliable.
for solutions to make mount more reliable.
### Attribute caching

View File

@@ -42,6 +42,7 @@ See the following for detailed instructions for
* [Pcloud](/pcloud/)
* [QingStor](/qingstor/)
* [SFTP](/sftp/)
* [Union](/union/)
* [WebDAV](/webdav/)
* [Yandex Disk](/yandex/)
* [The local filesystem](/local/)
@@ -549,6 +550,10 @@ Note that if you are using the `logrotate` program to manage rclone's
logs, then you should use the `copytruncate` option as rclone doesn't
have a signal to rotate logs.
### --log-format LIST ###
Comma separated list of log format options. `date`, `time`, `microseconds`, `longfile`, `shortfile`, `UTC`. The default is "`date`,`time`".
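
The InitLogging change in fs/log/log.go further down in this diff implements this by translating each option into one of the standard library's log flags. A compact sketch of that mapping, assuming the option string has already been read from the `--log-format` flag:

```
package main

import (
	"fmt"
	"log"
	"strings"
)

// flagsFromFormat maps --log-format options onto stdlib log flags,
// the same idea as the InitLogging change further down in this diff.
func flagsFromFormat(format string) int {
	opts := map[string]int{
		"date":         log.Ldate,
		"time":         log.Ltime,
		"microseconds": log.Lmicroseconds,
		"longfile":     log.Llongfile,
		"shortfile":    log.Lshortfile,
		"UTC":          log.LUTC,
	}
	var flags int
	for _, o := range strings.Split(format, ",") {
		if f, ok := opts[strings.TrimSpace(o)]; ok {
			flags |= f
		}
	}
	return flags
}

func main() {
	log.SetFlags(flagsFromFormat("date,time,UTC"))
	fmt.Println("log flags:", log.Flags())
	log.Println("hello with the configured prefix format")
}
```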
### --log-level LEVEL ###
This sets the log level for rclone. The default log level is `NOTICE`.
@@ -661,7 +666,7 @@ files if they are incorrect as it would normally.
This can be used if the remote is being synced with another tool also
(eg the Google Drive client).
### --P, --progress ###
### -P, --progress ###
This flag makes rclone update the stats in a static block in the
terminal providing a realtime overview of the transfer.

View File

@@ -81,6 +81,15 @@ To copy a local directory to an Jottacloud directory called backup
rclone copy /home/source remote:backup
### --fast-list ###
This remote supports `--fast-list` which allows you to use fewer
transactions in exchange for more memory. See the [rclone
docs](/docs/#fast-list) for more details.
Note that the implementation in Jottacloud always uses only a single
API request to get the entire list, so for large folders this could
lead to a long wait time before the first results are shown.
### Modified time and hashes ###
@@ -99,12 +108,22 @@ the `--jottacloud-md5-memory-limit` flag.
### Deleting files ###
Any files you delete with rclone will end up in the trash. Due to a lack of API documentation emptying the trash is currently only possible via the Jottacloud website.
By default rclone will send all files to the trash when deleting files.
Due to a lack of API documentation emptying the trash is currently
only possible via the Jottacloud website. If deleting permanently
is required then use the `--jottacloud-hard-delete` flag,
or set the equivalent environment variable.
### Versions ###
Jottacloud supports file versioning. When rclone uploads a new version of a file it creates a new version of it. Currently rclone only supports retrieving the current version but older versions can be accessed via the Jottacloud Website.
### Quota information ###
To view your current quota you can use the `rclone about remote:`
command which will display your usage limit (unless it is unlimited)
and the current usage.
### Limitations ###
Note that Jottacloud is case insensitive so you can't have a file called
@@ -124,6 +143,17 @@ system.
Files bigger than this will be cached on disk to calculate the MD5 if
required. (default 10M)
#### --jottacloud-hard-delete ####
Controls whether files are sent to the trash or deleted
permanently. Defaults to false, namely sending files to the trash.
Use `--jottacloud-hard-delete=true` to delete files permanently instead.
#### --jottacloud-unlink ####
Set to true to make the link command remove the existing public link to the file/folder.
Default is false, meaning the link command will create or retrieve a public link.
### Troubleshooting ###
Jottacloud exhibits some inconsistent behaviours regarding deleted files and folders which may cause Copy, Move and DirMove operations to previously deleted paths to fail. Emptying the trash should help in such cases.

View File

@@ -22,51 +22,36 @@ Here is an example of how to make a remote called `remote`. First run:
This will guide you through an interactive setup process:
```
No remotes found - make a new one
e) Edit existing remote
n) New remote
d) Delete remote
r) Rename remote
c) Copy remote
s) Set configuration password
n/s> n
q) Quit config
e/n/d/r/c/s/q> n
name> remote
Type of storage to configure.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
1 / Amazon Drive
\ "amazon cloud drive"
2 / Amazon S3 (also Dreamhost, Ceph, Minio)
\ "s3"
3 / Backblaze B2
\ "b2"
4 / Dropbox
\ "dropbox"
5 / Encrypt/Decrypt a remote
\ "crypt"
6 / Google Cloud Storage (this is not Google Drive)
\ "google cloud storage"
7 / Google Drive
\ "drive"
8 / Hubic
\ "hubic"
9 / Local Disk
\ "local"
10 / Microsoft OneDrive
...
17 / Microsoft OneDrive
\ "onedrive"
11 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
\ "swift"
12 / SSH/SFTP Connection
\ "sftp"
13 / Yandex Disk
\ "yandex"
Storage> 10
Microsoft App Client Id - leave blank normally.
...
Storage> 17
Microsoft App Client Id
Leave blank normally.
Enter a string value. Press Enter for the default ("").
client_id>
Microsoft App Client Secret - leave blank normally.
Microsoft App Client Secret
Leave blank normally.
Enter a string value. Press Enter for the default ("").
client_secret>
Edit advanced config? (y/n)
y) Yes
n) No
y/n> n
Remote config
Choose OneDrive account type?
* Say b for a OneDrive business account
* Say p for a personal OneDrive account
b) Business
p) Personal
b/p> p
Use auto config?
* Say Y if not sure
* Say N if you are working on a remote or headless machine
@@ -77,11 +62,32 @@ If your browser doesn't open automatically go to the following link: http://127.
Log in and authorize rclone for access
Waiting for code...
Got code
Choose a number from below, or type in an existing value
1 / OneDrive Personal or Business
\ "onedrive"
2 / Sharepoint site
\ "sharepoint"
3 / Type in driveID
\ "driveid"
4 / Type in SiteID
\ "siteid"
5 / Search a Sharepoint site
\ "search"
Your choice> 1
Found 1 drives, please select the one you want to use:
0: OneDrive (business) id=b!Eqwertyuiopasdfghjklzxcvbnm-7mnbvcxzlkjhgfdsapoiuytrewqk
Chose drive to use:> 0
Found drive 'root' of type 'business', URL: https://org-my.sharepoint.com/personal/you/Documents
Is that okay?
y) Yes
n) No
y/n> y
--------------------
[remote]
client_id =
client_secret =
token = {"access_token":"XXXXXX"}
type = onedrive
token = {"access_token":"youraccesstoken","token_type":"Bearer","refresh_token":"yourrefreshtoken","expiry":"2018-08-26T22:39:52.486512262+08:00"}
drive_id = b!Eqwertyuiopasdfghjklzxcvbnm-7mnbvcxzlkjhgfdsapoiuytrewqk
drive_type = business
--------------------
y) Yes this is OK
e) Edit this remote
@@ -112,20 +118,23 @@ To copy a local directory to an OneDrive directory called backup
rclone copy /home/source remote:backup
### OneDrive for Business ###
### Getting your own Client ID and Key ###
There is additional support for OneDrive for Business.
Select "b" when ask
```
Choose OneDrive account type?
* Say b for a OneDrive business account
* Say p for a personal OneDrive account
b) Business
p) Personal
b/p>
```
After that rclone requires an authentication of your account. The application will first authenticate your account, then query the OneDrive resource URL
and do a second (silent) authentication for this resource URL.
rclone uses a pair of Client ID and Key shared by all rclone users when performing requests by default.
If you are having problems with them (e.g. seeing a lot of throttling), you can get your own
Client ID and Key by following the steps below:
1. Open https://apps.dev.microsoft.com/#/appList, then click `Add an app` (Choose `Converged applications` if applicable)
2. Enter a name for your app, and click continue. Copy and keep the `Application Id` under the app name for later use.
3. Under section `Application Secrets`, click `Generate New Password`. Copy and keep that password for later use.
4. Under section `Platforms`, click `Add platform`, then `Web`. Enter `http://localhost:53682/` in
`Redirect URLs`.
5. Under section `Microsoft Graph Permissions`, `Add` these `delegated permissions`:
`Files.Read`, `Files.ReadWrite`, `Files.Read.All`, `Files.ReadWrite.All`, `offline_access`, `User.Read`.
6. Scroll to the bottom and click `Save`.
Now the application is complete. Run `rclone config` to create or edit a OneDrive remote.
Supply the app ID and password as Client ID and Secret, respectively. rclone will walk you through the remaining steps.
### Modified time and hashes ###

View File

@@ -131,14 +131,14 @@ operations more efficient.
| Amazon Drive | Yes | No | Yes | Yes | No [#575](https://github.com/ncw/rclone/issues/575) | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
| Amazon S3 | No | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
| Backblaze B2 | No | No | No | No | Yes | Yes | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
| Box | Yes | Yes | Yes | Yes | No [#575](https://github.com/ncw/rclone/issues/575) | No | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
| Box | Yes | Yes | Yes | Yes | No [#575](https://github.com/ncw/rclone/issues/575) | No | Yes | Yes | No |
| Dropbox | Yes | Yes | Yes | Yes | No [#575](https://github.com/ncw/rclone/issues/575) | No | Yes | Yes | Yes |
| FTP | No | No | Yes | Yes | No | No | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
| Google Cloud Storage | Yes | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
| Google Drive | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes |
| HTTP | No | No | No | No | No | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
| Hubic | Yes † | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes |
| Jottacloud | Yes | Yes | Yes | Yes | No | No | No | No | No |
| Jottacloud | Yes | Yes | Yes | Yes | No | Yes | No | Yes | No |
| Mega | Yes | No | Yes | Yes | No | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes |
| Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
| Microsoft OneDrive | Yes | Yes | Yes | Yes | No [#575](https://github.com/ncw/rclone/issues/575) | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes |

144
docs/content/union.md Normal file
View File

@@ -0,0 +1,144 @@
---
title: "Union"
description: "Remote Unification"
date: "2018-08-29"
---
<i class="fa fa-link"></i> Union
-----------------------------------------
The `union` remote provides a unification similar to UnionFS using other remotes.
Paths may be as deep as required or a local path,
eg `remote:directory/subdirectory` or `/directory/subdirectory`.
During the initial setup with `rclone config` you will specify the target
remotes as a space separated list. The target remotes can either be local paths or other remotes.
The order of the remotes is important as it defines which remotes take precedence over others if there are files with the same name in the same logical path.
The last remote is the topmost remote and replaces files with the same name from previous remotes.
Only the last remote is used to write to and delete from, all other remotes are read-only.
Subfolders can be used in a target remote. Assume a union remote named `backup`
with the remotes `mydrive:private/backup mydrive2:/backup`. Invoking `rclone mkdir backup:desktop`
is exactly the same as invoking `rclone mkdir mydrive2:/backup/desktop`.
There will be no special handling of paths containing `..` segments.
Invoking `rclone mkdir backup:../desktop` is exactly the same as invoking
`rclone mkdir mydrive2:/backup/../desktop`.
Here is an example of how to make a union called `remote` for local folders.
First run:
rclone config
This will guide you through an interactive setup process:
```
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> remote
Type of storage to configure.
Choose a number from below, or type in your own value
1 / Alias for a existing remote
\ "alias"
2 / Amazon Drive
\ "amazon cloud drive"
3 / Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)
\ "s3"
4 / Backblaze B2
\ "b2"
5 / Box
\ "box"
6 / Builds a stackable unification remote, which can appear to merge the contents of several remotes
\ "union"
7 / Cache a remote
\ "cache"
8 / Dropbox
\ "dropbox"
9 / Encrypt/Decrypt a remote
\ "crypt"
10 / FTP Connection
\ "ftp"
11 / Google Cloud Storage (this is not Google Drive)
\ "google cloud storage"
12 / Google Drive
\ "drive"
13 / Hubic
\ "hubic"
14 / JottaCloud
\ "jottacloud"
15 / Local Disk
\ "local"
16 / Mega
\ "mega"
17 / Microsoft Azure Blob Storage
\ "azureblob"
18 / Microsoft OneDrive
\ "onedrive"
19 / OpenDrive
\ "opendrive"
20 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
\ "swift"
21 / Pcloud
\ "pcloud"
22 / QingCloud Object Storage
\ "qingstor"
23 / SSH/SFTP Connection
\ "sftp"
24 / Webdav
\ "webdav"
25 / Yandex Disk
\ "yandex"
26 / http Connection
\ "http"
Storage> union
List of space separated remotes.
Can be 'remotea:test/dir remoteb:', '"remotea:test/space dir" remoteb:', etc.
The last remote is used to write to.
Enter a string value. Press Enter for the default ("").
remotes>
Remote config
--------------------
[remote]
type = union
remotes = C:\dir1 C:\dir2 C:\dir3
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
Current remotes:
Name Type
==== ====
remote union
e) Edit existing remote
n) New remote
d) Delete remote
r) Rename remote
c) Copy remote
s) Set configuration password
q) Quit config
e/n/d/r/c/s/q> q
```
Once configured you can then use `rclone` like this,
List directories in top level in `C:\dir1`, `C:\dir2` and `C:\dir3`
rclone lsd remote:
List all the files in `C:\dir1`, `C:\dir2` and `C:\dir3`
rclone ls remote:
Copy another local directory to the union directory called source, which will be placed into `C:\dir3`
rclone copy C:\source remote:source

View File

@@ -3,7 +3,7 @@
{{ template "chrome/navbar.html" . }}
<div class="container">
<div class="row">
<div class="col-md-9">
<div class="col-md-10">
<div class="well well-sm">
<h3>{{ .Title }}<br> <small>{{ .Description }}</small></h3>
<hr>
@@ -12,10 +12,10 @@
</div>
<!-- Sidebar -->
<div class="col-md-3">
<div class="col-md-2">
{{ template "chrome/menu.html" . }}
</div>
</div>
{{ template "chrome/footer.copyright.html" . }}
</div>
{{ template "chrome/footer.html" . }}
{{ template "chrome/footer.html" . }}

View File

@@ -1,5 +1,5 @@
<div class="panel panel-default">
<div class="panel-heading" style="padding: 2px 15px;">
<div class="panel-heading" style="padding: 2px 10px;">
<h4>Share and Enjoy.</h4>
</div>
<div class="panel-body">

View File

@@ -14,7 +14,7 @@
<li><a href="/downloads/"><i class="fa fa-cloud-download"></i> Downloads</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown"><b class="caret"></b> Docs</a>
<ul class="dropdown-menu">
<ul class="dropdown-menu pre-scrollable">
<li><a href="/install/"><i class="fa fa-book"></i> Installation</a></li>
<li><a href="/docs/"><i class="fa fa-book"></i> Usage</a></li>
<li><a href="/filtering/"><i class="fa fa-book"></i> Filtering</a></li>
@@ -30,7 +30,7 @@
</li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown"><b class="caret"></b> Commands</a>
<ul class="dropdown-menu">
<ul class="dropdown-menu pre-scrollable">
<li><a href="/commands/rclone_config/"><i class="fa fa-book"></i> rclone config</a></li>
<li><a href="/commands/rclone_copy/"><i class="fa fa-book"></i> rclone copy</a></li>
<li><a href="/commands/rclone_sync/"><i class="fa fa-book"></i> rclone sync</a></li>
@@ -52,7 +52,7 @@
</li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown"><b class="caret"></b> Storage Systems</a>
<ul class="dropdown-menu">
<ul class="dropdown-menu pre-scrollable">
<li><a href="/overview/"><i class="fa fa-archive"></i> Overview</a></li>
<li><a href="/amazonclouddrive/"><i class="fa fa-amazon"></i> Amazon Drive</a></li>
<li><a href="/s3/"><i class="fa fa-amazon"></i> Amazon S3</a></li>
@@ -75,6 +75,7 @@
<li><a href="/swift/"><i class="fa fa-space-shuttle"></i> Openstack Swift</a></li>
<li><a href="/pcloud/"><i class="fa fa-cloud"></i> pCloud</a></li>
<li><a href="/sftp/"><i class="fa fa-server"></i> SFTP</a></li>
<li><a href="/union/"><i class="fa fa-link"></i> Union (merge backends)</a></li>
<li><a href="/webdav/"><i class="fa fa-server"></i> WebDAV</a></li>
<li><a href="/yandex/"><i class="fa fa-space-shuttle"></i> Yandex Disk</a></li>
<li><a href="/local/"><i class="fa fa-file"></i> The local filesystem</a></li>

View File

@@ -3,14 +3,14 @@
{{ template "chrome/navbar.html" . }}
<div class="container">
<div class="row">
<div class="col-md-9">
<div class="col-md-10">
{{ range $key, $value := .Site.Taxonomies.groups.about.Pages }}
{{ $value.Content }}
{{ end }}
</div>
<!-- Sidebar -->
<div class="col-md-3">
<div class="col-md-2">
{{ template "chrome/menu.html" . }}
</div>
</div>

View File

@@ -3,7 +3,7 @@
{{ template "chrome/navbar.html" . }}
<div class="container">
<div class="row">
<div class="col-md-9">
<div class="col-md-10">
<div class="well well-sm">
<strong>Items in category <code>{{ .Title | lower }}</code></strong>
<ul class="list-unstyled">
@@ -15,10 +15,10 @@
</div>
<!-- Sidebar -->
<div class="col-md-3">
<div class="col-md-2">
{{ template "chrome/menu.html" . }}
</div>
</div>
{{ template "chrome/footer.copyright.html" . }}
</div>
{{ template "chrome/footer.html" . }}
{{ template "chrome/footer.html" . }}

View File

@@ -3,7 +3,7 @@
{{ template "chrome/navbar.html" . }}
<div class="container">
<div class="row">
<div class="col-md-9">
<div class="col-md-10">
<div class="well well-sm">
<strong>Blog Post Archive</strong>
<ul class="list-unstyled">
@@ -15,10 +15,10 @@
</div>
<!-- Sidebar -->
<div class="col-md-3">
<div class="col-md-2">
{{ template "chrome/menu.html" . }}
</div>
</div>
{{ template "chrome/footer.copyright.html" . }}
</div>
{{ template "chrome/footer.html" . }}
{{ template "chrome/footer.html" . }}

View File

@@ -3,7 +3,7 @@
{{ template "chrome/navbar.html" . }}
<div class="container">
<div class="row">
<div class="col-md-9">
<div class="col-md-10">
<div class="well well-sm">
<strong>Items with tag <code>{{ .Title | lower }}</code></strong>
<ul class="list-unstyled">
@@ -15,10 +15,10 @@
</div>
<!-- Sidebar -->
<div class="col-md-3">
<div class="col-md-2">
{{ template "chrome/menu.html" . }}
</div>
</div>
{{ template "chrome/footer.copyright.html" . }}
</div>
{{ template "chrome/footer.html" . }}
{{ template "chrome/footer.html" . }}

View File

@@ -3,12 +3,12 @@
{{ template "chrome/navbar.html" . }}
<div class="container">
<div class="row">
<div class="col-md-9">
<div class="col-md-10">
{{ .Content }}
</div>
<!-- Sidebar -->
<div class="col-md-3">
<div class="col-md-2">
{{ template "chrome/menu.html" . }}
</div>
</div>

View File

@@ -3,7 +3,7 @@
{{ template "chrome/navbar.html" . }}
<div class="container">
<div class="row">
<div class="col-md-9">
<div class="col-md-10">
<div class="well well-sm">
<h3>{{ .Title }}<br> <small>{{ .Description }}</small></h3>
<hr>
@@ -12,7 +12,7 @@
</div>
<!-- Sidebar -->
<div class="col-md-3">
<div class="col-md-2">
<div class="well well-sm"> <!-- Post-specific stats -->
<h4>{{ .Date.Format "January 2, 2006" }}<br>
<small>{{ .WordCount }} words</small></h4>
@@ -32,4 +32,4 @@
</div>
{{ template "chrome/footer.copyright.html" . }}
</div>
{{ template "chrome/footer.html" . }}
{{ template "chrome/footer.html" . }}

View File

@@ -3,7 +3,7 @@
{{ template "chrome/navbar.html" . }}
<div class="container">
<div class="row">
<div class="col-md-9">
<div class="col-md-10">
<div class="well well-sm">
<h3>{{ .Title }}<br> <small>{{ .Description }}</small></h3>
<hr>
@@ -24,7 +24,7 @@
</div>
<!-- Sidebar -->
<div class="col-md-3">
<div class="col-md-2">
{{ template "chrome/menu.html" . }}
</div>
</div>

View File

@@ -25,6 +25,12 @@ tbody tr:nth-child(odd) {
background-color:#d0d0ff
}
pre code {
/* Preserve whitespace. Wrap text only at line breaks. */
/* Avoids auto-wrapping the code lines. */
white-space: pre
}
/* Hover over links on headers */
.header-link {
position: absolute;

View File

@@ -8,6 +8,7 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/rc"
)
@@ -36,6 +37,8 @@ Returns the following values:
"speed": average speed in bytes/sec since start of the process,
"bytes": total transferred bytes since the start of the process,
"errors": number of errors,
"fatalError": whether there has been at least one FatalError,
"retryError": whether there has been at least one non-NoRetryError,
"checks": number of checked files,
"transfers": number of transferred files,
"deletes" : number of deleted files,
@@ -69,6 +72,8 @@ type StatsInfo struct {
bytes int64
errors int64
lastError error
fatalError bool
retryError bool
checks int64
checking *stringSet
checkQueue int
@@ -107,6 +112,8 @@ func (s *StatsInfo) RemoteStats(in rc.Params) (out rc.Params, err error) {
out["speed"] = speed
out["bytes"] = s.bytes
out["errors"] = s.errors
out["fatalError"] = s.fatalError
out["retryError"] = s.retryError
out["checks"] = s.checks
out["transfers"] = s.transfers
out["deletes"] = s.deletes
@@ -234,13 +241,23 @@ func (s *StatsInfo) String() string {
)
if !fs.Config.StatsOneLine {
errorDetails := ""
switch {
case s.fatalError:
errorDetails = " (fatal error encountered)"
case s.retryError:
errorDetails = " (retrying may help)"
case s.errors != 0:
errorDetails = " (no need to retry)"
}
_, _ = fmt.Fprintf(buf, `
Errors: %10d
Errors: %10d%s
Checks: %10d / %d, %s
Transferred: %10d / %d, %s
Elapsed time: %10v
`,
s.errors,
s.errors, errorDetails,
s.checks, totalChecks, percent(s.checks, totalChecks),
s.transfers, totalTransfer, percent(s.transfers, totalTransfer),
dtRounded)
@@ -303,6 +320,34 @@ func (s *StatsInfo) GetLastError() error {
return s.lastError
}
// FatalError sets the fatalError flag
func (s *StatsInfo) FatalError() {
s.mu.Lock()
defer s.mu.Unlock()
s.fatalError = true
}
// HadFatalError returns whether there has been at least one FatalError
func (s *StatsInfo) HadFatalError() bool {
s.mu.RLock()
defer s.mu.RUnlock()
return s.fatalError
}
// RetryError sets the retryError flag
func (s *StatsInfo) RetryError() {
s.mu.Lock()
defer s.mu.Unlock()
s.retryError = true
}
// HadRetryError returns whether there has been at least one non-NoRetryError
func (s *StatsInfo) HadRetryError() bool {
s.mu.RLock()
defer s.mu.RUnlock()
return s.retryError
}
// Deletes updates the stats for deletes
func (s *StatsInfo) Deletes(deletes int64) int64 {
s.mu.Lock()
@@ -311,22 +356,28 @@ func (s *StatsInfo) Deletes(deletes int64) int64 {
return s.deletes
}
// ResetCounters sets the counters (bytes, checks, errors, transfers) to 0
// ResetCounters sets the counters (bytes, checks, errors, transfers, deletes) to 0 and resets lastError, fatalError and retryError
func (s *StatsInfo) ResetCounters() {
s.mu.Lock()
defer s.mu.Unlock()
s.bytes = 0
s.errors = 0
s.lastError = nil
s.fatalError = false
s.retryError = false
s.checks = 0
s.transfers = 0
s.deletes = 0
}
// ResetErrors sets the errors count to 0
// ResetErrors sets the errors count to 0 and resets lastError, fatalError and retryError
func (s *StatsInfo) ResetErrors() {
s.mu.Lock()
defer s.mu.Unlock()
s.errors = 0
s.lastError = nil
s.fatalError = false
s.retryError = false
}
// Errored returns whether there have been any errors
@@ -336,12 +387,18 @@ func (s *StatsInfo) Errored() bool {
return s.errors != 0
}
// Error adds a single error into the stats and assigns lastError
// Error adds a single error into the stats, assigns lastError and sets fatalError or retryError where appropriate
func (s *StatsInfo) Error(err error) {
s.mu.Lock()
defer s.mu.Unlock()
s.errors++
s.lastError = err
switch {
case fserrors.IsFatalError(err):
s.fatalError = true
case !fserrors.IsNoRetryError(err):
s.retryError = true
}
}
// Checking adds a check into the stats

View File

@@ -115,29 +115,31 @@ func makeConfigPath() string {
homedir = os.Getenv("HOME")
}
// Possibly find the user's XDG config paths
// Find user's configuration directory.
// Prefer XDG config path, with fallback to $HOME/.config.
// See XDG Base Directory specification
// https://specifications.freedesktop.org/basedir-spec/latest/
// (https://specifications.freedesktop.org/basedir-spec/latest/),
xdgdir := os.Getenv("XDG_CONFIG_HOME")
var xdgcfgdir string
var cfgdir string
if xdgdir != "" {
xdgcfgdir = filepath.Join(xdgdir, "rclone")
// User's configuration directory for rclone is $XDG_CONFIG_HOME/rclone
cfgdir = filepath.Join(xdgdir, "rclone")
} else if homedir != "" {
xdgdir = filepath.Join(homedir, ".config")
xdgcfgdir = filepath.Join(xdgdir, "rclone")
// User's configuration directory for rclone is $HOME/.config/rclone
cfgdir = filepath.Join(homedir, ".config", "rclone")
}
// Use $XDG_CONFIG_HOME/rclone/rclone.conf if already existing
var xdgconf string
if xdgcfgdir != "" {
xdgconf = filepath.Join(xdgcfgdir, configFileName)
_, err := os.Stat(xdgconf)
// Use rclone.conf from user's configuration directory if already existing
var cfgpath string
if cfgdir != "" {
cfgpath = filepath.Join(cfgdir, configFileName)
_, err := os.Stat(cfgpath)
if err == nil {
return xdgconf
return cfgpath
}
}
// Use $HOME/.rclone.conf if already existing
// Use .rclone.conf from user's home directory if already existing
var homeconf string
if homedir != "" {
homeconf = filepath.Join(homedir, hiddenConfigFileName)
@@ -147,32 +149,39 @@ func makeConfigPath() string {
}
}
// Try to create $XDG_CONFIG_HOME/rclone/rclone.conf
if xdgconf != "" {
// xdgconf != "" implies xdgcfgdir != ""
err := os.MkdirAll(xdgcfgdir, os.ModePerm)
if err == nil {
return xdgconf
}
}
// Try to create $HOME/.rclone.conf
if homeconf != "" {
return homeconf
}
// Check to see if user supplied a --config variable or environment
// variable. We can't use pflag for this because it isn't initialised
// yet so we search the command line manually.
_, configSupplied := os.LookupEnv("RCLONE_CONFIG")
for _, item := range os.Args {
if item == "--config" || strings.HasPrefix(item, "--config=") {
configSupplied = true
break
if !configSupplied {
for _, item := range os.Args {
if item == "--config" || strings.HasPrefix(item, "--config=") {
configSupplied = true
break
}
}
}
// Default to ./.rclone.conf (current working directory)
// If user's configuration directory was found, then try to create it
// and assume rclone.conf can be written there. If user supplied config
// then skip creating the directory since it will not be used.
if cfgpath != "" {
// cfgpath != "" implies cfgdir != ""
if configSupplied {
return cfgpath
}
err := os.MkdirAll(cfgdir, os.ModePerm)
if err == nil {
return cfgpath
}
}
// Assume .rclone.conf can be written to user's home directory.
if homeconf != "" {
return homeconf
}
// Default to ./.rclone.conf (current working directory) if everything else fails.
if !configSupplied {
fs.Errorf(nil, "Couldn't find home directory or read HOME or XDG_CONFIG_HOME environment variables.")
fs.Errorf(nil, "Defaulting to storing config in current directory.")

View File

@@ -328,6 +328,25 @@ type ObjectUnWrapper interface {
UnWrap() Object
}
// SetTierer is an optional interface for Object
type SetTierer interface {
// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
SetTier(tier string) error
}
// GetTierer is an optional interface for Object
type GetTierer interface {
// GetTier returns storage tier or class of the Object
GetTier() string
}
// ListTierer is an optional interface for Object
type ListTierer interface {
// ListTiers returns list of supported storage tiers
ListTiers() []string
}
// ListRCallback defines a callback function for ListR to use
//
// It is called for each tranche of entries read from the listing and
@@ -365,6 +384,9 @@ type Features struct {
WriteMimeType bool // can set the mime type of objects
CanHaveEmptyDirectories bool // can have empty directories
BucketBased bool // is bucket based (like s3, swift etc)
SetTier bool // allows set tier functionality on objects
GetTier bool // allows to retrieve storage tier of objects
ListTiers bool // allows to list supported tiers on objects
// Purge all files in the root and the root directory
//
@@ -583,6 +605,10 @@ func (ft *Features) Mask(f Fs) *Features {
ft.WriteMimeType = ft.WriteMimeType && mask.WriteMimeType
ft.CanHaveEmptyDirectories = ft.CanHaveEmptyDirectories && mask.CanHaveEmptyDirectories
ft.BucketBased = ft.BucketBased && mask.BucketBased
ft.SetTier = ft.SetTier && mask.SetTier
ft.GetTier = ft.GetTier && mask.GetTier
ft.ListTiers = ft.ListTiers && mask.ListTiers
if mask.Purge == nil {
ft.Purge = nil
}

View File

@@ -70,7 +70,7 @@ func (l *LogLevel) Type() string {
// LogPrint sends the text to the logger of level
var LogPrint = func(level LogLevel, text string) {
text = fmt.Sprintf("%-6s: %s", level, text)
log.Print(text)
_ = log.Output(4, text)
}
// LogPrintf produces a log string from the arguments passed in

View File

@@ -16,6 +16,7 @@ import (
// Flags
var (
logFile = flags.StringP("log-file", "", "", "Log everything to this file")
logFormat = flags.StringP("log-format", "", "date,time", "Comma separated list of log format options")
useSyslog = flags.BoolP("syslog", "", false, "Use Syslog for logging")
syslogFacility = flags.StringP("syslog-facility", "", "DAEMON", "Facility for syslog, eg KERN,USER,...")
)
@@ -66,6 +67,28 @@ func Trace(o interface{}, format string, a ...interface{}) func(string, ...inter
// InitLogging start the logging as per the command line flags
func InitLogging() {
flagsStr := "," + *logFormat + ","
var flags int
if strings.Contains(flagsStr, ",date,") {
flags |= log.Ldate
}
if strings.Contains(flagsStr, ",time,") {
flags |= log.Ltime
}
if strings.Contains(flagsStr, ",microseconds,") {
flags |= log.Lmicroseconds
}
if strings.Contains(flagsStr, ",longfile,") {
flags |= log.Llongfile
}
if strings.Contains(flagsStr, ",shortfile,") {
flags |= log.Lshortfile
}
if strings.Contains(flagsStr, ",UTC,") {
flags |= log.LUTC
}
log.SetFlags(flags)
// Log file output
if *logFile != "" {
f, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)

View File

@@ -1407,6 +1407,21 @@ func CopyFile(fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (e
return moveOrCopyFile(fdst, fsrc, dstFileName, srcFileName, true)
}
// SetTier changes tier of object in remote
func SetTier(fsrc fs.Fs, tier string) error {
return ListFn(fsrc, func(o fs.Object) {
objImpl, ok := o.(fs.SetTierer)
if !ok {
fs.Errorf(fsrc, "Remote object does not implement SetTier")
return
}
err := objImpl.SetTier(tier)
if err != nil {
fs.Errorf(fsrc, "Failed to do SetTier, %v", err)
}
})
}
// ListFormat defines files information print format
type ListFormat struct {
separator string

View File

@@ -1,4 +1,4 @@
package fs
// Version of rclone
var Version = "v1.43"
var Version = "v1.43-DEV"

View File

@@ -134,11 +134,12 @@ type ExtraConfigItem struct{ Name, Key, Value string }
// Opt is options for Run
type Opt struct {
RemoteName string
NilObject fs.Object
ExtraConfig []ExtraConfigItem
// SkipBadWindowsCharacters skips unusable characters for windows if set
SkipBadWindowsCharacters bool
RemoteName string
NilObject fs.Object
ExtraConfig []ExtraConfigItem
SkipBadWindowsCharacters bool // skips unusable characters for windows if set
SkipFsMatch bool // if set skip exact matching of Fs value
}
// Run runs the basic integration tests for a remote using the remote
@@ -199,6 +200,14 @@ func Run(t *testing.T, opt *Opt) {
}
}
// Skip if remote is not SetTier capable
skipIfNotSetTier := func(t *testing.T) {
skipIfNotOk(t)
if !remote.Features().SetTier {
t.Skip("FS has no SetTier interface")
}
}
// TestInit tests basic initialisation
t.Run("TestInit", func(t *testing.T) {
var err error
@@ -815,13 +824,20 @@ func Run(t *testing.T, opt *Opt) {
skipIfNotOk(t)
obj := findObject(t, remote, file1.Path)
assert.Equal(t, file1.Path, obj.String())
assert.Equal(t, "<nil>", opt.NilObject.String())
if opt.NilObject != nil {
assert.Equal(t, "<nil>", opt.NilObject.String())
}
})
// TestObjectFs tests the object can be found
t.Run("TestObjectFs", func(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, remote, file1.Path)
// If this is set we don't do the direct comparison of
// the Fs from the object as it may be different
if opt.SkipFsMatch {
return
}
testRemote := remote
if obj.Fs() != testRemote {
// Check to see if this wraps something else
@@ -1051,6 +1067,25 @@ func Run(t *testing.T, opt *Opt) {
require.NotEqual(t, "", link4, "Link should not be empty")
})
// TestSetTier tests SetTier and GetTier functionality
t.Run("TestSetTier", func(t *testing.T) {
skipIfNotSetTier(t)
obj := findObject(t, remote, file1.Path)
lister, ok := obj.(fs.ListTierer)
assert.True(t, ok)
supportedTiers := lister.ListTiers()
setter, ok := obj.(fs.SetTierer)
assert.True(t, ok)
getter, ok := obj.(fs.GetTierer)
assert.True(t, ok)
for _, tier := range supportedTiers {
err := setter.SetTier(tier)
assert.Nil(t, err)
got := getter.GetTier()
assert.Equal(t, tier, got)
}
})
// TestObjectRemove tests Remove
t.Run("TestObjectRemove", func(t *testing.T) {
skipIfNotOk(t)
@@ -1122,6 +1157,16 @@ func Run(t *testing.T, opt *Opt) {
assert.NotEqual(t, int64(0), usage.Total)
})
// TestInternal calls InternalTest() on the Fs
t.Run("TestInternal", func(t *testing.T) {
skipIfNotOk(t)
if it, ok := remote.(InternalTester); ok {
it.InternalTest(t)
} else {
t.Skipf("%T does not implement InternalTester", remote)
}
})
// TestObjectPurge tests Purge
t.Run("TestObjectPurge", func(t *testing.T) {
skipIfNotOk(t)
@@ -1134,16 +1179,6 @@ func Run(t *testing.T, opt *Opt) {
assert.Error(t, err, "Expecting error after on second purge")
})
// TestInternal calls InternalTest() on the Fs
t.Run("TestInternal", func(t *testing.T) {
skipIfNotOk(t)
if it, ok := remote.(InternalTester); ok {
it.InternalTest(t)
} else {
t.Skipf("%T does not implement InternalTester", remote)
}
})
// TestFinalise tidies up after the previous tests
t.Run("TestFinalise", func(t *testing.T) {
skipIfNotOk(t)

View File

@@ -75,7 +75,7 @@ var (
{
Name: "TestJottacloud:",
SubDir: false,
FastList: false,
FastList: true,
},
{
Name: "TestOneDrive:",
@@ -147,6 +147,11 @@ var (
SubDir: false,
FastList: false,
},
{
Name: "TestUnion:",
SubDir: false,
FastList: false,
},
}
// Flags
maxTries = flag.Int("maxtries", 5, "Number of times to try each test")

6
go.mod
View File

@@ -4,7 +4,7 @@ require (
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669
cloud.google.com/go v0.23.0 // indirect
github.com/Azure/azure-pipeline-go v0.0.0-20180607212504-7571e8eb0876 // indirect
github.com/Azure/azure-storage-blob-go v0.0.0-20180712005634-eaae161d9d5e
github.com/Azure/azure-storage-blob-go v0.0.0-20180906215025-bb46532f68b7
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
github.com/Unknwon/goconfig v0.0.0-20180308125533-ef1e4c783f8f
github.com/a8m/tree v0.0.0-20180321023834-3cf936ce15d6
@@ -17,6 +17,7 @@ require (
github.com/djherbis/times v1.0.1
github.com/dropbox/dropbox-sdk-go-unofficial v4.1.0+incompatible
github.com/go-ini/ini v1.37.0 // indirect
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7 // indirect
github.com/golang/protobuf v1.1.0 // indirect
github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 // indirect
github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c // indirect
@@ -25,6 +26,7 @@ require (
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 // indirect
github.com/jtolds/gls v4.2.1+incompatible // indirect
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 // indirect
github.com/kisielk/errcheck v1.1.0 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/mattn/go-runewidth v0.0.2 // indirect
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
@@ -49,12 +51,14 @@ require (
github.com/xanzy/ssh-agent v0.2.0
github.com/yunify/qingstor-sdk-go v2.2.14+incompatible
golang.org/x/crypto v0.0.0-20180617042118-027cca12c2d6
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7 // indirect
golang.org/x/net v0.0.0-20180611182652-db08ff08e862
golang.org/x/oauth2 v0.0.0-20180603041954-1e0a3fa8ba9a
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect
golang.org/x/sys v0.0.0-20180616030259-6c888cc515d3
golang.org/x/text v0.3.0
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
golang.org/x/tools v0.0.0-20180904205237-0aa4b8830f48 // indirect
google.golang.org/api v0.0.0-20180614000435-2eea9ba0a3d9
google.golang.org/appengine v1.1.0 // indirect
gopkg.in/ini.v1 v1.38.2 // indirect

13
go.sum
View File

@@ -6,6 +6,8 @@ github.com/Azure/azure-pipeline-go v0.0.0-20180607212504-7571e8eb0876 h1:3c3mGlh
github.com/Azure/azure-pipeline-go v0.0.0-20180607212504-7571e8eb0876/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
github.com/Azure/azure-storage-blob-go v0.0.0-20180712005634-eaae161d9d5e h1:Ix5oKbq0MlolI+T4EPCL9sddfEw6LgRMpC+qx0Kz5/E=
github.com/Azure/azure-storage-blob-go v0.0.0-20180712005634-eaae161d9d5e/go.mod h1:x2mtS6O3mnMEZOJp7d7oldh8IvatBrMfReiyQ+cKgKY=
github.com/Azure/azure-storage-blob-go v0.0.0-20180906215025-bb46532f68b7 h1:/dp3cg/uwEn3nCIvapCpvwMLMCW34Z9U/UOeTroS6p0=
github.com/Azure/azure-storage-blob-go v0.0.0-20180906215025-bb46532f68b7/go.mod h1:x2mtS6O3mnMEZOJp7d7oldh8IvatBrMfReiyQ+cKgKY=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Unknwon/goconfig v0.0.0-20180308125533-ef1e4c783f8f h1:2h0zBHX3qNDltgnb2FiMJWYTcqJVDdUeHQU2/5CTc5w=
@@ -30,6 +32,8 @@ github.com/dropbox/dropbox-sdk-go-unofficial v4.1.0+incompatible h1:ZFvUIiBbGhDY
github.com/dropbox/dropbox-sdk-go-unofficial v4.1.0+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
github.com/go-ini/ini v1.37.0 h1:/FpMfveJbc7ExTTDgT5nL9Vw+aZdst/c2dOxC931U+M=
github.com/go-ini/ini v1.37.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7 h1:2hRPrmiwPrp3fQX967rNJIhQPtiGXdlQWAxKbKw3VHA=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 h1:zLTLjkaOFEFIOxY5BWLFLwh+cL8vOBW4XJ2aqLE/Tf0=
@@ -46,6 +50,10 @@ github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpR
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 h1:PJPDf8OUfOK1bb/NeTKd4f1QXZItOX389VN3B6qC8ro=
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kisielk/errcheck v1.1.0 h1:ZqfnKyx9KGpRcW04j5nnPDgRgoXUeLh2YFBeFzphcA0=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/mattn/go-runewidth v0.0.2 h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC63o=
@@ -94,6 +102,8 @@ github.com/yunify/qingstor-sdk-go v2.2.14+incompatible h1:FKJYNihc8AXfaSm1R8Rpco
github.com/yunify/qingstor-sdk-go v2.2.14+incompatible/go.mod h1:w6wqLDQ5bBTzxGJ55581UrSwLrsTAsdo9N6yX/8d9RY=
golang.org/x/crypto v0.0.0-20180617042118-027cca12c2d6 h1:Y9MTpro8EV2sz/pZRxSgNsvSfMXLmIHhQO4BGv2My/Q=
golang.org/x/crypto v0.0.0-20180617042118-027cca12c2d6/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7 h1:00BeQWmeaGazuOrq8Q5K5d3/cHaGuFrZzpaHBXfrsUA=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/net v0.0.0-20180611182652-db08ff08e862 h1:JZi6BqOZ+iSgmLWe6llhGrNnEnK+YB/MRkStwnEfbqM=
golang.org/x/net v0.0.0-20180611182652-db08ff08e862/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20180603041954-1e0a3fa8ba9a h1:1Fy38jwe/QZhQfFQBy6dMj9F/WU1C+jo3/zLNr/WhW4=
@@ -106,6 +116,9 @@ golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180904205237-0aa4b8830f48 h1:PIz+xUHW4G/jqfFWeKhQ96ZV/t2HDsXfWj923rV0bZY=
golang.org/x/tools v0.0.0-20180904205237-0aa4b8830f48/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
google.golang.org/api v0.0.0-20180614000435-2eea9ba0a3d9 h1:HcrS+AkAU4PJZAVkIazs99Rc+swHNe1NnV4Gm3RSDCo=
google.golang.org/api v0.0.0-20180614000435-2eea9ba0a3d9/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=

View File

@@ -59,6 +59,12 @@ const (
//
// See https://developers.google.com/drive/v2/web/handle-errors#exponential-backoff
GoogleDrivePacer
// S3Pacer is a specialised pacer for S3
//
// It is basically the defaultPacer, but allows the sleep time to go to 0
// when things are going well.
S3Pacer
)
// Paced is a function which is called by the Call and CallNoRetry
@@ -185,6 +191,8 @@ func (p *Pacer) SetPacer(t Type) *Pacer {
p.calculatePace = p.acdPacer
case GoogleDrivePacer:
p.calculatePace = p.drivePacer
case S3Pacer:
p.calculatePace = p.s3Pacer
default:
p.calculatePace = p.defaultPacer
}
@@ -309,6 +317,46 @@ func (p *Pacer) drivePacer(retry bool) {
}
}
// s3Pacer implements a pacer compatible with our expectations of S3, where it tries to not
// delay at all between successful calls, but backs off in the default fashion in response
// to any errors.
// The assumption is that errors should be exceedingly rare (S3 seems to have largely solved
// the sort of scalability questions rclone is likely to run into), and in the happy case
// it can handle calls with no delays between them.
//
// Basically defaultPacer, but with some handling of sleepTime going to/from 0ms.
// minSleep is only used as the value to restart from after an error and as the threshold
// below which a decaying sleep drops to 0.
//
// Call with p.mu held
func (p *Pacer) s3Pacer(retry bool) {
oldSleepTime := p.sleepTime
if retry {
if p.attackConstant == 0 {
p.sleepTime = p.maxSleep
} else {
if p.sleepTime == 0 {
p.sleepTime = p.minSleep
} else {
p.sleepTime = (p.sleepTime << p.attackConstant) / ((1 << p.attackConstant) - 1)
}
}
if p.sleepTime > p.maxSleep {
p.sleepTime = p.maxSleep
}
if p.sleepTime != oldSleepTime {
fs.Debugf("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
}
} else {
p.sleepTime = (p.sleepTime<<p.decayConstant - p.sleepTime) >> p.decayConstant
if p.sleepTime < p.minSleep {
p.sleepTime = 0
}
if p.sleepTime != oldSleepTime {
fs.Debugf("pacer", "Reducing sleep to %v", p.sleepTime)
}
}
}
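As a side note on the shift arithmetic: each successful call multiplies the sleep by (2^decayConstant - 1)/2^decayConstant, and each retry multiplies it by 2^attackConstant/(2^attackConstant - 1), clamped to maxSleep. A tiny standalone snippet (not part of the pacer package) reproducing the decay values the test below expects with decayConstant = 2:
package main
import (
"fmt"
"time"
)
func main() {
const decayConstant = 2
for _, sleepTime := range []time.Duration{time.Second, 48 * time.Millisecond, 12 * time.Millisecond} {
// Same expression as s3Pacer's success branch: multiply by 3/4 when decayConstant is 2.
decayed := (sleepTime<<decayConstant - sleepTime) >> decayConstant
fmt.Printf("%v -> %v\n", sleepTime, decayed) // 1s -> 750ms, 48ms -> 36ms, 12ms -> 9ms (then clamped to 0, being under minSleep)
}
}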
// endCall implements the pacing algorithm
//
// This should calculate a new sleepTime. It takes a boolean as to

View File

@@ -340,6 +340,32 @@ func TestGoogleDrivePacer(t *testing.T) {
}
}
func TestS3Pacer(t *testing.T) {
p := New().SetMinSleep(10 * time.Millisecond).SetPacer(S3Pacer).SetMaxSleep(time.Second).SetDecayConstant(2)
for _, test := range []struct {
in time.Duration
retry bool
want time.Duration
}{
{0, true, 10 * time.Millisecond}, //Things were going ok, we failed once, back off to minSleep
{10 * time.Millisecond, true, 20 * time.Millisecond}, //Another fail, double the backoff
{10 * time.Millisecond, false, 0}, //Things start going ok when we're at minSleep; should result in no sleep
{12 * time.Millisecond, false, 0}, //*near* minsleep and going ok, decay would take below minSleep, should go to 0
{0, false, 0}, //Things have been going ok; not retrying should keep sleep at 0
{time.Second, true, time.Second}, //Check maxSleep is enforced
{(3 * time.Second) / 4, true, time.Second}, //Check attack heading to maxSleep doesn't exceed maxSleep
{time.Second, false, 750 * time.Millisecond}, //Check decay from maxSleep
{48 * time.Millisecond, false, 36 * time.Millisecond}, //Check simple decay above minSleep
} {
p.sleepTime = test.in
p.s3Pacer(test.retry)
got := p.sleepTime
if got != test.want {
t.Errorf("bad sleep for %v with retry %v: want %v got %v", test.in, test.retry, test.want, got)
}
}
}
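For context, a hedged sketch of how a backend would typically drive this pacer through Call. The func() (bool, error) shape of the Paced callback is assumed from the comment above; doS3Request and shouldRetry are hypothetical placeholders, not real rclone functions.
p := New().SetMinSleep(10 * time.Millisecond).SetMaxSleep(2 * time.Second).SetPacer(S3Pacer)
err := p.Call(func() (bool, error) {
resp, err := doS3Request() // hypothetical API call
// Returning true asks the pacer to back off and retry; false stops retrying.
return shouldRetry(resp, err), err
})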
func TestEndCall(t *testing.T) {
p := New().SetMaxConnections(5)
emptyTokens(p)

View File

@@ -4,8 +4,8 @@ import (
"time"
)
// HTTPAccessConditions identifies standard HTTP access conditions which you optionally set.
type HTTPAccessConditions struct {
// ModifiedAccessConditions identifies standard HTTP access conditions which you optionally set.
type ModifiedAccessConditions struct {
IfModifiedSince time.Time
IfUnmodifiedSince time.Time
IfMatch ETag
@@ -13,7 +13,7 @@ type HTTPAccessConditions struct {
}
// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac HTTPAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) {
func (ac ModifiedAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) {
if !ac.IfModifiedSince.IsZero() {
ims = &ac.IfModifiedSince
}
@@ -31,16 +31,14 @@ func (ac HTTPAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *
// ContainerAccessConditions identifies container-specific access conditions which you optionally set.
type ContainerAccessConditions struct {
HTTPAccessConditions
ModifiedAccessConditions
LeaseAccessConditions
}
// BlobAccessConditions identifies blob-specific access conditions which you optionally set.
type BlobAccessConditions struct {
HTTPAccessConditions
ModifiedAccessConditions
LeaseAccessConditions
AppendBlobAccessConditions
PageBlobAccessConditions
}
// LeaseAccessConditions identifies lease access conditions for a container or blob which you optionally set.

View File

@@ -5,12 +5,14 @@ import "sync/atomic"
// AtomicMorpherInt32 identifies a method passed to and invoked by the AtomicMorphInt32 function.
// The AtomicMorpher callback is passed a startValue and based on this value it returns
// what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherInt32 func(startVal int32) (val int32, morphResult interface{})
type atomicMorpherInt32 func(startVal int32) (val int32, morphResult interface{})
const targetAndMorpherMustNotBeNil = "target and morpher must not be nil"
// AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func AtomicMorphInt32(target *int32, morpher AtomicMorpherInt32) interface{} {
func atomicMorphInt32(target *int32, morpher atomicMorpherInt32) interface{} {
if target == nil || morpher == nil {
panic("target and morpher mut not be nil")
panic(targetAndMorpherMustNotBeNil)
}
for {
currentVal := atomic.LoadInt32(target)
@@ -24,12 +26,12 @@ func AtomicMorphInt32(target *int32, morpher AtomicMorpherInt32) interface{} {
// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorph function.
// The AtomicMorpher callback is passed a startValue and based on this value it returns
// what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interface{})
type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interface{})
// AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func AtomicMorphUint32(target *uint32, morpher AtomicMorpherUint32) interface{} {
func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) interface{} {
if target == nil || morpher == nil {
panic("target and morpher mut not be nil")
panic(targetAndMorpherMustNotBeNil)
}
for {
currentVal := atomic.LoadUint32(target)
@@ -43,12 +45,12 @@ func AtomicMorphUint32(target *uint32, morpher AtomicMorpherUint32) interface{}
// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function.
// The AtomicMorpher callback is passed a startValue and based on this value it returns
// what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{})
type atomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{})
// AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func AtomicMorphInt64(target *int64, morpher AtomicMorpherInt64) interface{} {
func atomicMorphInt64(target *int64, morpher atomicMorpherInt64) interface{} {
if target == nil || morpher == nil {
panic("target and morpher mut not be nil")
panic(targetAndMorpherMustNotBeNil)
}
for {
currentVal := atomic.LoadInt64(target)
@@ -62,12 +64,12 @@ func AtomicMorphInt64(target *int64, morpher AtomicMorpherInt64) interface{} {
// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function.
// The AtomicMorpher callback is passed a startValue and based on this value it returns
// what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interface{})
type atomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interface{})
// AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func AtomicMorphUint64(target *uint64, morpher AtomicMorpherUint64) interface{} {
func atomicMorphUint64(target *uint64, morpher atomicMorpherUint64) interface{} {
if target == nil || morpher == nil {
panic("target and morpher mut not be nil")
panic(targetAndMorpherMustNotBeNil)
}
for {
currentVal := atomic.LoadUint64(target)

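The loop bodies are cut off by the hunks above, but each morph helper follows the same compare-and-swap pattern. A minimal sketch of that pattern, illustrative rather than the vendored implementation:
func morphInt32(target *int32, morpher func(int32) (int32, interface{})) interface{} {
for {
current := atomic.LoadInt32(target) // read the current value
desired, result := morpher(current) // compute the new value and the result to return
if atomic.CompareAndSwapInt32(target, current, desired) {
return result // nobody raced us; the swap took effect
}
// another goroutine changed target first, so reload and try again
}
}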
View File

@@ -12,10 +12,12 @@ import (
"sync"
"time"
"errors"
"github.com/Azure/azure-pipeline-go/pipeline"
)
// CommonResponseHeaders returns the headers common to all blob REST API responses.
// CommonResponse returns the headers common to all blob REST API responses.
type CommonResponse interface {
// ETag returns the value for header ETag.
ETag() ETag
@@ -42,6 +44,7 @@ type UploadToBlockBlobOptions struct {
BlockSize int64
// Progress is a function that is invoked periodically as bytes are sent to the BlockBlobURL.
// Note that the progress reporting is not always increasing; it can go down when retrying a request.
Progress pipeline.ProgressReceiver
// BlobHTTPHeaders indicates the HTTP headers to be associated with the blob.
@@ -65,12 +68,26 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,
if o.BlockSize < 0 || o.BlockSize > BlockBlobMaxUploadBlobBytes {
panic(fmt.Sprintf("BlockSize option must be > 0 and <= %d", BlockBlobMaxUploadBlobBytes))
}
if o.BlockSize == 0 {
o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
}
size := int64(len(b))
if size <= BlockBlobMaxUploadBlobBytes {
bufferSize := int64(len(b))
if o.BlockSize == 0 {
// If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error
if bufferSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks {
return nil, errors.New("Buffer is too large to upload to a block blob")
}
// If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request
if bufferSize <= BlockBlobMaxUploadBlobBytes {
o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
} else {
o.BlockSize = bufferSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks
if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB
o.BlockSize = BlobDefaultDownloadBlockSize
}
// StageBlock will be called with blockSize blocks and a parallelism of (BufferSize / BlockSize).
}
}
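// Worked example (illustrative): a 300 MiB buffer exceeds BlockBlobMaxUploadBlobBytes
// (256 MiB), so BlockSize starts as 300 MiB / 50,000 blocks, roughly 6.3 KiB, which is
// below BlobDefaultDownloadBlockSize and is therefore rounded up to 4 MiB, giving 75 blocks.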
if bufferSize <= BlockBlobMaxUploadBlobBytes {
// If the size can fit in 1 Upload call, do it this way
var body io.ReadSeeker = bytes.NewReader(b)
if o.Progress != nil {
@@ -79,7 +96,7 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,
return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
}
var numBlocks = uint16(((size - 1) / o.BlockSize) + 1)
var numBlocks = uint16(((bufferSize - 1) / o.BlockSize) + 1)
if numBlocks > BlockBlobMaxBlocks {
panic(fmt.Sprintf("The buffer's size is too big or the BlockSize is too small; the number of blocks must be <= %d", BlockBlobMaxBlocks))
}
@@ -90,7 +107,7 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,
err := doBatchTransfer(ctx, batchTransferOptions{
operationName: "UploadBufferToBlockBlob",
transferSize: size,
transferSize: bufferSize,
chunkSize: o.BlockSize,
parallelism: o.Parallelism,
operation: func(offset int64, count int64) error {
@@ -115,7 +132,7 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,
// Block IDs are unique values to avoid issue if 2+ clients are uploading blocks
// at the same time causing PutBlockList to get a mix of blocks from all the clients.
blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes())
_, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions)
_, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil)
return err
},
})
@@ -147,10 +164,9 @@ func UploadFileToBlockBlob(ctx context.Context, file *os.File,
///////////////////////////////////////////////////////////////////////////////
const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
// DownloadFromAzureFileOptions identifies options used by the DownloadAzureFileToBuffer and DownloadAzureFileToFile functions.
// DownloadFromBlobOptions identifies options used by the DownloadBlobToBuffer and DownloadBlobToFile functions.
type DownloadFromBlobOptions struct {
// BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize.
BlockSize int64
@@ -168,10 +184,9 @@ type DownloadFromBlobOptions struct {
RetryReaderOptionsPerBlock RetryReaderOptions
}
// downloadAzureFileToBuffer downloads an Azure file to a buffer with parallel.
// downloadBlobToBuffer downloads an Azure blob to a buffer in parallel.
func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
ac BlobAccessConditions, b []byte, o DownloadFromBlobOptions,
initialDownloadResponse *DownloadResponse) error {
b []byte, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error {
// Validate parameters, and set defaults.
if o.BlockSize < 0 {
panic("BlockSize option must be >= 0")
@@ -193,7 +208,7 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co
count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it
} else {
// If we don't have the length at all, get it
dr, err := blobURL.Download(ctx, 0, CountToEnd, ac, false)
dr, err := blobURL.Download(ctx, 0, CountToEnd, o.AccessConditions, false)
if err != nil {
return err
}
@@ -211,11 +226,11 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co
err := doBatchTransfer(ctx, batchTransferOptions{
operationName: "downloadBlobToBuffer",
transferSize: count,
transferSize: count,
chunkSize: o.BlockSize,
parallelism: o.Parallelism,
operation: func(chunkStart int64, count int64) error {
dr, err := blobURL.Download(ctx, chunkStart+ offset, count, ac, false)
dr, err := blobURL.Download(ctx, chunkStart+offset, count, o.AccessConditions, false)
body := dr.Body(o.RetryReaderOptionsPerBlock)
if o.Progress != nil {
rangeProgress := int64(0)
@@ -241,18 +256,18 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co
return nil
}
// DownloadAzureFileToBuffer downloads an Azure file to a buffer with parallel.
// DownloadBlobToBuffer downloads an Azure blob to a buffer in parallel.
// Offset and count are optional, pass 0 for both to download the entire blob.
func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
ac BlobAccessConditions, b []byte, o DownloadFromBlobOptions) error {
return downloadBlobToBuffer(ctx, blobURL, offset, count, ac, b, o, nil)
b []byte, o DownloadFromBlobOptions) error {
return downloadBlobToBuffer(ctx, blobURL, offset, count, b, o, nil)
}
// DownloadBlobToFile downloads an Azure file to a local file.
// DownloadBlobToFile downloads an Azure blob to a local file.
// The file would be truncated if the size doesn't match.
// Offset and count are optional, pass 0 for both to download the entire blob.
func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64,
ac BlobAccessConditions, file *os.File, o DownloadFromBlobOptions) error {
file *os.File, o DownloadFromBlobOptions) error {
// 1. Validate parameters.
if file == nil {
panic("file must not be nil")
@@ -262,8 +277,8 @@ func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, coun
var size int64
if count == CountToEnd {
// Try to get Azure file's size
props, err := blobURL.GetProperties(ctx, ac)
// Try to get Azure blob's size
props, err := blobURL.GetProperties(ctx, o.AccessConditions)
if err != nil {
return err
}
@@ -272,7 +287,7 @@ func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, coun
size = count
}
// 3. Compare and try to resize local file's size if it doesn't match Azure file's size.
// 3. Compare and try to resize local file's size if it doesn't match Azure blob's size.
stat, err := file.Stat()
if err != nil {
return err
@@ -284,19 +299,18 @@ func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, coun
}
if size > 0 {
// 4. Set mmap and call DownloadAzureFileToBuffer.
// 4. Set mmap and call downloadBlobToBuffer.
m, err := newMMF(file, true, 0, int(size))
if err != nil {
return err
}
defer m.unmap()
return downloadBlobToBuffer(ctx, blobURL, offset, size, ac, m, o, nil)
return downloadBlobToBuffer(ctx, blobURL, offset, size, m, o, nil)
} else { // if the blob's size is 0, there is no need in downloading it
return nil
}
}
///////////////////////////////////////////////////////////////////////////////
// BatchTransferOptions identifies options used by doBatchTransfer.
@@ -374,7 +388,10 @@ func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL
result, err := uploadStream(ctx, reader,
UploadStreamOptions{BufferSize: o.BufferSize, MaxBuffers: o.MaxBuffers},
&uploadStreamToBlockBlobOptions{b: blockBlobURL, o: o, blockIDPrefix: newUUID()})
return result.(CommonResponse), err
if err != nil {
return nil, err
}
return result.(CommonResponse), nil
}
type uploadStreamToBlockBlobOptions struct {
@@ -396,7 +413,7 @@ func (t *uploadStreamToBlockBlobOptions) chunk(ctx context.Context, num uint32,
return nil
}
// Else, upload a staged block...
AtomicMorphUint32(&t.maxBlockNum, func(startVal uint32) (val uint32, morphResult interface{}) {
atomicMorphUint32(&t.maxBlockNum, func(startVal uint32) (val uint32, morphResult interface{}) {
// Atomically remember (in t.numBlocks) the maximum block num we've ever seen
if startVal < num {
return num, nil
@@ -404,7 +421,7 @@ func (t *uploadStreamToBlockBlobOptions) chunk(ctx context.Context, num uint32,
return startVal, nil
})
blockID := newUuidBlockID(t.blockIDPrefix).WithBlockNumber(num).ToBase64()
_, err := t.b.StageBlock(ctx, blockID, bytes.NewReader(buffer), LeaseAccessConditions{})
_, err := t.b.StageBlock(ctx, blockID, bytes.NewReader(buffer), LeaseAccessConditions{}, nil)
return err
}
@@ -416,7 +433,7 @@ func (t *uploadStreamToBlockBlobOptions) end(ctx context.Context) (interface{},
}
// Multiple blocks staged, commit them all now
blockID := newUuidBlockID(t.blockIDPrefix)
blockIDs := make([]string, t.maxBlockNum + 1)
blockIDs := make([]string, t.maxBlockNum+1)
for bn := uint32(0); bn <= t.maxBlockNum; bn++ {
blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64()
}
@@ -436,7 +453,28 @@ type UploadStreamOptions struct {
BufferSize int
}
type firstErr struct {
lock sync.Mutex
finalError error
}
func (fe *firstErr) set(err error) {
fe.lock.Lock()
if fe.finalError == nil {
fe.finalError = err
}
fe.lock.Unlock()
}
func (fe *firstErr) get() (err error) {
fe.lock.Lock()
err = fe.finalError
fe.lock.Unlock()
return
}
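firstErr is a small first-error-wins latch: concurrent chunk uploads may all call set, but only the earliest error is kept and later reported by get. A short hedged usage sketch outside the vendored code; doChunk is a hypothetical worker.
var fe firstErr
var wg sync.WaitGroup
for i := 0; i < 4; i++ {
wg.Add(1)
go func(n int) {
defer wg.Done()
if err := doChunk(n); err != nil { // doChunk is hypothetical
fe.set(err) // only the first error recorded is kept
}
}(i)
}
wg.Wait()
if err := fe.get(); err != nil {
// handle the first error observed across all workers
}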
func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions, t iTransfer) (interface{}, error) {
firstErr := firstErr{}
ctx, cancel := context.WithCancel(ctx) // New context so that any failure cancels everything
defer cancel()
wg := sync.WaitGroup{} // Used to know when all outgoing messages have finished processing
@@ -463,9 +501,12 @@ func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions,
err := t.chunk(ctx, outgoingMsg.chunkNum, outgoingMsg.buffer)
wg.Done() // Indicate this buffer was sent
if nil != err {
// NOTE: finalErr could be assigned to multiple times here which is OK,
// some error will be returned.
firstErr.set(err)
cancel()
}
incoming <- outgoingMsg.buffer // The goroutine reading from the stream can use reuse this buffer now
incoming <- outgoingMsg.buffer // The goroutine reading from the stream can reuse this buffer now
}
}()
}
@@ -490,7 +531,7 @@ func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions,
buffer = <-incoming
}
n, err := io.ReadFull(reader, buffer)
if err != nil {
if err != nil { // Less than len(buffer) bytes were read
buffer = buffer[:n] // Make slice match the # of read bytes
}
if len(buffer) > 0 {
@@ -499,12 +540,21 @@ func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions,
outgoing <- OutgoingMsg{chunkNum: c, buffer: buffer}
}
if err != nil { // The reader is done, no more outgoing buffers
if err == io.EOF || err == io.ErrUnexpectedEOF {
err = nil // This function does NOT return an error if io.ReadFull returns io.EOF or io.ErrUnexpectedEOF
} else {
firstErr.set(err)
}
break
}
}
// NOTE: Don't close the incoming channel because the outgoing goroutines post buffers into it when they are done
close(outgoing) // Make all the outgoing goroutines terminate when this channel is empty
wg.Wait() // Wait for all pending outgoing messages to complete
// After all blocks uploaded, commit them to the blob & return the result
return t.end(ctx)
err := firstErr.get()
if err == nil {
// If no error, after all blocks uploaded, commit them to the blob & return the result
return t.end(ctx)
}
return nil, err
}

View File

@@ -1,6 +1,7 @@
package azblob
import (
"net"
"net/url"
"strings"
)
@@ -14,21 +15,47 @@ const (
// existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL().
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
type BlobURLParts struct {
Scheme string // Ex: "https://"
Host string // Ex: "account.blob.core.windows.net"
ContainerName string // "" if no container
BlobName string // "" if no blob
Snapshot string // "" if not a snapshot
SAS SASQueryParameters
UnparsedParams string
Scheme string // Ex: "https://"
Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80"
IPEndpointStyleInfo IPEndpointStyleInfo
ContainerName string // "" if no container
BlobName string // "" if no blob
Snapshot string // "" if not a snapshot
SAS SASQueryParameters
UnparsedParams string
}
// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator.
// Ex: "https://10.132.141.33/accountname/containername"
type IPEndpointStyleInfo struct {
AccountName string // "" if not using IP endpoint style
}
// isIPEndpointStyle checks if the URL's host is an IP; in that case the storage account endpoint is composed as:
// http(s)://IP(:port)/storageaccount/container/...
// As with url.URL's Host field, host may be either host or host:port
func isIPEndpointStyle(host string) bool {
if host == "" {
return false
}
if h, _, err := net.SplitHostPort(host); err == nil {
host = h
}
// For IPv6, there could be a case where SplitHostPort fails because it cannot find a port.
// In this case, eliminate the '[' and ']' in the URL.
// For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732
if host[0] == '[' && host[len(host)-1] == ']' {
host = host[1 : len(host)-1]
}
return net.ParseIP(host) != nil
}
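To make the IP endpoint style concrete, a hedged example of parsing an emulator-style URL with NewBlobURLParts (defined just below); the literal URL and the expected field values are illustrative.
u, _ := url.Parse("https://10.132.141.33:10000/devstoreaccount1/mycontainer/dir/blob.txt")
parts := NewBlobURLParts(*u)
// parts.IPEndpointStyleInfo.AccountName == "devstoreaccount1"
// parts.ContainerName == "mycontainer"
// parts.BlobName == "dir/blob.txt"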
// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other
// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object.
func NewBlobURLParts(u url.URL) BlobURLParts {
up := BlobURLParts{
Scheme: u.Scheme,
Host: u.Host,
Scheme: u.Scheme,
Host: u.Host,
}
// Find the container & blob names (if any)
@@ -37,10 +64,17 @@ func NewBlobURLParts(u url.URL) BlobURLParts {
if path[0] == '/' {
path = path[1:] // If path starts with a slash, remove it
}
if isIPEndpointStyle(up.Host) {
if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob
up.IPEndpointStyleInfo.AccountName = path
} else {
up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes
path = path[accountEndIndex+1:] // path refers to portion after the account name now (container & blob names)
}
}
// Find the next slash (if it exists)
containerEndIndex := strings.Index(path, "/")
if containerEndIndex == -1 { // Slash not found; path has container name & no blob name
containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists)
if containerEndIndex == -1 { // Slash not found; path has container name & no blob name
up.ContainerName = path
} else {
up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes
@@ -77,6 +111,9 @@ func (values caseInsensitiveValues) Get(key string) ([]string, bool) {
// field contains the SAS, snapshot, and unparsed query parameters.
func (up BlobURLParts) URL() url.URL {
path := ""
if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" {
path += "/" + up.IPEndpointStyleInfo.AccountName
}
// Concatenate container & blob names (if they exist)
if up.ContainerName != "" {
path += "/" + up.ContainerName

View File

@@ -8,6 +8,7 @@ import (
)
// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
type BlobSASSignatureValues struct {
Version string `param:"sv"` // If not specified, this defaults to SASVersion
Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants

View File

@@ -45,7 +45,7 @@ func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL {
// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*AppendBlobCreateResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
return ab.abClient.Create(ctx, 0, nil,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
@@ -56,17 +56,23 @@ func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac BlobAccessConditions) (*AppendBlobAppendBlockResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendBlobAccessConditions.pointers()
func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers()
return ab.abClient.AppendBlock(ctx, body, validateSeekableStreamAt0AndGetCount(body), nil,
ac.LeaseAccessConditions.pointers(),
transactionalMD5, ac.LeaseAccessConditions.pointers(),
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
// AppendBlobAccessConditions identifies append blob-specific access conditions which you optionally set.
type AppendBlobAccessConditions struct {
ModifiedAccessConditions
LeaseAccessConditions
AppendPositionAccessConditions
}
// AppendPositionAccessConditions identifies append blob-specific access conditions which you optionally set.
type AppendPositionAccessConditions struct {
// IfAppendPositionEqual ensures that the AppendBlock operation succeeds
// only if the append position is equal to a value.
// IfAppendPositionEqual=0 means no 'IfAppendPositionEqual' header specified.
@@ -83,7 +89,7 @@ type AppendBlobAccessConditions struct {
}
// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac AppendBlobAccessConditions) pointers() (iape *int64, imsltoe *int64) {
func (ac AppendPositionAccessConditions) pointers() (iape *int64, imsltoe *int64) {
if ac.IfAppendPositionEqual < -1 {
panic("IfAppendPositionEqual can't be less than -1")
}

View File

@@ -64,13 +64,14 @@ func (b BlobURL) ToPageBlobURL() PageBlobURL {
}
// DownloadBlob reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// Passing azblob.CountToEnd (0) for count will download the blob from the offset to the end.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool) (*DownloadResponse, error) {
var xRangeGetContentMD5 *bool
if rangeGetContentMD5 {
xRangeGetContentMD5 = &rangeGetContentMD5
}
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
dr, err := b.blobClient.Download(ctx, nil, nil,
httpRange{offset: offset, count: count}.pointers(),
ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5,
@@ -90,7 +91,7 @@ func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac Blo
// Note that deleting a blob also deletes all its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return b.blobClient.Delete(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
@@ -114,7 +115,7 @@ func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType) (*BlobSetTier
// GetBlobProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*BlobGetPropertiesResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
@@ -122,7 +123,7 @@ func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*B
// SetBlobHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return b.blobClient.SetHTTPHeaders(ctx, nil,
&h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage,
ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
@@ -132,7 +133,7 @@ func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobA
// SetBlobMetadata changes a blob's metadata.
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobSetMetadataResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
@@ -143,14 +144,14 @@ func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobA
// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter
// because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this
// performance hit.
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return b.blobClient.CreateSnapshot(ctx, nil, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil)
}
// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between
// 15 to 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac HTTPAccessConditions) (*BlobAcquireLeaseResponse, error) {
func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*BlobAcquireLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
@@ -158,7 +159,7 @@ func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration i
// RenewLease renews the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*BlobRenewLeaseResponse, error) {
func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobRenewLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.RenewLease(ctx, leaseID, nil,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
@@ -166,7 +167,7 @@ func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac HTTPAccessCo
// ReleaseLease releases the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*BlobReleaseLeaseResponse, error) {
func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobReleaseLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.ReleaseLease(ctx, leaseID, nil,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
@@ -175,7 +176,7 @@ func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac HTTPAccess
// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
// constant to break a fixed-duration lease when it expires or an infinite lease immediately.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac HTTPAccessConditions) (*BlobBreakLeaseResponse, error) {
func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac ModifiedAccessConditions) (*BlobBreakLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
@@ -183,7 +184,7 @@ func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac
// ChangeLease changes the blob's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac HTTPAccessConditions) (*BlobChangeLeaseResponse, error) {
func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*BlobChangeLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.ChangeLease(ctx, leaseID, proposedID,
nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
@@ -201,10 +202,9 @@ func leasePeriodPointer(period int32) (p *int32) {
// StartCopyFromURL copies the data at the source URL to a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac BlobAccessConditions, dstac BlobAccessConditions) (*BlobStartCopyFromURLResponse, error) {
srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.HTTPAccessConditions.pointers()
dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.HTTPAccessConditions.pointers()
srcLeaseID := srcac.LeaseAccessConditions.pointers()
func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions) (*BlobStartCopyFromURLResponse, error) {
srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
dstLeaseID := dstac.LeaseAccessConditions.pointers()
return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata,
@@ -212,7 +212,7 @@ func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata
srcIfMatchETag, srcIfNoneMatchETag,
dstIfModifiedSince, dstIfUnmodifiedSince,
dstIfMatchETag, dstIfNoneMatchETag,
dstLeaseID, srcLeaseID, nil)
dstLeaseID, nil)
}
// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.

View File

@@ -12,7 +12,7 @@ import (
)
const (
// BlockBlobMaxPutBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
// BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
@@ -59,7 +59,7 @@ func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL {
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobUploadResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return bb.bbClient.Upload(ctx, body, validateSeekableStreamAt0AndGetCount(body), nil,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(),
@@ -70,16 +70,15 @@ func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTT
// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions) (*BlockBlobStageBlockResponse, error) {
return bb.bbClient.StageBlock(ctx, base64BlockID, validateSeekableStreamAt0AndGetCount(body), body, nil, ac.pointers(), nil)
func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte) (*BlockBlobStageBlockResponse, error) {
return bb.bbClient.StageBlock(ctx, base64BlockID, validateSeekableStreamAt0AndGetCount(body), body, transactionalMD5, nil, ac.pointers(), nil)
}
// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// If count is CountToEnd (0), then data is read from specified offset to the end.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, ac LeaseAccessConditions) (*BlockBlobStageBlockFromURLResponse, error) {
sourceURLStr := sourceURL.String()
return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, &sourceURLStr, httpRange{offset: offset, count: count}.pointers(), nil, nil, ac.pointers(), nil)
return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, ac.pointers(), nil)
}
// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
@@ -90,7 +89,7 @@ func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID stri
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders,
metadata Metadata, ac BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,

View File

@@ -92,7 +92,7 @@ func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions)
panic("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
}
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
return c.client.Delete(ctx, nil, ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, nil)
}
@@ -111,7 +111,7 @@ func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac Con
if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
panic("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service")
}
ifModifiedSince, _, _, _ := ac.HTTPAccessConditions.pointers()
ifModifiedSince, _, _, _ := ac.ModifiedAccessConditions.pointers()
return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil)
}
@@ -183,14 +183,14 @@ func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAcce
if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
panic("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
}
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(),
accessType, ifModifiedSince, ifUnmodifiedSince, nil)
}
// AcquireLease acquires a lease on the container for delete operations. The lease duration must be between 15 to 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac HTTPAccessConditions) (*ContainerAcquireLeaseResponse, error) {
func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*ContainerAcquireLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
return c.client.AcquireLease(ctx, nil, &duration, &proposedID,
ifModifiedSince, ifUnmodifiedSince, nil)
@@ -198,28 +198,28 @@ func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, durat
// RenewLease renews the container's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*ContainerRenewLeaseResponse, error) {
func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerRenewLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
return c.client.RenewLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
}
// ReleaseLease releases the container's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*ContainerReleaseLeaseResponse, error) {
func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerReleaseLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
return c.client.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
}
// BreakLease breaks the container's previously-acquired lease (if it exists).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac HTTPAccessConditions) (*ContainerBreakLeaseResponse, error) {
func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac ModifiedAccessConditions) (*ContainerBreakLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
return c.client.BreakLease(ctx, nil, leasePeriodPointer(period), ifModifiedSince, ifUnmodifiedSince, nil)
}
// ChangeLease changes the container's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac HTTPAccessConditions) (*ContainerChangeLeaseResponse, error) {
func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*ContainerChangeLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
return c.client.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
}

View File

@@ -47,28 +47,28 @@ func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL {
return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
}
// CreatePageBlob creates a page blob of the specified length. Call PutPage to upload data data to a page blob.
// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobCreateResponse, error) {
if sequenceNumber < 0 {
panic("sequenceNumber must be greater than or equal to 0")
}
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return pb.pbClient.Create(ctx, 0, nil,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return pb.pbClient.Create(ctx, 0, size, nil,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
metadata, ac.LeaseAccessConditions.pointers(),
&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &size, &sequenceNumber, nil)
&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &sequenceNumber, nil)
}
// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac BlobAccessConditions) (*PageBlobUploadPagesResponse, error) {
func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac PageBlobAccessConditions, transactionalMD5 []byte) (*PageBlobUploadPagesResponse, error) {
count := validateSeekableStreamAt0AndGetCount(body)
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.PageBlobAccessConditions.pointers()
return pb.pbClient.UploadPages(ctx, body, count, nil,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil,
PageRange{Start: offset, End: offset + count - 1}.pointers(),
ac.LeaseAccessConditions.pointers(),
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
@@ -77,9 +77,9 @@ func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.Rea
// ClearPages frees the specified pages from the page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageBlobClearPagesResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.PageBlobAccessConditions.pointers()
func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions) (*PageBlobClearPagesResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
return pb.pbClient.ClearPages(ctx, 0, nil,
PageRange{Start: offset, End: offset + count - 1}.pointers(),
ac.LeaseAccessConditions.pointers(),
@@ -90,7 +90,7 @@ func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64,
// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageList, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return pb.pbClient.GetPageRanges(ctx, nil, nil,
httpRange{offset: offset, count: count}.pointers(),
ac.LeaseAccessConditions.pointers(),
@@ -100,7 +100,7 @@ func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int
// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot,
httpRange{offset: offset, count: count}.pointers(),
ac.LeaseAccessConditions.pointers(),
@@ -114,7 +114,7 @@ func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessCondi
if size%PageBlobPageBytes != 0 {
panic("Size must be a multiple of PageBlobPageBytes (512)")
}
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
@@ -129,7 +129,7 @@ func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceN
if action == SequenceNumberActionIncrement {
sn = nil
}
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
return pb.pbClient.UpdateSequenceNumber(ctx, action, nil,
ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
sn, nil)
@@ -141,7 +141,7 @@ func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceN
// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL, snapshot string, ac BlobAccessConditions) (*PageBlobCopyIncrementalResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
qp := source.Query()
qp.Set("snapshot", snapshot)
source.RawQuery = qp.Encode()
@@ -170,8 +170,14 @@ func (pr PageRange) pointers() *string {
return &asString
}
// PageBlobAccessConditions identifies page blob-specific access conditions which you optionally set.
type PageBlobAccessConditions struct {
ModifiedAccessConditions
LeaseAccessConditions
SequenceNumberAccessConditions
}
// SequenceNumberAccessConditions identifies page blob-specific access conditions which you optionally set.
type SequenceNumberAccessConditions struct {
// IfSequenceNumberLessThan ensures that the page blob operation succeeds
// only if the blob's sequence number is less than a value.
// IfSequenceNumberLessThan=0 means no 'IfSequenceNumberLessThan' header specified.
@@ -195,7 +201,7 @@ type PageBlobAccessConditions struct {
}
// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac PageBlobAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) {
func (ac SequenceNumberAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) {
if ac.IfSequenceNumberLessThan < -1 {
panic("Ifsequencenumberlessthan can't be less than -1")
}
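A minimal sketch of the reshuffled access-condition types: sequence-number conditions now live in their own embedded struct, and UploadPages takes an optional transactional MD5 (nil here). Names are illustrative, and data must be 512-byte aligned:

func uploadFirstPages(ctx context.Context, pageBlobURL azblob.PageBlobURL, data []byte) error {
	ac := azblob.PageBlobAccessConditions{
		SequenceNumberAccessConditions: azblob.SequenceNumberAccessConditions{
			IfSequenceNumberLessThan: 100, // only write while the blob's sequence number is below 100
		},
	}
	_, err := pageBlobURL.UploadPages(ctx, 0, bytes.NewReader(data), ac, nil)
	return err
}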

View File

@@ -81,7 +81,7 @@ func appendToURLPath(u url.URL, name string) url.URL {
// After getting a segment, process it, and then call ListContainersFlatSegment again (passing the
// previously-returned Marker) to get the next segment. For more information, see
// https://docs.microsoft.com/rest/api/storageservices/list-containers2.
func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersResponse, error) {
func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersSegmentResponse, error) {
prefix, include, maxResults := o.pointers()
return s.client.ListContainersSegment(ctx, prefix, marker.val, maxResults, include, nil, nil)
}
@@ -89,8 +89,8 @@ func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o
// ListContainersOptions defines options available when calling ListContainers.
type ListContainersSegmentOptions struct {
Detail ListContainersDetail // No IncludeType header is produced if ""
Prefix string // No Prefix header is produced if ""
MaxResults int32 // 0 means unspecified
Prefix string // No Prefix header is produced if ""
MaxResults int32 // 0 means unspecified
// TODO: update swagger to generate this type?
}
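The segment/marker loop described in the comment above looks roughly like this (a sketch; the ContainerItems and NextMarker field names are assumed from the generated models):

func listAllContainers(ctx context.Context, serviceURL azblob.ServiceURL) error {
	for marker := (azblob.Marker{}); marker.NotDone(); {
		resp, err := serviceURL.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{})
		if err != nil {
			return err
		}
		for _, container := range resp.ContainerItems {
			fmt.Println(container.Name)
		}
		marker = resp.NextMarker // empty on the last segment, which ends the loop
	}
	return nil
}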

View File

@@ -17,12 +17,12 @@ import (
// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
// storage account's name and either its primary or secondary key.
func NewSharedKeyCredential(accountName, accountKey string) *SharedKeyCredential {
func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
bytes, err := base64.StdEncoding.DecodeString(accountKey)
if err != nil {
panic(err)
return &SharedKeyCredential{}, err
}
return &SharedKeyCredential{accountName: accountName, accountKey: bytes}
return &SharedKeyCredential{accountName: accountName, accountKey: bytes}, nil
}
// SharedKeyCredential contains an account's name and its primary or secondary key.
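Callers must now handle the returned error instead of relying on a panic; a minimal sketch (the account name and key values are placeholders):

func newPipeline(accountName, accountKeyBase64 string) (pipeline.Pipeline, error) {
	credential, err := azblob.NewSharedKeyCredential(accountName, accountKeyBase64)
	if err != nil {
		return nil, fmt.Errorf("invalid storage account key: %v", err)
	}
	return azblob.NewPipeline(credential, azblob.PipelineOptions{}), nil
}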

View File

@@ -11,6 +11,10 @@ import (
"github.com/Azure/azure-pipeline-go/pipeline"
)
// TokenRefresher represents a callback method that you write; this method is called periodically
// so you can refresh the token credential's value.
type TokenRefresher func(credential TokenCredential) time.Duration
// TokenCredential represents a token credential (which is also a pipeline.Factory).
type TokenCredential interface {
Credential
@@ -20,12 +24,15 @@ type TokenCredential interface {
// NewTokenCredential creates a token credential for use with role-based access control (RBAC) access to Azure Storage
// resources. You initialize the TokenCredential with an initial token value. If you pass a non-nil value for
// tokenRefresher, then the function you pass will be called immediately (so it can refresh and change the
// TokenCredential's token value by calling SetToken; your tokenRefresher function must return a time.Duration
// tokenRefresher, then the function you pass will be called immediately so it can refresh and change the
// TokenCredential's token value by calling SetToken. Your tokenRefresher function must return a time.Duration
// indicating how long the TokenCredential object should wait before calling your tokenRefresher function again.
func NewTokenCredential(initialToken string, tokenRefresher func(credential TokenCredential) time.Duration) TokenCredential {
// If your tokenRefresher callback fails to refresh the token, you can return a duration of 0 to stop your
// TokenCredential object from ever invoking tokenRefresher again. Also, one way to deal with failing to refresh a
// token is to cancel a context.Context object used by requests that have the TokenCredential object in their pipeline.
func NewTokenCredential(initialToken string, tokenRefresher TokenRefresher) TokenCredential {
tc := &tokenCredential{}
tc.SetToken(initialToken) // We dont' set it above to guarantee atomicity
tc.SetToken(initialToken) // We don't set it above to guarantee atomicity
if tokenRefresher == nil {
return tc // If no callback specified, return the simple tokenCredential
}
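A sketch of a TokenRefresher callback following the contract described above; fetchNewToken is a hypothetical helper that obtains a fresh OAuth token and its expiry time:

refresher := func(credential azblob.TokenCredential) time.Duration {
	token, expiresAt, err := fetchNewToken()
	if err != nil {
		return 0 // per the docs above, 0 stops the credential from calling the refresher again
	}
	credential.SetToken(token)
	return time.Until(expiresAt) - time.Minute // ask to be called again shortly before expiry
}
tc := azblob.NewTokenCredential(initialToken, refresher)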
@@ -68,7 +75,7 @@ type tokenCredential struct {
// The members below are only used if the user specified a tokenRefresher callback function.
timer *time.Timer
tokenRefresher func(c TokenCredential) time.Duration
tokenRefresher TokenRefresher
lock sync.Mutex
stopped bool
}
@@ -84,7 +91,7 @@ func (f *tokenCredential) SetToken(token string) { f.token.Store(token) }
// startRefresh calls refresh which immediately calls tokenRefresher
// and then starts a timer to call tokenRefresher in the future.
func (f *tokenCredential) startRefresh(tokenRefresher func(c TokenCredential) time.Duration) {
func (f *tokenCredential) startRefresh(tokenRefresher TokenRefresher) {
f.tokenRefresher = tokenRefresher
f.stopped = false // In case user calls StartRefresh, StopRefresh, & then StartRefresh again
f.refresh()
@@ -95,11 +102,13 @@ func (f *tokenCredential) startRefresh(tokenRefresher func(c TokenCredential) ti
// in order to refresh the token again in the future.
func (f *tokenCredential) refresh() {
d := f.tokenRefresher(f) // Invoke the user's refresh callback outside of the lock
f.lock.Lock()
if !f.stopped {
f.timer = time.AfterFunc(d, f.refresh)
if d > 0 { // If duration is 0 or negative, refresher wants to not be called again
f.lock.Lock()
if !f.stopped {
f.timer = time.AfterFunc(d, f.refresh)
}
f.lock.Unlock()
}
f.lock.Unlock()
}
// stopRefresh stops any pending timer and sets stopped field to true to prevent

View File

@@ -109,6 +109,7 @@ func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
})
}
// redactSigQueryParam redacts the 'sig' query parameter in URL's raw query to protect secret.
func redactSigQueryParam(rawQuery string) (bool, string) {
rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig=
sigFound := strings.Contains(rawQuery, "?sig=")
@@ -135,7 +136,8 @@ func prepareRequestForLogging(request pipeline.Request) *http.Request {
req = request.Copy()
req.Request.URL.RawQuery = rawQuery
}
return req.Request
return prepareRequestForServiceLogging(req)
}
func stack() []byte {
@@ -148,3 +150,33 @@ func stack() []byte {
buf = make([]byte, 2*len(buf))
}
}
///////////////////////////////////////////////////////////////////////////////////////
// This redaction phase is useful for the blob and file services only. For other services,
// this method can directly return request.Request.
///////////////////////////////////////////////////////////////////////////////////////
func prepareRequestForServiceLogging(request pipeline.Request) *http.Request {
req := request
if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsCopySourceHeader); exist {
req = request.Copy()
url, err := url.Parse(req.Header.Get(key))
if err == nil {
if sigFound, rawQuery := redactSigQueryParam(url.RawQuery); sigFound {
url.RawQuery = rawQuery
req.Header.Set(xMsCopySourceHeader, url.String())
}
}
}
return req.Request
}
const xMsCopySourceHeader = "x-ms-copy-source"
func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, string) {
for keyInHeader := range header {
if strings.EqualFold(keyInHeader, key) {
return true, keyInHeader
}
}
return false, ""
}

View File

@@ -2,15 +2,16 @@ package azblob
import (
"context"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"strconv"
"strings"
"time"
"github.com/Azure/azure-pipeline-go/pipeline"
"io/ioutil"
"io"
)
// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
@@ -57,11 +58,11 @@ type RetryOptions struct {
// If RetryReadsFromSecondaryHost is "" (the default) then operations are not retried against another host.
// NOTE: Before setting this field, make sure you understand the issues around reading stale & potentially-inconsistent
// data at this webpage: https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs
RetryReadsFromSecondaryHost string // Comment this out for non-Blob SDKs
RetryReadsFromSecondaryHost string // Comment this out for non-Blob SDKs
}
func (o RetryOptions) retryReadsFromSecondaryHost() string {
return o.RetryReadsFromSecondaryHost // This is for the Blob SDK only
return o.RetryReadsFromSecondaryHost // This is for the Blob SDK only
//return "" // This is for non-blob SDKs
}
@@ -221,9 +222,27 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
considerSecondary = false
action = "Retry: Secondary URL returned 404"
case err != nil:
// NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation
if netErr, ok := err.(net.Error); ok && (netErr.Temporary() || netErr.Timeout()) {
action = "Retry: net.Error and Temporary() or Timeout()"
// NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation.
// Use ServiceCode to check whether the error came from the storage service side;
// ServiceCode is only set when a storage service error occurred.
if stErr, ok := err.(StorageError); ok {
if stErr.Temporary() {
action = "Retry: StorageError with error service code and Temporary()"
} else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // TODO: This is a temporary workaround; remove it once the protocol layer fixes the issue that net.Error is wrapped as storageError
action = "Retry: StorageError with success status code"
} else {
action = "NoRetry: StorageError not Temporary() and without retriable status code"
}
} else if netErr, ok := err.(net.Error); ok {
// Use a list of non-retriable net.Errors rather than a list of retriable ones.
// Some errors that should be retried, such as 'connection reset by peer' and
// 'transport connection broken', do not implement Temporary(), so the SDK
// retries in most cases unless the error is definitely not retriable.
if !isNotRetriable(netErr) {
action = "Retry: net.Error and not in the non-retriable list"
} else {
action = "NoRetry: net.Error and in the non-retriable list"
}
} else {
action = "NoRetry: unrecognized error"
}
@@ -237,11 +256,18 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
if err != nil {
tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
} else {
// TODO: Right now, we've decided to leak the per-try Context until the user's Context is canceled.
// Another option is that we wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper.
// We wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper.
// So, when the user closes the Body, our per-try context gets closed too.
// Another option, is that the Last Policy do this wrapping for a per-retry context (not for the user's context)
_ = tryCancel // So, for now, we don't call cancel: cancel()
if response == nil || response.Response() == nil {
// We panic when response or response.Response() is nil, because for the client the
// response should not be nil if the request was sent and the operation executed successfully.
// Another option would be to execute the cancel function when response or response.Response() is nil,
// since in that case the current per-try context has nothing left to do.
panic("invalid state, response should not be nil when the operation is executed successfully")
}
response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body}
}
break // Don't retry
}
@@ -259,6 +285,78 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
})
}
// contextCancelReadCloser helps to invoke context's cancelFunc properly when the ReadCloser is closed.
type contextCancelReadCloser struct {
cf context.CancelFunc
body io.ReadCloser
}
func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) {
return rc.body.Read(p)
}
func (rc *contextCancelReadCloser) Close() error {
err := rc.body.Close()
if rc.cf != nil {
rc.cf()
}
return err
}
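Because the last per-try context is now released only when the response body is closed, callers should always close download bodies; a hedged sketch, assuming the Download signature of this SDK version:

func readBlob(ctx context.Context, blobURL azblob.BlobURL, w io.Writer) error {
	resp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
	if err != nil {
		return err
	}
	body := resp.Body(azblob.RetryReaderOptions{})
	defer body.Close() // also cancels the wrapped per-try context
	_, err = io.Copy(w, body)
	return err
}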
// isNotRetriable checks if the provided net.Error isn't retriable.
func isNotRetriable(errToParse net.Error) bool {
// No error, so this is NOT retriable.
if errToParse == nil {
return true
}
// The error is either temporary or a timeout so it IS retriable (not not retriable).
if errToParse.Temporary() || errToParse.Timeout() {
return false
}
genericErr := error(errToParse)
// From here all the error are neither Temporary() nor Timeout().
switch err := errToParse.(type) {
case *net.OpError:
// The net.Error is also a net.OpError but the inner error is nil, so this is not retriable.
if err.Err == nil {
return true
}
genericErr = err.Err
}
switch genericErr.(type) {
case *net.AddrError, net.UnknownNetworkError, *net.DNSError, net.InvalidAddrError, *net.ParseError, *net.DNSConfigError:
// If the error is one of the ones listed, then it is NOT retriable.
return true
}
// If it's invalid header field name/value error thrown by http module, then it is NOT retriable.
// This could happen when metadata's key or value is invalid. (RoundTrip in transport.go)
if strings.Contains(genericErr.Error(), "invalid header field") {
return true
}
// Assume the error is retriable.
return false
}
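A package-internal illustration of how the classification behaves for a few synthetic errors (values invented for the example; not from the SDK's tests):

// inside package azblob:
fmt.Println(isNotRetriable(&net.DNSError{IsTimeout: true})) // false: timeouts are retried
fmt.Println(isNotRetriable(&net.DNSError{}))                // true: plain DNS failures are never retried
fmt.Println(isNotRetriable(&net.OpError{Op: "read", Err: errors.New("connection reset by peer")})) // false: retried even without Temporary()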
var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent}
func isSuccessStatusCode(resp *http.Response) bool {
if resp == nil {
return false
}
for _, i := range successStatusCodes {
if i == resp.StatusCode {
return true
}
}
return false
}
// According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away
var logf = func(format string, a ...interface{}) {}

View File

@@ -106,7 +106,7 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
return n, err // All retries exhausted
}
if netErr, ok := err.(net.Error); ok && (netErr.Timeout() || netErr.Temporary()) {
if _, ok := err.(net.Error); ok {
continue
// Loop around and try to get and read from new stream.
}

View File

@@ -205,7 +205,7 @@ func (rt *AccountSASResourceTypes) Parse(s string) error {
switch r {
case 's':
rt.Service = true
case 'q':
case 'c':
rt.Container = true
case 'o':
rt.Object = true
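With the corrected 'c' case, the canonical resource-types string now parses as expected; a minimal sketch:

var rt azblob.AccountSASResourceTypes
if err := rt.Parse("sco"); err != nil {
	return err
}
// rt.Service, rt.Container and rt.Object are now all true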

View File

@@ -43,11 +43,14 @@ func newStorageError(cause error, response *http.Response, description string) e
response: response,
description: description,
},
serviceCode: ServiceCodeType(response.Header.Get("x-ms-error-code")),
}
}
// ServiceCode returns service-error information. The caller may examine these values but should not modify any of them.
func (e *storageError) ServiceCode() ServiceCodeType { return e.serviceCode }
func (e *storageError) ServiceCode() ServiceCodeType {
return e.serviceCode
}
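Since the service code is now taken from the x-ms-error-code response header, callers can keep inspecting it in the usual way; a hedged sketch, assuming ServiceCodeBlobNotFound is one of the generated ServiceCodeType constants:

if stgErr, ok := err.(azblob.StorageError); ok {
	if stgErr.ServiceCode() == azblob.ServiceCodeBlobNotFound {
		// treat as object-not-found
	}
}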
// Error implements the error interface's Error method to return a string representation of the error.
func (e *storageError) Error() string {
@@ -94,8 +97,6 @@ func (e *storageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err
break
case xml.CharData:
switch tokName {
case "Code":
e.serviceCode = ServiceCodeType(tt)
case "Message":
e.description = string(tt)
default:

View File

@@ -16,8 +16,8 @@ type httpRange struct {
}
func (r httpRange) pointers() *string {
if r.offset == 0 && r.count == 0 { // Do common case first for performance
return nil // No specified range
if r.offset == 0 && r.count == CountToEnd { // Do common case first for performance
return nil // No specified range
}
if r.offset < 0 {
panic("The range offset must be >= 0")
@@ -25,7 +25,7 @@ func (r httpRange) pointers() *string {
if r.count < 0 {
panic("The range count must be >= 0")
}
endOffset := "" // if count == 0
endOffset := "" // if count == CountToEnd (0)
if r.count > 0 {
endOffset = strconv.FormatInt((r.offset+r.count)-1, 10)
}
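A package-internal sketch of the new semantics, where CountToEnd (0) means "read to the end of the blob":

whole := httpRange{offset: 0, count: CountToEnd}
_ = whole.pointers() // nil: no Range header, the service returns the whole blob
part := httpRange{offset: 512, count: 1024}
_ = part.pointers() // non-nil header value covering bytes 512 through 1535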

View File

@@ -33,20 +33,21 @@ func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient {
// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
// lease is active and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for
// the append blob. If the Append Block operation would cause the blob to exceed that limit or if the blob size is
// already greater than the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error
// (HTTP status code 412 - Precondition Failed). appendPosition is optional conditional header, used only for the
// Append Block operation. A number indicating the byte offset to compare. Append Block will succeed only if the append
// position is equal to this number. If it is not, the request will fail with the AppendPositionConditionNotMet error
// (HTTP status code 412 - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob
// if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate
// only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to
// operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
// in the analytics logs when storage analytics logging is enabled.
func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) {
// Timeouts for Blob Service Operations.</a> transactionalContentMD5 is specify the transactional md5 for the body, to
// be validated by the service. leaseID is if specified, the operation only succeeds if the container's lease is active
// and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob.
// If the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than
// the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code
// 412 - Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation.
// A number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to
// this number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412
// - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been
// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
// it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only on blobs
// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
// logs when storage analytics logging is enabled.
func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) {
if err := validate([]validation{
{targetValue: body,
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
@@ -55,7 +56,7 @@ func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeek
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.appendBlockPreparer(body, contentLength, timeout, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
@@ -67,7 +68,7 @@ func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeek
}
// appendBlockPreparer prepares the AppendBlock request.
func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, body)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -79,6 +80,9 @@ func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLe
params.Set("comp", "appendblock")
req.URL.RawQuery = params.Encode()
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
if transactionalContentMD5 != nil {
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
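The preparer base64-encodes the digest itself, so callers pass the raw 16-byte MD5; a minimal sketch (data is assumed to hold the block being appended):

sum := md5.Sum(data)              // crypto/md5
transactionalContentMD5 := sum[:] // raw bytes; the preparer adds the base64 Content-MD5 header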
@@ -147,10 +151,7 @@ func (client appendBlobClient) Create(ctx context.Context, contentLength int64,
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
{targetValue: metadata,
constraints: []constraint{{target: "metadata", name: null, rule: false,
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)

View File

@@ -6,14 +6,13 @@ package azblob
import (
"context"
"encoding/base64"
"github.com/Azure/azure-pipeline-go/pipeline"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"time"
"github.com/Azure/azure-pipeline-go/pipeline"
)
// blobClient is the client for the Blob methods of the Azblob service.
@@ -347,10 +346,7 @@ func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, met
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
{targetValue: metadata,
constraints: []constraint{{target: "metadata", name: null, rule: false,
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.createSnapshotPreparer(timeout, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, leaseID, requestID)
@@ -593,6 +589,44 @@ func (client blobClient) downloadResponder(resp pipeline.Response) (pipeline.Res
return &downloadResponse{rawResponse: resp.Response()}, err
}
// GetAccountInfo returns the sku name and account kind
func (client blobClient) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
req, err := client.getAccountInfoPreparer()
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req)
if err != nil {
return nil, err
}
return resp.(*BlobGetAccountInfoResponse), err
}
// getAccountInfoPreparer prepares the GetAccountInfo request.
func (client blobClient) getAccountInfoPreparer() (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
params.Set("restype", "account")
params.Set("comp", "properties")
req.URL.RawQuery = params.Encode()
req.Header.Set("x-ms-version", ServiceVersion)
return req, nil
}
// getAccountInfoResponder handles the response to the GetAccountInfo request.
func (client blobClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &BlobGetAccountInfoResponse{rawResponse: resp.Response()}, err
}
// GetProperties the Get Properties operation returns all user-defined metadata, standard HTTP properties, and system
// properties for the blob. It does not return the content of the blob.
//
@@ -942,10 +976,7 @@ func (client blobClient) SetMetadata(ctx context.Context, timeout *int32, metada
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
{targetValue: metadata,
constraints: []constraint{{target: "metadata", name: null, rule: false,
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.setMetadataPreparer(timeout, metadata, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
@@ -1089,20 +1120,16 @@ func (client blobClient) setTierResponder(resp pipeline.Response) (pipeline.Resp
// operate only on a blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
// without a matching value. leaseID is if specified, the operation only succeeds if the container's lease is active
// and matches this ID. sourceLeaseID is specify this header to perform the operation only if the lease ID given
// matches the active lease ID of the source blob. requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatches *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, leaseID *string, sourceLeaseID *string, requestID *string) (*BlobStartCopyFromURLResponse, error) {
// and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatches *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobStartCopyFromURLResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
{targetValue: metadata,
constraints: []constraint{{target: "metadata", name: null, rule: false,
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatches, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, leaseID, sourceLeaseID, requestID)
req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatches, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, leaseID, requestID)
if err != nil {
return nil, err
}
@@ -1114,7 +1141,7 @@ func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string
}
// startCopyFromURLPreparer prepares the StartCopyFromURL request.
func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatches *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, leaseID *string, sourceLeaseID *string, requestID *string) (pipeline.Request, error) {
func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatches *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@@ -1157,9 +1184,6 @@ func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *in
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if sourceLeaseID != nil {
req.Header.Set("x-ms-source-lease-id", *sourceLeaseID)
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)

Some files were not shown because too many files have changed in this diff.