Mirror of https://github.com/rclone/rclone.git, synced 2025-12-15 07:43:35 +00:00

Compare commits: fix-4293-v...fix-vfs-vd (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 394a4b0afe |  |

.github/workflows/build.yml (vendored): 22 lines changed
@@ -19,13 +19,13 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'race', 'go1.11', 'go1.12', 'go1.13']
+        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.11', 'go1.12', 'go1.13']
 
         include:
           - job_name: linux
             os: ubuntu-latest
             go: '1.14.x'
-            modules: 'on'
+            modules: 'off'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -35,7 +35,7 @@ jobs:
           - job_name: mac
             os: macOS-latest
             go: '1.14.x'
-            modules: 'on'
+            modules: 'off'
             gotags: ''  # cmount doesn't work on osx travis for some reason
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -45,7 +45,7 @@ jobs:
           - job_name: windows_amd64
             os: windows-latest
             go: '1.14.x'
-            modules: 'on'
+            modules: 'off'
             gotags: cmount
             build_flags: '-include "^windows/amd64" -cgo'
             quicktest: true
@@ -55,7 +55,7 @@ jobs:
           - job_name: windows_386
             os: windows-latest
             go: '1.14.x'
-            modules: 'on'
+            modules: 'off'
             gotags: cmount
             goarch: '386'
             cgo: '1'
@@ -66,12 +66,12 @@ jobs:
           - job_name: other_os
             os: ubuntu-latest
             go: '1.14.x'
-            modules: 'on'
+            modules: 'off'
             build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
             compile_all: true
             deploy: true
 
-          - job_name: race
+          - job_name: modules_race
             os: ubuntu-latest
             go: '1.14.x'
             modules: 'on'
@@ -81,19 +81,19 @@ jobs:
           - job_name: go1.11
             os: ubuntu-latest
             go: '1.11.x'
-            modules: 'on'
+            modules: 'off'
             quicktest: true
 
           - job_name: go1.12
             os: ubuntu-latest
             go: '1.12.x'
-            modules: 'on'
+            modules: 'off'
             quicktest: true
 
           - job_name: go1.13
             os: ubuntu-latest
             go: '1.13.x'
-            modules: 'on'
+            modules: 'off'
             quicktest: true
 
     name: ${{ matrix.job_name }}
@@ -244,7 +244,7 @@ jobs:
       - name: Build rclone
         run: |
          docker pull golang
-          docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=mod -v
+          docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=vendor -v
 
       - name: Upload artifacts
         run: |
@@ -186,6 +186,7 @@ with modules beneath.
   * pacer - retries with backoff and paces operations
   * readers - a selection of useful io.Readers
   * rest - a thin abstraction over net/http for REST
+  * vendor - 3rd party code managed by `go mod`
   * vfs - Virtual FileSystem layer for implementing rclone mount and similar
 
 ## Writing Documentation ##
@@ -265,25 +266,41 @@ rclone uses the [go
 modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
 support in go1.11 and later to manage its dependencies.
 
-rclone can be built with modules outside of the GOPATH
+**NB** you must be using go1.11 or above to add a dependency to
+rclone. Rclone will still build with older versions of go, but we use
+the `go mod` command for dependencies which is only in go1.11 and
+above.
+
+rclone can be built with modules outside of the GOPATH, but for
+backwards compatibility with older go versions, rclone also maintains
+a `vendor` directory with all the external code rclone needs for
+building.
+
+The `vendor` directory is entirely managed by the `go mod` tool, do
+not add things manually.
 
 To add a dependency `github.com/ncw/new_dependency` see the
-instructions below. These will fetch the dependency and add it to
-`go.mod` and `go.sum`.
+instructions below. These will fetch the dependency, add it to
+`go.mod` and `go.sum` and vendor it for older go versions.
 
     GO111MODULE=on go get github.com/ncw/new_dependency
+    GO111MODULE=on go mod vendor
 
 You can add constraints on that package when doing `go get` (see the
 go docs linked above), but don't unless you really need to.
 
-Please check in the changes generated by `go mod` including `go.mod`
-and `go.sum` in the same commit as your other changes.
+Please check in the changes generated by `go mod` including the
+`vendor` directory and `go.mod` and `go.sum` in a single commit
+separate from any other code changes with the title "vendor: add
+github.com/ncw/new_dependency". Remember to `git add` any new files
+in `vendor`.
 
 ## Updating a dependency ##
 
 If you need to update a dependency then run
 
     GO111MODULE=on go get -u github.com/pkg/errors
+    GO111MODULE=on go mod vendor
 
 Check in a single commit as above.
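The commands above record each dependency in `go.mod` and `go.sum`. As a hedged aside (not part of this diff), here is a minimal Go sketch of what that recorded module information looks like from inside a binary built in module mode; it assumes go1.12+ for `debug.ReadBuildInfo`:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// ReadBuildInfo is only populated for binaries built in module mode.
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("not built with module support")
		return
	}
	// Each entry mirrors a require line resolved via go.mod/go.sum.
	for _, dep := range info.Deps {
		fmt.Println(dep.Path, dep.Version)
	}
}
```

Running this inside any module-mode build lists the resolved dependency versions, which is exactly the set that `go mod vendor` copies into `vendor`.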
Makefile: 12 lines changed

@@ -27,7 +27,7 @@ ifndef RELEASE_TAG
 TAG := $(TAG)-beta
 endif
 GO_VERSION := $(shell go version)
-GO_FILES := $(shell go list ./... )
+GO_FILES := $(shell go list ./... | grep -v /vendor/ )
 ifdef BETA_SUBDIR
 BETA_SUBDIR := /$(BETA_SUBDIR)
 endif
@@ -100,19 +100,15 @@ release_dep_windows:
 	GO111MODULE=off GOOS="" GOARCH="" go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo
 
 # Update dependencies
-showupdates:
-	@echo "*** Direct dependencies that could be updated ***"
-	@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
-
-# Update direct and indirect dependencies and test dependencies
 update:
-	GO111MODULE=on go get -u -t ./...
-	-#GO111MODULE=on go get -d $(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
+	GO111MODULE=on go get -u ./...
 	GO111MODULE=on go mod tidy
+	GO111MODULE=on go mod vendor
 
 # Tidy the module dependencies
 tidy:
 	GO111MODULE=on go mod tidy
+	GO111MODULE=on go mod vendor
 
 doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
@@ -33,7 +33,7 @@ This file describes how to make the various kinds of releases
   * make startdev
   * # announce with forum post, twitter post, patreon post
 
-Early in the next release cycle update the dependencies
+Early in the next release cycle update the vendored dependencies
 
   * Review any pinned packages in go.mod and remove if possible
   * make update
@@ -53,6 +53,7 @@ Can be fixed with
 
   * GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
   * GO111MODULE=on go mod tidy
+  * GO111MODULE=on go mod vendor
 
 
 ## Making a point release
@@ -8,8 +8,6 @@ import (
 	"net/textproto"
 	"os"
 	"path"
-	"runtime"
 	"strings"
-	"sync"
 	"time"
@@ -162,43 +160,6 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
-// Enable debugging output
-type debugLog struct {
-	mu   sync.Mutex
-	auth bool
-}
-
-// Write writes len(p) bytes from p to the underlying data stream. It returns
-// the number of bytes written from p (0 <= n <= len(p)) and any error
-// encountered that caused the write to stop early. Write must return a non-nil
-// error if it returns n < len(p). Write must not modify the slice data, even
-// temporarily.
-//
-// Implementations must not retain p.
-//
-// This writes debug info to the log
-func (dl *debugLog) Write(p []byte) (n int, err error) {
-	dl.mu.Lock()
-	defer dl.mu.Unlock()
-	_, file, _, ok := runtime.Caller(1)
-	direction := "FTP Rx"
-	if ok && strings.Contains(file, "multi") {
-		direction = "FTP Tx"
-	}
-	lines := strings.Split(string(p), "\r\n")
-	if lines[len(lines)-1] == "" {
-		lines = lines[:len(lines)-1]
-	}
-	for _, line := range lines {
-		if !dl.auth && strings.HasPrefix(line, "PASS") {
-			fs.Debugf(direction, "PASS *****")
-			continue
-		}
-		fs.Debugf(direction, "%q", line)
-	}
-	return len(p), nil
-}
-
 // Open a new connection to the FTP server.
 func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
 	fs.Debugf(f, "Connecting to FTP server")
@@ -222,9 +183,6 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
 	if f.opt.DisableEPSV {
 		ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
 	}
-	if fs.Config.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
-		ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: fs.Config.Dump&fs.DumpAuth != 0}))
-	}
 	c, err := ftp.Dial(f.dialAddr, ftpConfig...)
 	if err != nil {
 		fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
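The removed `debugLog` writer above redacts FTP `PASS` lines unless auth dumping is enabled. A self-contained sketch of the same masking idea (illustrative only; `maskPass` is a hypothetical helper, not rclone code):

```go
package main

import (
	"fmt"
	"strings"
)

// maskPass mirrors the removed debugLog behaviour: each CRLF-separated
// protocol line is logged, but PASS lines are redacted unless auth
// dumping was requested.
func maskPass(p []byte, auth bool) []string {
	lines := strings.Split(string(p), "\r\n")
	if lines[len(lines)-1] == "" {
		lines = lines[:len(lines)-1]
	}
	out := make([]string, 0, len(lines))
	for _, line := range lines {
		if !auth && strings.HasPrefix(line, "PASS") {
			out = append(out, "PASS *****")
			continue
		}
		out = append(out, fmt.Sprintf("%q", line))
	}
	return out
}

func main() {
	for _, l := range maskPass([]byte("USER alice\r\nPASS hunter2\r\n"), false) {
		fmt.Println(l) // "USER alice" then PASS *****
	}
}
```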
@@ -2,128 +2,24 @@ package check
 
 import (
 	"context"
-	"io"
-	"os"
-	"strings"
 
 	"github.com/rclone/rclone/cmd"
-	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/flags"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/spf13/cobra"
-	"github.com/spf13/pflag"
 )
 
 // Globals
 var (
-	download     = false
-	oneway       = false
-	combined     = ""
-	missingOnSrc = ""
-	missingOnDst = ""
-	match        = ""
-	differ       = ""
-	errFile      = ""
+	download = false
+	oneway   = false
 )
 
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	AddFlags(cmdFlags)
-}
-
-// AddFlags adds the check flags to the cmdFlags command
-func AddFlags(cmdFlags *pflag.FlagSet) {
 	flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.")
 	flags.BoolVarP(cmdFlags, &oneway, "one-way", "", oneway, "Check one way only, source files must exist on remote")
-	flags.StringVarP(cmdFlags, &combined, "combined", "", combined, "Make a combined report of changes to this file")
-	flags.StringVarP(cmdFlags, &missingOnSrc, "missing-on-src", "", missingOnSrc, "Report all files missing from the source to this file")
-	flags.StringVarP(cmdFlags, &missingOnDst, "missing-on-dst", "", missingOnDst, "Report all files missing from the destination to this file")
-	flags.StringVarP(cmdFlags, &match, "match", "", match, "Report all matching files to this file")
-	flags.StringVarP(cmdFlags, &differ, "differ", "", differ, "Report all non-matching files to this file")
-	flags.StringVarP(cmdFlags, &errFile, "error", "", errFile, "Report all files with errors (hashing or reading) to this file")
 }
 
-// FlagsHelp describes the flags for the help
-var FlagsHelp = strings.Replace(`
-If you supply the |--one-way| flag, it will only check that files in
-the source match the files in the destination, not the other way
-around. This means that extra files in the destination that are not in
-the source will not be detected.
-
-The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--src-only|
-and |--error| flags write paths, one per line, to the file name (or
-stdout if it is |-|) supplied. What they write is described in the
-help below. For example |--differ| will write all paths which are
-present on both the source and destination but different.
-
-The |--combined| flag will write a file (or stdout) which contains all
-file paths with a symbol and then a space and then the path to tell
-you what happened to it. These are reminiscent of diff files.
-
-- |= path| means path was found in source and destination and was identical
-- |- path| means path was missing on the source, so only in the destination
-- |+ path| means path was missing on the destination, so only in the source
-- |* path| means path was present in source and destination but different.
-- |! path| means there was an error reading or hashing the source or dest.
-`, "|", "`", -1)
-
-// GetCheckOpt gets the options corresponding to the check flags
-func GetCheckOpt(fsrc, fdst fs.Fs) (opt *operations.CheckOpt, close func(), err error) {
-	closers := []io.Closer{}
-
-	opt = &operations.CheckOpt{
-		Fsrc:   fsrc,
-		Fdst:   fdst,
-		OneWay: oneway,
-	}
-
-	open := func(name string, pout *io.Writer) error {
-		if name == "" {
-			return nil
-		}
-		if name == "-" {
-			*pout = os.Stdout
-			return nil
-		}
-		out, err := os.Create(name)
-		if err != nil {
-			return err
-		}
-		*pout = out
-		closers = append(closers, out)
-		return nil
-	}
-
-	if err = open(combined, &opt.Combined); err != nil {
-		return nil, nil, err
-	}
-	if err = open(missingOnSrc, &opt.MissingOnSrc); err != nil {
-		return nil, nil, err
-	}
-	if err = open(missingOnDst, &opt.MissingOnDst); err != nil {
-		return nil, nil, err
-	}
-	if err = open(match, &opt.Match); err != nil {
-		return nil, nil, err
-	}
-	if err = open(differ, &opt.Differ); err != nil {
-		return nil, nil, err
-	}
-	if err = open(errFile, &opt.Error); err != nil {
-		return nil, nil, err
-	}
-
-	close = func() {
-		for _, closer := range closers {
-			err := closer.Close()
-			if err != nil {
-				fs.Errorf(nil, "Failed to close report output: %v", err)
-			}
-		}
-	}
-
-	return opt, close, nil
-
-}
-
 var commandDefinition = &cobra.Command{
@@ -141,20 +37,19 @@ If you supply the --download flag, it will download the data from
 both remotes and check them against each other on the fly. This can
 be useful for remotes that don't support hashes or if you really want
 to check all the data.
-` + FlagsHelp,
+
+If you supply the --one-way flag, it will only check that files in source
+match the files in destination, not the other way around. Meaning extra files in
+destination that are not in the source will not trigger an error.
+`,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(2, 2, command, args)
 		fsrc, fdst := cmd.NewFsSrcDst(args)
 		cmd.Run(false, true, command, func() error {
-			opt, close, err := GetCheckOpt(fsrc, fdst)
-			if err != nil {
-				return err
-			}
-			defer close()
 			if download {
-				return operations.CheckDownload(context.Background(), opt)
+				return operations.CheckDownload(context.Background(), fdst, fsrc, oneway)
 			}
-			return operations.Check(context.Background(), opt)
+			return operations.Check(context.Background(), fdst, fsrc, oneway)
 		})
 	},
 }
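For orientation, a minimal hedged sketch of driving the `CheckOpt`-based API shown on the removed (`-`) side of this diff; it only compiles against that side of the code, and the paths are placeholders:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
)

func main() {
	// fs.NewFs with a single string argument matches the API of this era;
	// later rclone versions take a context first.
	fsrc, err := fs.NewFs("/tmp/src")
	if err != nil {
		log.Fatal(err)
	}
	fdst, err := fs.NewFs("/tmp/dst")
	if err != nil {
		log.Fatal(err)
	}
	opt := operations.CheckOpt{
		Fdst:     fdst,
		Fsrc:     fsrc,
		OneWay:   false,
		Combined: os.Stdout, // emits "= path", "+ path", "- path", "* path", "! path"
	}
	if err := operations.Check(context.Background(), &opt); err != nil {
		log.Println(err) // non-nil when differences were found
	}
}
```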
@@ -6,17 +6,22 @@ import (
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/crypt"
 	"github.com/rclone/rclone/cmd"
-	"github.com/rclone/rclone/cmd/check"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config/flags"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
 
+// Globals
+var (
+	oneway = false
+)
+
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlag := commandDefinition.Flags()
-	check.AddFlags(cmdFlag)
+	flags.BoolVarP(cmdFlag, &oneway, "one-way", "", oneway, "Check one way only, source files must exist on destination")
 }
 
 var commandDefinition = &cobra.Command{
@@ -45,7 +50,11 @@ the files in remote:path.
 
     rclone cryptcheck remote:path encryptedremote:path
 
 After it has run it will log the status of the encryptedremote:.
-` + check.FlagsHelp,
+
+If you supply the --one-way flag, it will only check that files in source
+match the files in destination, not the other way around. Meaning extra files in
+destination that are not in the source will not trigger an error.
+`,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(2, 2, command, args)
 		fsrc, fdst := cmd.NewFsSrcDst(args)
@@ -70,40 +79,39 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
 	}
 	fs.Infof(nil, "Using %v for hash comparisons", hashType)
 
-	opt, close, err := check.GetCheckOpt(fsrc, fcrypt)
-	if err != nil {
-		return err
-	}
-	defer close()
-
 	// checkIdentical checks to see if dst and src are identical
 	//
 	// it returns true if differences were found
 	// it also returns whether it couldn't be hashed
-	opt.Check = func(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
+	checkIdentical := func(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool) {
 		cryptDst := dst.(*crypt.Object)
 		underlyingDst := cryptDst.UnWrap()
 		underlyingHash, err := underlyingDst.Hash(ctx, hashType)
 		if err != nil {
-			return true, false, errors.Wrapf(err, "error reading hash from underlying %v", underlyingDst)
+			err = fs.CountError(err)
+			fs.Errorf(dst, "Error reading hash from underlying %v: %v", underlyingDst, err)
+			return true, false
 		}
 		if underlyingHash == "" {
-			return false, true, nil
+			return false, true
 		}
 		cryptHash, err := fcrypt.ComputeHash(ctx, cryptDst, src, hashType)
 		if err != nil {
-			return true, false, errors.Wrap(err, "error computing hash")
+			err = fs.CountError(err)
+			fs.Errorf(dst, "Error computing hash: %v", err)
+			return true, false
 		}
 		if cryptHash == "" {
-			return false, true, nil
+			return false, true
 		}
 		if cryptHash != underlyingHash {
 			err = errors.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
 			err = fs.CountError(err)
 			fs.Errorf(src, err.Error())
-			return true, false, nil
+			return true, false
 		}
-		return false, false, nil
+		return false, false
 	}
 
-	return operations.CheckFn(ctx, opt)
+	return operations.CheckFn(ctx, fcrypt, fsrc, checkIdentical, oneway)
 }
@@ -26,7 +26,7 @@ import (
 	"github.com/rclone/rclone/vfs/vfsflags"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
-	ftp "goftp.io/server/core"
+	ftp "goftp.io/server"
 )
 
 // Options contains options for the http Server
@@ -16,7 +16,7 @@ import (
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/stretchr/testify/assert"
-	ftp "goftp.io/server/core"
+	ftp "goftp.io/server"
 )
 
 const (
@@ -392,6 +392,3 @@ put them back in again.` >}}
 * Kai Lüke <kai@kinvolk.io>
 * Garrett Squire <github@garrettsquire.com>
 * Evan Harris <eharris@puremagic.com>
-* Kevin <keyam@microsoft.com>
-* Morten Linderud <morten@linderud.pw>
-* Dmitry Ustalov <dmitry.ustalov@gmail.com>
@@ -1197,10 +1197,3 @@ for rclone to be able to get its token-id (but as this only happens during
 the remote configuration, it's not such a big deal).
 
 (Thanks to @balazer on github for these instructions.)
-
-Sometimes, creation of an OAuth consent in Google API Console fails due to an error message
-“The request failed because changes to one of the field of the resource is not supported”.
-As a convenient workaround, the necessary Google Drive API key can be created on the
-[Python Quickstart](https://developers.google.com/drive/api/v3/quickstart/python) page.
-Just push the Enable the Drive API button to receive the Client ID and Secret.
-Note that it will automatically create a new project in the API Console.
@@ -10,10 +10,6 @@ FTP is the File Transfer Protocol. FTP support is provided using the
 [github.com/jlaffaye/ftp](https://godoc.org/github.com/jlaffaye/ftp)
 package.
 
-Paths are specified as `remote:path`. If the path does not begin with
-a `/` it is relative to the home directory of the user. An empty path
-`remote:` refers to the user's home directory.
-
 Here is an example of making an FTP configuration. First run
 
     rclone config
@@ -257,9 +253,8 @@ See: the [encoding section in the overview](/overview/#encoding) for more info.
 
 ### Limitations ###
 
-Note that FTP does have its own implementation of : `--dump headers`,
-`--dump bodies`, `--dump auth` for debugging which isn't the same as
-the HTTP based backends - it has less fine grained control.
+Note that since FTP isn't HTTP based the following flags don't work
+with it: `--dump-headers`, `--dump-bodies`, `--dump-auth`
 
 Note that `--timeout` isn't supported (but `--contimeout` is).
@@ -174,7 +174,7 @@ kill %1
 
 ## Install from source ##
 
-Make sure you have at least [Go](https://golang.org/) 1.11
+Make sure you have at least [Go](https://golang.org/) 1.10
 installed. [Download go](https://golang.org/dl/) if necessary. The
 latest release is recommended. Then
 
@@ -183,14 +183,16 @@ latest release is recommended. Then
     go build
     ./rclone version
 
 This will leave you a checked out version of rclone you can modify.
 
-You can also build rclone with:
+You can also build and install rclone in the
+[GOPATH](https://github.com/golang/go/wiki/GOPATH) (which defaults to
+`~/go`) with:
 
     go get -u -v github.com/rclone/rclone
 
 and this will build the binary in `$GOPATH/bin` (`~/go/bin/rclone` by
-default) after downloading the source to the go module cache.
+default) after downloading the source to
+`$GOPATH/src/github.com/rclone/rclone` (`~/go/src/github.com/rclone/rclone`
+by default).
 
 ## Installation with Ansible ##
@@ -53,14 +53,6 @@ Users are advised to use social media platforms wisely and communicate / engage
 
 This website may use social sharing buttons which help share web content directly from web pages to the social media platform in question. Users are advised before using such social sharing buttons that they do so at their own discretion and note that the social media platform may track and save your request to share a web page respectively through your social media platform account.
 
-## Use of Cloud API User Data ##
-
-Rclone is a command line program to manage files on cloud storage. Its sole purpose is to access and manipulate user content in the [supported](/overview/) cloud storage systems from a local machine of the end user. For accessing the user content via the cloud provider API, Rclone uses authentication mechanisms, such as OAuth or HTTP Cookies, depending on the particular cloud provider offerings. Use of these authentication mechanisms and user data is governed by the privacy policies mentioned in the [Resources & Further Information](/privacy/#resources-further-information) section and followed by the privacy policy of Rclone.
-
-* Rclone provides the end user with access to their files available in a storage system associated by the authentication credentials via the publicly exposed API of the storage system.
-* Rclone allows storing the authentication credentials on the user machine in the local configuration file.
-* Rclone does not share any user data with third parties.
-
 ## Resources & Further Information ##
 
 * [Data Protection Act 1998](http://www.legislation.gov.uk/ukpga/1998/29/contents)
@@ -69,5 +61,4 @@ Rclone is a command line program to manage files on cloud storage. Its sole purp
 * [Twitter Privacy Policy](https://twitter.com/privacy)
 * [Facebook Privacy Policy](https://www.facebook.com/about/privacy/)
 * [Google Privacy Policy](https://www.google.com/privacy.html)
-* [Google API Services User Data Policy](https://developers.google.com/terms/api-services-user-data-policy)
 * [Sample Website Privacy Policy](http://www.jamieking.co.uk/resources/free_sample_privacy_policy.html)
@@ -104,8 +104,8 @@ Returns the following values:
             "eta": estimated time in seconds until file transfer completion
             "name": name of the file,
             "percentage": progress of the file transfer in percent,
-            "speed": average speed over the whole transfer in bytes/sec,
-            "speedAvg": current speed in bytes/sec as an exponentially weighted moving average,
+            "speed": speed in bytes/sec,
+            "speedAvg": speed in bytes/sec as an exponentially weighted moving average,
             "size": size of the file in bytes
         }
     ],
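For intuition about the `speedAvg` wording, a small hedged sketch of an exponentially weighted moving average; the smoothing factor below is illustrative, not rclone's actual constant:

```go
package main

import "fmt"

func main() {
	const alpha = 0.2 // illustrative smoothing factor, not rclone's
	samples := []float64{100, 120, 80, 200, 90} // instantaneous bytes/sec
	avg := samples[0]
	for _, s := range samples[1:] {
		avg = alpha*s + (1-alpha)*avg // recent samples weigh in gradually
	}
	fmt.Printf("speedAvg ~ %.1f bytes/sec\n", avg)
}
```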
Deleted file, 350 lines (package operations):

@@ -1,350 +0,0 @@
package operations

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"sync"
	"sync/atomic"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/march"
	"github.com/rclone/rclone/lib/readers"
)

// checkFn is the type of the checking function used in CheckFn()
//
// It should check the two objects (a, b) and return if they differ
// and whether the hash was used.
//
// If there are differences then this should Errorf the difference and
// the reason but return with err = nil. It should not CountError in
// this case.
type checkFn func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool, err error)

// CheckOpt contains options for the Check functions
type CheckOpt struct {
	Fdst, Fsrc   fs.Fs     // fses to check
	Check        checkFn   // function to use for checking
	OneWay       bool      // one way only?
	Combined     io.Writer // a file with file names with leading sigils
	MissingOnSrc io.Writer // files only in the destination
	MissingOnDst io.Writer // files only in the source
	Match        io.Writer // matching files
	Differ       io.Writer // differing files
	Error        io.Writer // files with errors of some kind
}

// checkMarch is used to march over two Fses in the same way as
// sync/copy
type checkMarch struct {
	ioMu            sync.Mutex
	wg              sync.WaitGroup
	tokens          chan struct{}
	differences     int32
	noHashes        int32
	srcFilesMissing int32
	dstFilesMissing int32
	matches         int32
	opt             CheckOpt
}

// report outputs the fileName to out if required and to the combined log
func (c *checkMarch) report(o fs.DirEntry, out io.Writer, sigil rune) {
	if out != nil {
		c.ioMu.Lock()
		_, _ = fmt.Fprintf(out, "%v\n", o)
		c.ioMu.Unlock()
	}
	if c.opt.Combined != nil {
		c.ioMu.Lock()
		_, _ = fmt.Fprintf(c.opt.Combined, "%c %v\n", sigil, o)
		c.ioMu.Unlock()
	}
}

// DstOnly have an object which is in the destination only
func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) {
	switch dst.(type) {
	case fs.Object:
		if c.opt.OneWay {
			return false
		}
		err := errors.Errorf("File not in %v", c.opt.Fsrc)
		fs.Errorf(dst, "%v", err)
		_ = fs.CountError(err)
		atomic.AddInt32(&c.differences, 1)
		atomic.AddInt32(&c.srcFilesMissing, 1)
		c.report(dst, c.opt.MissingOnSrc, '-')
	case fs.Directory:
		// Do the same thing to the entire contents of the directory
		if c.opt.OneWay {
			return false
		}
		return true
	default:
		panic("Bad object in DirEntries")
	}
	return false
}

// SrcOnly have an object which is in the source only
func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) {
	switch src.(type) {
	case fs.Object:
		err := errors.Errorf("File not in %v", c.opt.Fdst)
		fs.Errorf(src, "%v", err)
		_ = fs.CountError(err)
		atomic.AddInt32(&c.differences, 1)
		atomic.AddInt32(&c.dstFilesMissing, 1)
		c.report(src, c.opt.MissingOnDst, '+')
	case fs.Directory:
		// Do the same thing to the entire contents of the directory
		return true
	default:
		panic("Bad object in DirEntries")
	}
	return false
}

// check to see if two objects are identical using the check function
func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
	tr := accounting.Stats(ctx).NewCheckingTransfer(src)
	defer func() {
		tr.Done(err)
	}()
	if sizeDiffers(src, dst) {
		err = errors.Errorf("Sizes differ")
		fs.Errorf(src, "%v", err)
		return true, false, nil
	}
	if fs.Config.SizeOnly {
		return false, false, nil
	}
	return c.opt.Check(ctx, dst, src)
}

// Match is called when src and dst are present, so sync src to dst
func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse bool) {
	switch srcX := src.(type) {
	case fs.Object:
		dstX, ok := dst.(fs.Object)
		if ok {
			if SkipDestructive(ctx, src, "check") {
				return false
			}
			c.wg.Add(1)
			c.tokens <- struct{}{} // put a token to limit concurrency
			go func() {
				defer func() {
					<-c.tokens // get the token back to free up a slot
					c.wg.Done()
				}()
				differ, noHash, err := c.checkIdentical(ctx, dstX, srcX)
				if err != nil {
					fs.Errorf(src, "%v", err)
					_ = fs.CountError(err)
					c.report(src, c.opt.Error, '!')
				} else if differ {
					atomic.AddInt32(&c.differences, 1)
					err := errors.New("files differ")
					// the checkFn has already logged the reason
					_ = fs.CountError(err)
					c.report(src, c.opt.Differ, '*')
				} else {
					atomic.AddInt32(&c.matches, 1)
					c.report(src, c.opt.Match, '=')
					if noHash {
						atomic.AddInt32(&c.noHashes, 1)
						fs.Debugf(dstX, "OK - could not check hash")
					} else {
						fs.Debugf(dstX, "OK")
					}
				}
			}()
		} else {
			err := errors.Errorf("is file on %v but directory on %v", c.opt.Fsrc, c.opt.Fdst)
			fs.Errorf(src, "%v", err)
			_ = fs.CountError(err)
			atomic.AddInt32(&c.differences, 1)
			atomic.AddInt32(&c.dstFilesMissing, 1)
			c.report(src, c.opt.MissingOnDst, '+')
		}
	case fs.Directory:
		// Do the same thing to the entire contents of the directory
		_, ok := dst.(fs.Directory)
		if ok {
			return true
		}
		err := errors.Errorf("is file on %v but directory on %v", c.opt.Fdst, c.opt.Fsrc)
		fs.Errorf(dst, "%v", err)
		_ = fs.CountError(err)
		atomic.AddInt32(&c.differences, 1)
		atomic.AddInt32(&c.srcFilesMissing, 1)
		c.report(dst, c.opt.MissingOnSrc, '-')

	default:
		panic("Bad object in DirEntries")
	}
	return false
}

// CheckFn checks the files in fsrc and fdst according to Size and
// hash using checkFunction on each file to check the hashes.
//
// checkFunction sees if dst and src are identical
//
// it returns true if differences were found
// it also returns whether it couldn't be hashed
func CheckFn(ctx context.Context, opt *CheckOpt) error {
	if opt.Check == nil {
		return errors.New("internal error: nil check function")
	}
	c := &checkMarch{
		tokens: make(chan struct{}, fs.Config.Checkers),
		opt:    *opt,
	}

	// set up a march over fdst and fsrc
	m := &march.March{
		Ctx:      ctx,
		Fdst:     c.opt.Fdst,
		Fsrc:     c.opt.Fsrc,
		Dir:      "",
		Callback: c,
	}
	fs.Debugf(c.opt.Fdst, "Waiting for checks to finish")
	err := m.Run()
	c.wg.Wait() // wait for background go-routines

	if c.dstFilesMissing > 0 {
		fs.Logf(c.opt.Fdst, "%d files missing", c.dstFilesMissing)
	}
	if c.srcFilesMissing > 0 {
		fs.Logf(c.opt.Fsrc, "%d files missing", c.srcFilesMissing)
	}

	fs.Logf(c.opt.Fdst, "%d differences found", accounting.Stats(ctx).GetErrors())
	if errs := accounting.Stats(ctx).GetErrors(); errs > 0 {
		fs.Logf(c.opt.Fdst, "%d errors while checking", errs)
	}
	if c.noHashes > 0 {
		fs.Logf(c.opt.Fdst, "%d hashes could not be checked", c.noHashes)
	}
	if c.matches > 0 {
		fs.Logf(c.opt.Fdst, "%d matching files", c.matches)
	}
	if c.differences > 0 {
		return errors.Errorf("%d differences found", c.differences)
	}
	return err
}

// Check the files in fsrc and fdst according to Size and hash
func Check(ctx context.Context, opt *CheckOpt) error {
	optCopy := *opt
	optCopy.Check = func(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
		same, ht, err := CheckHashes(ctx, src, dst)
		if err != nil {
			return true, false, err
		}
		if ht == hash.None {
			return false, true, nil
		}
		if !same {
			err = errors.Errorf("%v differ", ht)
			fs.Errorf(src, "%v", err)
			return true, false, nil
		}
		return false, false, nil
	}

	return CheckFn(ctx, &optCopy)
}

// CheckEqualReaders checks to see if in1 and in2 have the same
// content when read.
//
// it returns true if differences were found
func CheckEqualReaders(in1, in2 io.Reader) (differ bool, err error) {
	const bufSize = 64 * 1024
	buf1 := make([]byte, bufSize)
	buf2 := make([]byte, bufSize)
	for {
		n1, err1 := readers.ReadFill(in1, buf1)
		n2, err2 := readers.ReadFill(in2, buf2)
		// check errors
		if err1 != nil && err1 != io.EOF {
			return true, err1
		} else if err2 != nil && err2 != io.EOF {
			return true, err2
		}
		// err1 && err2 are nil or io.EOF here
		// process the data
		if n1 != n2 || !bytes.Equal(buf1[:n1], buf2[:n2]) {
			return true, nil
		}
		// if both streams finished the we have finished
		if err1 == io.EOF && err2 == io.EOF {
			break
		}
	}
	return false, nil
}

// CheckIdenticalDownload checks to see if dst and src are identical
// by reading all their bytes if necessary.
//
// it returns true if differences were found
func CheckIdenticalDownload(ctx context.Context, dst, src fs.Object) (differ bool, err error) {
	err = Retry(src, fs.Config.LowLevelRetries, func() error {
		differ, err = checkIdenticalDownload(ctx, dst, src)
		return err
	})
	return differ, err
}

// Does the work for CheckIdenticalDownload
func checkIdenticalDownload(ctx context.Context, dst, src fs.Object) (differ bool, err error) {
	in1, err := dst.Open(ctx)
	if err != nil {
		return true, errors.Wrapf(err, "failed to open %q", dst)
	}
	tr1 := accounting.Stats(ctx).NewTransfer(dst)
	defer func() {
		tr1.Done(nil) // error handling is done by the caller
	}()
	in1 = tr1.Account(in1).WithBuffer() // account and buffer the transfer

	in2, err := src.Open(ctx)
	if err != nil {
		return true, errors.Wrapf(err, "failed to open %q", src)
	}
	tr2 := accounting.Stats(ctx).NewTransfer(dst)
	defer func() {
		tr2.Done(nil) // error handling is done by the caller
	}()
	in2 = tr2.Account(in2).WithBuffer() // account and buffer the transfer

	// To assign err variable before defer.
	differ, err = CheckEqualReaders(in1, in2)
	return
}

// CheckDownload checks the files in fsrc and fdst according to Size
// and the actual contents of the files.
func CheckDownload(ctx context.Context, opt *CheckOpt) error {
	optCopy := *opt
	optCopy.Check = func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool, err error) {
		differ, err = CheckIdenticalDownload(ctx, a, b)
		if err != nil {
			return true, true, errors.Wrap(err, "failed to download")
		}
		return differ, false, nil
	}
	return CheckFn(ctx, &optCopy)
}
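The `tokens chan struct{}` field above is a classic bounded-concurrency pattern: each check goroutine takes a token before starting and returns it when done, so at most `fs.Config.Checkers` run at once. A standalone sketch of the same pattern:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const checkers = 4 // stands in for fs.Config.Checkers
	tokens := make(chan struct{}, checkers)
	var wg sync.WaitGroup
	for i := 0; i < 20; i++ {
		wg.Add(1)
		tokens <- struct{}{} // put a token to limit concurrency
		go func(i int) {
			defer func() {
				<-tokens // return the token to free up a slot
				wg.Done()
			}()
			fmt.Println("checking item", i)
		}(i)
	}
	wg.Wait() // wait for background go-routines, as CheckFn does
}
```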
Deleted file, 272 lines (package operations_test):

@@ -1,272 +0,0 @@
package operations_test

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"log"
	"os"
	"sort"
	"strings"
	"testing"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/lib/readers"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operations.CheckOpt) error) {
	r := fstest.NewRun(t)
	defer r.Finalise()

	addBuffers := func(opt *operations.CheckOpt) {
		opt.Combined = new(bytes.Buffer)
		opt.MissingOnSrc = new(bytes.Buffer)
		opt.MissingOnDst = new(bytes.Buffer)
		opt.Match = new(bytes.Buffer)
		opt.Differ = new(bytes.Buffer)
		opt.Error = new(bytes.Buffer)
	}

	sortLines := func(in string) []string {
		if in == "" {
			return []string{}
		}
		lines := strings.Split(in, "\n")
		sort.Strings(lines)
		return lines
	}

	checkBuffer := func(name string, want map[string]string, out io.Writer) {
		expected := want[name]
		buf, ok := out.(*bytes.Buffer)
		require.True(t, ok)
		assert.Equal(t, sortLines(expected), sortLines(buf.String()), name)
	}

	checkBuffers := func(opt *operations.CheckOpt, want map[string]string) {
		checkBuffer("combined", want, opt.Combined)
		checkBuffer("missingonsrc", want, opt.MissingOnSrc)
		checkBuffer("missingondst", want, opt.MissingOnDst)
		checkBuffer("match", want, opt.Match)
		checkBuffer("differ", want, opt.Differ)
		checkBuffer("error", want, opt.Error)
	}

	check := func(i int, wantErrors int64, wantChecks int64, oneway bool, wantOutput map[string]string) {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			accounting.GlobalStats().ResetCounters()
			var buf bytes.Buffer
			log.SetOutput(&buf)
			defer func() {
				log.SetOutput(os.Stderr)
			}()
			opt := operations.CheckOpt{
				Fdst:   r.Fremote,
				Fsrc:   r.Flocal,
				OneWay: oneway,
			}
			addBuffers(&opt)
			err := checkFunction(context.Background(), &opt)
			gotErrors := accounting.GlobalStats().GetErrors()
			gotChecks := accounting.GlobalStats().GetChecks()
			if wantErrors == 0 && err != nil {
				t.Errorf("%d: Got error when not expecting one: %v", i, err)
			}
			if wantErrors != 0 && err == nil {
				t.Errorf("%d: No error when expecting one", i)
			}
			if wantErrors != gotErrors {
				t.Errorf("%d: Expecting %d errors but got %d", i, wantErrors, gotErrors)
			}
			if gotChecks > 0 && !strings.Contains(buf.String(), "matching files") {
				t.Errorf("%d: Total files matching line missing", i)
			}
			if wantChecks != gotChecks {
				t.Errorf("%d: Expecting %d total matching files but got %d", i, wantChecks, gotChecks)
			}
			checkBuffers(&opt, wantOutput)
		})
	}

	file1 := r.WriteBoth(context.Background(), "rutabaga", "is tasty", t3)
	fstest.CheckItems(t, r.Fremote, file1)
	fstest.CheckItems(t, r.Flocal, file1)
	check(1, 0, 1, false, map[string]string{
		"combined":     "= rutabaga\n",
		"missingonsrc": "",
		"missingondst": "",
		"match":        "rutabaga\n",
		"differ":       "",
		"error":        "",
	})

	file2 := r.WriteFile("potato2", "------------------------------------------------------------", t1)
	fstest.CheckItems(t, r.Flocal, file1, file2)
	check(2, 1, 1, false, map[string]string{
		"combined":     "+ potato2\n= rutabaga\n",
		"missingonsrc": "",
		"missingondst": "potato2\n",
		"match":        "rutabaga\n",
		"differ":       "",
		"error":        "",
	})

	file3 := r.WriteObject(context.Background(), "empty space", "-", t2)
	fstest.CheckItems(t, r.Fremote, file1, file3)
	check(3, 2, 1, false, map[string]string{
		"combined":     "- empty space\n+ potato2\n= rutabaga\n",
		"missingonsrc": "empty space\n",
		"missingondst": "potato2\n",
		"match":        "rutabaga\n",
		"differ":       "",
		"error":        "",
	})

	file2r := file2
	if fs.Config.SizeOnly {
		file2r = r.WriteObject(context.Background(), "potato2", "--Some-Differences-But-Size-Only-Is-Enabled-----------------", t1)
	} else {
		r.WriteObject(context.Background(), "potato2", "------------------------------------------------------------", t1)
	}
	fstest.CheckItems(t, r.Fremote, file1, file2r, file3)
	check(4, 1, 2, false, map[string]string{
		"combined":     "- empty space\n= potato2\n= rutabaga\n",
		"missingonsrc": "empty space\n",
		"missingondst": "",
		"match":        "rutabaga\npotato2\n",
		"differ":       "",
		"error":        "",
	})

	file3r := file3
	file3l := r.WriteFile("empty space", "DIFFER", t2)
	fstest.CheckItems(t, r.Flocal, file1, file2, file3l)
	check(5, 1, 3, false, map[string]string{
		"combined":     "* empty space\n= potato2\n= rutabaga\n",
		"missingonsrc": "",
		"missingondst": "",
		"match":        "potato2\nrutabaga\n",
		"differ":       "empty space\n",
		"error":        "",
	})

	file4 := r.WriteObject(context.Background(), "remotepotato", "------------------------------------------------------------", t1)
	fstest.CheckItems(t, r.Fremote, file1, file2r, file3r, file4)
	check(6, 2, 3, false, map[string]string{
		"combined":     "* empty space\n= potato2\n= rutabaga\n- remotepotato\n",
		"missingonsrc": "remotepotato\n",
		"missingondst": "",
		"match":        "potato2\nrutabaga\n",
		"differ":       "empty space\n",
		"error":        "",
	})
	check(7, 1, 3, true, map[string]string{
		"combined":     "* empty space\n= potato2\n= rutabaga\n",
		"missingonsrc": "",
		"missingondst": "",
		"match":        "potato2\nrutabaga\n",
		"differ":       "empty space\n",
		"error":        "",
	})
}

func TestCheck(t *testing.T) {
	testCheck(t, operations.Check)
}

func TestCheckFsError(t *testing.T) {
	dstFs, err := fs.NewFs("non-existent")
	if err != nil {
		t.Fatal(err)
	}
	srcFs, err := fs.NewFs("non-existent")
	if err != nil {
		t.Fatal(err)
	}
	opt := operations.CheckOpt{
		Fdst:   dstFs,
		Fsrc:   srcFs,
		OneWay: false,
	}
	err = operations.Check(context.Background(), &opt)
	require.Error(t, err)
}

func TestCheckDownload(t *testing.T) {
	testCheck(t, operations.CheckDownload)
}

func TestCheckSizeOnly(t *testing.T) {
	fs.Config.SizeOnly = true
	defer func() { fs.Config.SizeOnly = false }()
	TestCheck(t)
}

func TestCheckEqualReaders(t *testing.T) {
	b65a := make([]byte, 65*1024)
	b65b := make([]byte, 65*1024)
	b65b[len(b65b)-1] = 1
	b66 := make([]byte, 66*1024)

	differ, err := operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b65a))
	assert.NoError(t, err)
	assert.Equal(t, differ, false)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b65b))
	assert.NoError(t, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b66))
	assert.NoError(t, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b66), bytes.NewBuffer(b65a))
	assert.NoError(t, err)
	assert.Equal(t, differ, true)

	myErr := errors.New("sentinel")
	wrap := func(b []byte) io.Reader {
		r := bytes.NewBuffer(b)
		e := readers.ErrorReader{Err: myErr}
		return io.MultiReader(r, e)
	}

	differ, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b65a))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b65b))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b66))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(wrap(b66), bytes.NewBuffer(b65a))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b65a))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b65b))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b66))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b66), wrap(b65a))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)
}
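The tests above inject read failures with `readers.ErrorReader` behind `io.MultiReader`. A self-contained version of the same trick, with a local `errorReader` standing in for rclone's helper:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
)

// errorReader always fails, like rclone's readers.ErrorReader.
type errorReader struct{ err error }

func (e errorReader) Read(p []byte) (int, error) { return 0, e.err }

func main() {
	sentinel := errors.New("sentinel")
	// Yields "data" first, then the sentinel error instead of io.EOF.
	r := io.MultiReader(bytes.NewBufferString("data"), errorReader{err: sentinel})
	b, err := ioutil.ReadAll(r)
	fmt.Printf("read %q, err: %v\n", b, err) // read "data", err: sentinel
}
```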
@@ -29,6 +29,7 @@ import (
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/march"
 	"github.com/rclone/rclone/fs/object"
 	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/atexit"
@@ -725,6 +726,246 @@ func SameDir(fdst, fsrc fs.Info) bool {
 	return fdstRoot == fsrcRoot
 }
 
+// checkIdentical checks to see if dst and src are identical
+//
+// it returns true if differences were found
+// it also returns whether it couldn't be hashed
+func checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool) {
+	same, ht, err := CheckHashes(ctx, src, dst)
+	if err != nil {
+		// CheckHashes will log and count errors
+		return true, false
+	}
+	if ht == hash.None {
+		return false, true
+	}
+	if !same {
+		err = errors.Errorf("%v differ", ht)
+		fs.Errorf(src, "%v", err)
+		_ = fs.CountError(err)
+		return true, false
+	}
+	return false, false
+}
+
+// checkFn is the type of the checking function used in CheckFn()
+type checkFn func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool)
+
+// checkMarch is used to march over two Fses in the same way as
+// sync/copy
+type checkMarch struct {
+	fdst, fsrc      fs.Fs
+	check           checkFn
+	wg              sync.WaitGroup
+	tokens          chan struct{}
+	oneway          bool
+	differences     int32
+	noHashes        int32
+	srcFilesMissing int32
+	dstFilesMissing int32
+	matches         int32
+}
+
+// DstOnly have an object which is in the destination only
+func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) {
+	switch dst.(type) {
+	case fs.Object:
+		if c.oneway {
+			return false
+		}
+		err := errors.Errorf("File not in %v", c.fsrc)
+		fs.Errorf(dst, "%v", err)
+		_ = fs.CountError(err)
+		atomic.AddInt32(&c.differences, 1)
+		atomic.AddInt32(&c.srcFilesMissing, 1)
+	case fs.Directory:
+		// Do the same thing to the entire contents of the directory
+		if c.oneway {
+			return false
+		}
+		return true
+	default:
+		panic("Bad object in DirEntries")
+	}
+	return false
+}
+
+// SrcOnly have an object which is in the source only
+func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) {
+	switch src.(type) {
+	case fs.Object:
+		err := errors.Errorf("File not in %v", c.fdst)
+		fs.Errorf(src, "%v", err)
+		_ = fs.CountError(err)
+		atomic.AddInt32(&c.differences, 1)
+		atomic.AddInt32(&c.dstFilesMissing, 1)
+	case fs.Directory:
+		// Do the same thing to the entire contents of the directory
+		return true
+	default:
+		panic("Bad object in DirEntries")
+	}
+	return false
+}
+
+// check to see if two objects are identical using the check function
+func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool) {
+	var err error
+	tr := accounting.Stats(ctx).NewCheckingTransfer(src)
+	defer func() {
+		tr.Done(err)
+	}()
+	if sizeDiffers(src, dst) {
+		err = errors.Errorf("Sizes differ")
+		fs.Errorf(src, "%v", err)
+		return true, false
+	}
+	if fs.Config.SizeOnly {
+		return false, false
+	}
+	return c.check(ctx, dst, src)
+}
+
+// Match is called when src and dst are present, so sync src to dst
+func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse bool) {
+	switch srcX := src.(type) {
+	case fs.Object:
+		dstX, ok := dst.(fs.Object)
+		if ok {
+			if SkipDestructive(ctx, src, "check") {
+				return false
+			}
+			c.wg.Add(1)
+			c.tokens <- struct{}{} // put a token to limit concurrency
+			go func() {
+				defer func() {
+					<-c.tokens // get the token back to free up a slot
+					c.wg.Done()
+				}()
+				differ, noHash := c.checkIdentical(ctx, dstX, srcX)
+				if differ {
+					atomic.AddInt32(&c.differences, 1)
+				} else {
+					atomic.AddInt32(&c.matches, 1)
+					if noHash {
+						atomic.AddInt32(&c.noHashes, 1)
+						fs.Debugf(dstX, "OK - could not check hash")
+					} else {
+						fs.Debugf(dstX, "OK")
+					}
+				}
+			}()
+		} else {
+			err := errors.Errorf("is file on %v but directory on %v", c.fsrc, c.fdst)
+			fs.Errorf(src, "%v", err)
+			_ = fs.CountError(err)
+			atomic.AddInt32(&c.differences, 1)
+			atomic.AddInt32(&c.dstFilesMissing, 1)
+		}
+	case fs.Directory:
+		// Do the same thing to the entire contents of the directory
+		_, ok := dst.(fs.Directory)
+		if ok {
+			return true
+		}
+		err := errors.Errorf("is file on %v but directory on %v", c.fdst, c.fsrc)
+		fs.Errorf(dst, "%v", err)
+		_ = fs.CountError(err)
+		atomic.AddInt32(&c.differences, 1)
+		atomic.AddInt32(&c.srcFilesMissing, 1)
+
+	default:
+		panic("Bad object in DirEntries")
+	}
+	return false
+}
+
+// CheckFn checks the files in fsrc and fdst according to Size and
+// hash using checkFunction on each file to check the hashes.
+//
+// checkFunction sees if dst and src are identical
+//
+// it returns true if differences were found
+// it also returns whether it couldn't be hashed
+func CheckFn(ctx context.Context, fdst, fsrc fs.Fs, check checkFn, oneway bool) error {
+	c := &checkMarch{
+		fdst:   fdst,
+		fsrc:   fsrc,
+		check:  check,
+		oneway: oneway,
+		tokens: make(chan struct{}, fs.Config.Checkers),
+	}
+
+	// set up a march over fdst and fsrc
+	m := &march.March{
+		Ctx:      ctx,
+		Fdst:     fdst,
+		Fsrc:     fsrc,
+		Dir:      "",
+		Callback: c,
+	}
+	fs.Debugf(fdst, "Waiting for checks to finish")
+	err := m.Run()
+	c.wg.Wait() // wait for background go-routines
+
+	if c.dstFilesMissing > 0 {
+		fs.Logf(fdst, "%d files missing", c.dstFilesMissing)
+	}
+	if c.srcFilesMissing > 0 {
+		fs.Logf(fsrc, "%d files missing", c.srcFilesMissing)
+	}
+
+	fs.Logf(fdst, "%d differences found", c.differences)
+	if errs := accounting.Stats(ctx).GetErrors(); errs > 0 {
+		fs.Logf(fdst, "%d errors while checking", errs)
+	}
+	if c.noHashes > 0 {
+		fs.Logf(fdst, "%d hashes could not be checked", c.noHashes)
+	}
+	if c.matches > 0 {
+		fs.Logf(fdst, "%d matching files", c.matches)
+	}
+	if c.differences > 0 {
+		return errors.Errorf("%d differences found", c.differences)
+	}
+	return err
+}
+
+// Check the files in fsrc and fdst according to Size and hash
+func Check(ctx context.Context, fdst, fsrc fs.Fs, oneway bool) error {
+	return CheckFn(ctx, fdst, fsrc, checkIdentical, oneway)
+}
+
+// CheckEqualReaders checks to see if in1 and in2 have the same
+// content when read.
+//
+// it returns true if differences were found
+func CheckEqualReaders(in1, in2 io.Reader) (differ bool, err error) {
+	const bufSize = 64 * 1024
+	buf1 := make([]byte, bufSize)
+	buf2 := make([]byte, bufSize)
+	for {
+		n1, err1 := readers.ReadFill(in1, buf1)
+		n2, err2 := readers.ReadFill(in2, buf2)
+		// check errors
+		if err1 != nil && err1 != io.EOF {
+			return true, err1
+		} else if err2 != nil && err2 != io.EOF {
+			return true, err2
+		}
+		// err1 && err2 are nil or io.EOF here
+		// process the data
+		if n1 != n2 || !bytes.Equal(buf1[:n1], buf2[:n2]) {
+			return true, nil
+		}
+		// if both streams finished the we have finished
+		if err1 == io.EOF && err2 == io.EOF {
+			break
+		}
+	}
+	return false, nil
+}
+
 // Retry runs fn up to maxTries times if it returns a retriable error
 func Retry(o interface{}, maxTries int, fn func() error) (err error) {
 	for tries := 1; tries <= maxTries; tries++ {
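By contrast with the `CheckOpt` form shown earlier, a minimal hedged sketch of calling the plain signature restored above; again the paths are placeholders and this only compiles against the `+` side of the diff:

```go
package main

import (
	"context"
	"log"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
)

func main() {
	fdst, err := fs.NewFs("/tmp/dst") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	fsrc, err := fs.NewFs("/tmp/src") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	// oneway=true: only require that source files exist in the destination.
	if err := operations.Check(context.Background(), fdst, fsrc, true); err != nil {
		log.Println(err) // non-nil when differences were found
	}
}
```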
@@ -743,6 +984,60 @@ func Retry(o interface{}, maxTries int, fn func() error) (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
// CheckIdenticalDownload checks to see if dst and src are identical
|
||||
// by reading all their bytes if necessary.
|
||||
//
|
||||
// it returns true if differences were found
|
||||
func CheckIdenticalDownload(ctx context.Context, dst, src fs.Object) (differ bool, err error) {
|
||||
err = Retry(src, fs.Config.LowLevelRetries, func() error {
|
||||
differ, err = checkIdenticalDownload(ctx, dst, src)
|
||||
return err
|
||||
})
|
||||
return differ, err
|
||||
}
|
||||
|
||||
// Does the work for CheckIdenticalDownload
|
||||
func checkIdenticalDownload(ctx context.Context, dst, src fs.Object) (differ bool, err error) {
|
||||
in1, err := dst.Open(ctx)
|
||||
if err != nil {
|
||||
return true, errors.Wrapf(err, "failed to open %q", dst)
|
||||
}
|
||||
tr1 := accounting.Stats(ctx).NewTransfer(dst)
|
||||
defer func() {
|
||||
tr1.Done(nil) // error handling is done by the caller
|
||||
}()
|
||||
in1 = tr1.Account(in1).WithBuffer() // account and buffer the transfer
|
||||
|
||||
in2, err := src.Open(ctx)
|
||||
if err != nil {
|
||||
return true, errors.Wrapf(err, "failed to open %q", src)
|
||||
}
|
||||
tr2 := accounting.Stats(ctx).NewTransfer(dst)
|
||||
defer func() {
|
||||
tr2.Done(nil) // error handling is done by the caller
|
||||
}()
|
||||
in2 = tr2.Account(in2).WithBuffer() // account and buffer the transfer
|
||||
|
||||
// To assign err variable before defer.
|
||||
differ, err = CheckEqualReaders(in1, in2)
|
||||
return
|
||||
}
|
||||
|
||||
// CheckDownload checks the files in fsrc and fdst according to Size
|
||||
// and the actual contents of the files.
|
||||
func CheckDownload(ctx context.Context, fdst, fsrc fs.Fs, oneway bool) error {
|
||||
check := func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool) {
|
||||
differ, err := CheckIdenticalDownload(ctx, a, b)
|
||||
if err != nil {
|
||||
err = fs.CountError(err)
|
||||
fs.Errorf(a, "Failed to download: %v", err)
|
||||
return true, true
|
||||
}
|
||||
return differ, false
|
||||
}
|
||||
return CheckFn(ctx, fdst, fsrc, check, oneway)
|
||||
}
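A hedged usage sketch (the remote paths below are hypothetical and error handling is abbreviated): CheckDownload takes the same arguments as Check, but compares full file contents instead of hashes:

    fdst, err := fs.NewFs("remote:backup") // hypothetical destination
    if err != nil {
        log.Fatal(err)
    }
    fsrc, err := fs.NewFs("/home/user/data") // hypothetical source
    if err != nil {
        log.Fatal(err)
    }
    // oneway=false reports files missing on both sides
    if err := operations.CheckDownload(context.Background(), fdst, fsrc, false); err != nil {
        log.Printf("check found differences or errors: %v", err)
    }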

// ListFn lists the Fs to the supplied function
//
// Lists in parallel which may get them out of order
@@ -22,9 +22,11 @@ package operations_test
import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"net/http/httptest"
	"os"
@@ -43,6 +45,7 @@ import (
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/lib/random"
	"github.com/rclone/rclone/lib/readers"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
@@ -351,6 +354,98 @@ func TestRetry(t *testing.T) {

}

func testCheck(t *testing.T, checkFunction func(ctx context.Context, fdst, fsrc fs.Fs, oneway bool) error) {
	r := fstest.NewRun(t)
	defer r.Finalise()

	check := func(i int, wantErrors int64, wantChecks int64, oneway bool) {
		fs.Debugf(r.Fremote, "%d: Starting check test", i)
		accounting.GlobalStats().ResetCounters()
		var buf bytes.Buffer
		log.SetOutput(&buf)
		defer func() {
			log.SetOutput(os.Stderr)
		}()
		err := checkFunction(context.Background(), r.Fremote, r.Flocal, oneway)
		gotErrors := accounting.GlobalStats().GetErrors()
		gotChecks := accounting.GlobalStats().GetChecks()
		if wantErrors == 0 && err != nil {
			t.Errorf("%d: Got error when not expecting one: %v", i, err)
		}
		if wantErrors != 0 && err == nil {
			t.Errorf("%d: No error when expecting one", i)
		}
		if wantErrors != gotErrors {
			t.Errorf("%d: Expecting %d errors but got %d", i, wantErrors, gotErrors)
		}
		if gotChecks > 0 && !strings.Contains(buf.String(), "matching files") {
			t.Errorf("%d: Total files matching line missing", i)
		}
		if wantChecks != gotChecks {
			t.Errorf("%d: Expecting %d total matching files but got %d", i, wantChecks, gotChecks)
		}
		fs.Debugf(r.Fremote, "%d: Ending check test", i)
	}

	file1 := r.WriteBoth(context.Background(), "rutabaga", "is tasty", t3)
	fstest.CheckItems(t, r.Fremote, file1)
	fstest.CheckItems(t, r.Flocal, file1)
	check(1, 0, 1, false)

	file2 := r.WriteFile("potato2", "------------------------------------------------------------", t1)
	fstest.CheckItems(t, r.Flocal, file1, file2)
	check(2, 1, 1, false)

	file3 := r.WriteObject(context.Background(), "empty space", "-", t2)
	fstest.CheckItems(t, r.Fremote, file1, file3)
	check(3, 2, 1, false)

	file2r := file2
	if fs.Config.SizeOnly {
		file2r = r.WriteObject(context.Background(), "potato2", "--Some-Differences-But-Size-Only-Is-Enabled-----------------", t1)
	} else {
		r.WriteObject(context.Background(), "potato2", "------------------------------------------------------------", t1)
	}
	fstest.CheckItems(t, r.Fremote, file1, file2r, file3)
	check(4, 1, 2, false)

	r.WriteFile("empty space", "-", t2)
	fstest.CheckItems(t, r.Flocal, file1, file2, file3)
	check(5, 0, 3, false)

	file4 := r.WriteObject(context.Background(), "remotepotato", "------------------------------------------------------------", t1)
	fstest.CheckItems(t, r.Fremote, file1, file2r, file3, file4)
	check(6, 1, 3, false)
	check(7, 0, 3, true)
}

func TestCheck(t *testing.T) {
	testCheck(t, operations.Check)
}

func TestCheckFsError(t *testing.T) {
	dstFs, err := fs.NewFs("non-existent")
	if err != nil {
		t.Fatal(err)
	}
	srcFs, err := fs.NewFs("non-existent")
	if err != nil {
		t.Fatal(err)
	}
	err = operations.Check(context.Background(), dstFs, srcFs, false)
	require.Error(t, err)
}

func TestCheckDownload(t *testing.T) {
	testCheck(t, operations.CheckDownload)
}

func TestCheckSizeOnly(t *testing.T) {
	fs.Config.SizeOnly = true
	defer func() { fs.Config.SizeOnly = false }()
	TestCheck(t)
}

func TestCat(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
@@ -1084,6 +1179,68 @@ func TestOverlapping(t *testing.T) {
	}
}

func TestCheckEqualReaders(t *testing.T) {
	b65a := make([]byte, 65*1024)
	b65b := make([]byte, 65*1024)
	b65b[len(b65b)-1] = 1
	b66 := make([]byte, 66*1024)

	differ, err := operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b65a))
	assert.NoError(t, err)
	assert.Equal(t, differ, false)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b65b))
	assert.NoError(t, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b66))
	assert.NoError(t, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b66), bytes.NewBuffer(b65a))
	assert.NoError(t, err)
	assert.Equal(t, differ, true)

	myErr := errors.New("sentinel")
	wrap := func(b []byte) io.Reader {
		r := bytes.NewBuffer(b)
		e := readers.ErrorReader{Err: myErr}
		return io.MultiReader(r, e)
	}

	differ, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b65a))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b65b))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b66))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(wrap(b66), bytes.NewBuffer(b65a))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b65a))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b65b))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b66))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)

	differ, err = operations.CheckEqualReaders(bytes.NewBuffer(b66), wrap(b65a))
	assert.Equal(t, myErr, err)
	assert.Equal(t, differ, true)
}
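The wrap helper above splices lib/readers.ErrorReader after real data so the comparison fails mid-stream. The same failure-injection pattern can be written with only the standard library; this standalone sketch (an illustrative assumption, not code from the change) behaves like wrap:

    // errReader fails every Read with a fixed error, like readers.ErrorReader.
    type errReader struct{ err error }

    func (e errReader) Read(p []byte) (int, error) { return 0, e.err }

    // failingReader yields 64 KiB of zeros, then fails mid-stream.
    func failingReader(err error) io.Reader {
        return io.MultiReader(bytes.NewReader(make([]byte, 64*1024)), errReader{err: err})
    }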

func TestListFormat(t *testing.T) {
	item0 := &operations.ListJSONItem{
		Path: "a",

@@ -658,7 +658,6 @@ func (s *syncCopyMove) pushRenameMap(hash string, obj fs.Object) {
// renameMap or returns nil if not found.
func (s *syncCopyMove) popRenameMap(hash string, src fs.Object) (dst fs.Object) {
	s.renameMapMu.Lock()
	defer s.renameMapMu.Unlock()
	dsts, ok := s.renameMap[hash]
	if ok && len(dsts) > 0 {
		// Element to remove
@@ -691,6 +690,7 @@ func (s *syncCopyMove) popRenameMap(hash string, src fs.Object) (dst fs.Object)
			delete(s.renameMap, hash)
		}
	}
	s.renameMapMu.Unlock()
	return dst
}

19 go.mod
@@ -5,6 +5,7 @@ go 1.14
require (
	bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512
	cloud.google.com/go v0.59.0 // indirect
	code.gitea.io/log v0.0.0-20191208183219-f31613838113 // indirect
	github.com/Azure/azure-pipeline-go v0.2.2
	github.com/Azure/azure-storage-blob-go v0.10.0
	github.com/Unknwon/goconfig v0.0.0-20191126170842-860a72fb44fd
@@ -18,18 +19,23 @@ require (
	github.com/btcsuite/btcutil v1.0.2 // indirect
	github.com/calebcase/tmpfile v1.0.2 // indirect
	github.com/coreos/go-semver v0.3.0
	github.com/djherbis/times v1.2.0 // indirect
	github.com/dropbox/dropbox-sdk-go-unofficial v5.6.0+incompatible
	github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813 // indirect
	github.com/elazarl/go-bindata-assetfs v1.0.0 // indirect
	github.com/gogo/protobuf v1.3.1 // indirect
	github.com/google/go-querystring v1.0.0 // indirect
	github.com/hanwen/go-fuse/v2 v2.0.3
	github.com/jlaffaye/ftp v0.0.0-20200720194710-13949d38913e
	github.com/jlaffaye/ftp v0.0.0-20200602180915-5563613968bf
	github.com/jzelinskie/whirlpool v0.0.0-20170603002051-c19460b8caa6
	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
	github.com/klauspost/cpuid v1.3.0 // indirect
	github.com/koofr/go-httpclient v0.0.0-20200420163713-93aa7c75b348
	github.com/koofr/go-koofrclient v0.0.0-20190724113126-8e5366da203a
	github.com/mattn/go-colorable v0.1.7
	github.com/mattn/go-ieproxy v0.0.1 // indirect
	github.com/mattn/go-runewidth v0.0.9
	github.com/minio/minio-go/v6 v6.0.57 // indirect
	github.com/mitchellh/go-homedir v1.1.0
	github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
	github.com/ncw/swift v1.0.52
@@ -46,6 +52,7 @@ require (
	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
	github.com/spf13/cobra v1.0.0
	github.com/spf13/pflag v1.0.5
	github.com/stephens2424/writerset v1.0.2 // indirect
	github.com/stretchr/testify v1.6.1
	github.com/t3rm1n4l/go-mega v0.0.0-20200416171014-ffad7fcb44b8
	github.com/xanzy/ssh-agent v0.2.1
@@ -54,18 +61,20 @@ require (
	go.etcd.io/bbolt v1.3.5
	go.opencensus.io v0.22.4 // indirect
	go.uber.org/zap v1.15.0 // indirect
	goftp.io/server v0.4.0
	golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899
	golang.org/x/net v0.0.0-20200707034311-ab3426394381
	goftp.io/server v0.3.5-0.20200630051340-d7b447417587
	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
	golang.org/x/net v0.0.0-20200625001655-4c5254603344
	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
	golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
	golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666
	golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae
	golang.org/x/text v0.3.3
	golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1
	google.golang.org/api v0.28.0
	google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5 // indirect
	google.golang.org/grpc v1.30.0 // indirect
	google.golang.org/protobuf v1.25.0 // indirect
	gopkg.in/ini.v1 v1.57.0 // indirect
	gopkg.in/src-d/go-git.v4 v4.13.1 // indirect
	gopkg.in/yaml.v2 v2.3.0
	gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect
	storj.io/common v0.0.0-20200624215549-bf610d22d466 // indirect
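The paired old/new lines above (for example the two jlaffaye/ftp and goftp.io/server versions) are dependency pins moving back to the versions matched by the checked-in vendor/ tree added below. For orientation only (these commands are not part of the commit), a vendor tree like this is normally produced with the standard go tool:

    go mod tidy    # reconcile go.mod and go.sum with the source
    go mod vendor  # copy dependency sources into vendor/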
70 go.sum
@@ -1,5 +1,9 @@
bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc h1:utDghgcjE8u+EBjHOgYT+dJPcnDF05KqWMBcjuJy510=
bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512 h1:SRsZGA7aFnCZETmov57jwPrWuTmaZK6+4R4v5FUe1/c=
bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
bazil.org/fuse v0.0.0-20200524192727-fb710f7dfd05 h1:UrYe9YkT4Wpm6D+zByEyCJQzDqTPXqTDUI7bZ41i9VE=
bazil.org/fuse v0.0.0-20200524192727-fb710f7dfd05/go.mod h1:h0h5FBYpXThbvSfTqthw+0I4nmHnhTHkO5BoOHsBWqg=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -32,6 +36,7 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
code.gitea.io/log v0.0.0-20191208183219-f31613838113/go.mod h1:YOBHMQw/14CwuwNStgQyvnzoDJEO6ARjcSdD48QRzhM=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
@@ -56,6 +61,7 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
github.com/Unknwon/goconfig v0.0.0-20191126170842-860a72fb44fd h1:+CYOsXi89xOqBkj7CuEJjA2It+j+R3ngUZEydr6mtkw=
@@ -67,6 +73,7 @@ github.com/aalpar/deheap v0.0.0-20200318053559-9a0c2883bd56/go.mod h1:EJFoWbcEEV
github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=
github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -78,7 +85,9 @@ github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54g
github.com/anacrolix/ffprobe v1.0.0/go.mod h1:BIw+Bjol6CWjm/CRWrVLk2Vy+UYlkgmBZ05vpSYqZPw=
github.com/anacrolix/missinggo v1.1.0/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo=
github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/atotto/clipboard v0.1.2 h1:YZCtFu5Ie8qX2VmVTBnrqLSiU9XOWwqNRmdT3gIQzbY=
github.com/atotto/clipboard v0.1.2/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/aws/aws-sdk-go v1.32.11 h1:1nYF+Tfccn/hnAZsuwPPMSCVUVnx3j6LKOpx/WhgH0A=
@@ -127,6 +136,7 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -134,18 +144,25 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/djherbis/times v1.2.0 h1:xANXjsC/iBqbO00vkWlYwPWgBgEVU6m6AFYg0Pic+Mc=
github.com/djherbis/times v1.2.0/go.mod h1:CGMZlo255K5r4Yw0b9RRfFQpM2y7uOmxg4jm9HsaVf8=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dropbox/dropbox-sdk-go-unofficial v5.6.0+incompatible h1:DtumzkLk2zZ2SeElEr+VNz+zV7l+BTe509cV4sKPXbM=
github.com/dropbox/dropbox-sdk-go-unofficial v5.6.0+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@@ -236,15 +253,18 @@ github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbc
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
github.com/jlaffaye/ftp v0.0.0-20200720194710-13949d38913e h1:itZyHiOkiB8mIGouegRNLM9LttGQ3yrgRmp/J/6H/0g=
github.com/jlaffaye/ftp v0.0.0-20200720194710-13949d38913e/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
github.com/jlaffaye/ftp v0.0.0-20200602180915-5563613968bf h1:U96JHc+AF5zL3M3q/ljvvwuRgjbCAxlPh0KzpDcwlIE=
github.com/jlaffaye/ftp v0.0.0-20200602180915-5563613968bf/go.mod h1:PwUeyujmhaGohgOf0kJKxPfk3HcRv8QD/wAUN44go4k=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
@@ -257,10 +277,15 @@ github.com/jzelinskie/whirlpool v0.0.0-20170603002051-c19460b8caa6 h1:RyOL4+OIUc
github.com/jzelinskie/whirlpool v0.0.0-20170603002051-c19460b8caa6/go.mod h1:KmHnJWQrgEvbuy0vcvj00gtMqbvNn1L+3YUZLK/B92c=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/cpuid v1.2.3 h1:CCtW0xUnWGVINKvE/WWOYKdsPV6mawAtvQuSl8guwQs=
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.3.0 h1:2JqaNE1hGdABW2YbA3TenkO7RiPFRvSWnEnGqWh9sHE=
github.com/klauspost/cpuid v1.3.0/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -274,6 +299,7 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
@@ -285,14 +311,19 @@ github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpu
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4=
github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
github.com/minio/minio-go/v6 v6.0.46 h1:waExJtO53xrnsNX//7cSc1h3478wqTryDx4RVD7o26I=
github.com/minio/minio-go/v6 v6.0.46/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg=
github.com/minio/minio-go/v6 v6.0.57 h1:ixPkbKkyD7IhnluRgQpGSpHdpvNVaW6OD5R9IAO/9Tw=
github.com/minio/minio-go/v6 v6.0.57/go.mod h1:5+R/nM9Pwrh0vqF+HbYYDQ84wdUFPyXHkrdT4AIkifM=
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
@@ -326,6 +357,7 @@ github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg=
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 h1:XeOYlK9W1uCmhjJSsY78Mcuh7MVkNjTzmHx1yBzizSU=
github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14/go.mod h1:jVblp62SafmidSkvWrXyxAme3gaTfEtWwRPGz5cpvHg=
@@ -363,17 +395,20 @@ github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 h1:Y258uzX
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:bSJjRokAHHOhA+XFxplld8w2R/dXLH7Z3BZ532vhFwU=
github.com/rfjakob/eme v1.1.1 h1:t+CgvcOn+eDvj2xdglxsSnkgg8LM8jwdxnV7OnsrTn0=
github.com/rfjakob/eme v1.1.1/go.mod h1:U2bmx0hDj8EyDdcxmD5t3XHDnBFnyNNc22n1R4008eM=
github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sevlyar/go-daemon v0.1.5 h1:Zy/6jLbM8CfqJ4x4RPr7MJlSKt90f00kNM1D401C+Qk=
github.com/sevlyar/go-daemon v0.1.5/go.mod h1:6dJpPatBT9eUwM5VCw9Bt6CdX9Tk6UWvhW3MebLDRKE=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
@@ -399,8 +434,11 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -460,8 +498,12 @@ go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
goftp.io/server v0.4.0 h1:hqsVdwd1/l6QtYxD9pxca9mEAJYZ7+FPCnmeXKXHQNw=
goftp.io/server v0.4.0/go.mod h1:hFZeR656ErRt3ojMKt7H10vQ5nuWV1e0YeUTeorlR6k=
goftp.io/server v0.3.3 h1:ufplF8S62baWmzmFBkvaVfVGTD6cq8ona8BzNvwRiqc=
goftp.io/server v0.3.3/go.mod h1:Ewqfa40Xnkh/AEA+Sf9KX2lp8aKnEum77YDHhHwABK4=
goftp.io/server v0.3.4 h1:Sq0leWKoZFRWtbxNrHRqKpBygVg2kcRPqgbXnLIFRVU=
goftp.io/server v0.3.4/go.mod h1:hFZeR656ErRt3ojMKt7H10vQ5nuWV1e0YeUTeorlR6k=
goftp.io/server v0.3.5-0.20200630051340-d7b447417587 h1:wbWVo+1uNd5kviLopJ2mZbVQgRG91Y/tGEUu67WrLbA=
goftp.io/server v0.3.5-0.20200630051340-d7b447417587/go.mod h1:hFZeR656ErRt3ojMKt7H10vQ5nuWV1e0YeUTeorlR6k=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -480,8 +522,6 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899 h1:DZhuSZLsGlFL4CmhA8BcRA0mnthyA/nZ00AqCUo7vHg=
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -544,8 +584,8 @@ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -568,10 +608,12 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190415145633-3fd5a3612ccd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -600,8 +642,8 @@ golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200610111108-226ff32320da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666 h1:gVCS+QOncANNPlmlO1AhlU3oxs4V9z+gTtPwIk3p2N8=
golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -628,6 +670,7 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -651,6 +694,7 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -748,9 +792,15 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk=
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=
gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98=
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
2 vendor/bazil.org/fuse/.gitattributes generated vendored Normal file
@@ -0,0 +1,2 @@
*.go filter=gofmt
*.cgo filter=gofmt

11 vendor/bazil.org/fuse/.gitignore generated vendored Normal file
@@ -0,0 +1,11 @@
*~
.#*
## the next line needs to start with a backslash to avoid looking like
## a comment
\#*#
.*.swp

*.test

/clockfs
/hellofs

93 vendor/bazil.org/fuse/LICENSE generated vendored Normal file
@@ -0,0 +1,93 @@
Copyright (c) 2013-2019 Tommi Virtanen.
Copyright (c) 2009, 2011, 2012 The Go Authors.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


The following included software components have additional copyright
notices and license terms that may differ from the above.


File fuse.go:

// Adapted from Plan 9 from User Space's src/cmd/9pfuse/fuse.c,
// which carries this notice:
//
// The files in this directory are subject to the following license.
//
// The author of this software is Russ Cox.
//
// Copyright (c) 2006 Russ Cox
//
// Permission to use, copy, modify, and distribute this software for any
// purpose without fee is hereby granted, provided that this entire notice
// is included in all copies of any software which is or includes a copy
// or modification of this software and in all copies of the supporting
// documentation for such software.
//
// THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
// WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR WARRANTY
// OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS
// FITNESS FOR ANY PARTICULAR PURPOSE.


File fuse_kernel.go:

// Derived from FUSE's fuse_kernel.h
/*
This file defines the kernel interface of FUSE
Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>


This -- and only this -- header file may also be distributed under
the terms of the BSD Licence as follows:

Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
*/

23 vendor/bazil.org/fuse/README.md generated vendored Normal file
@@ -0,0 +1,23 @@
bazil.org/fuse -- Filesystems in Go
===================================

`bazil.org/fuse` is a Go library for writing FUSE userspace
filesystems.

It is a from-scratch implementation of the kernel-userspace
communication protocol, and does not use the C library from the
project called FUSE. `bazil.org/fuse` embraces Go fully for safety and
ease of programming.

Here’s how to get going:

    go get bazil.org/fuse

Website: http://bazil.org/fuse/

Github repository: https://github.com/bazil/fuse

API docs: http://godoc.org/bazil.org/fuse

Our thanks to Russ Cox for his fuse library, which this project is
based on.

35 vendor/bazil.org/fuse/buffer.go generated vendored Normal file
@@ -0,0 +1,35 @@
package fuse

import "unsafe"

// buffer provides a mechanism for constructing a message from
// multiple segments.
type buffer []byte

// alloc allocates size bytes and returns a pointer to the new
// segment.
func (w *buffer) alloc(size uintptr) unsafe.Pointer {
	s := int(size)
	if len(*w)+s > cap(*w) {
		old := *w
		*w = make([]byte, len(*w), 2*cap(*w)+s)
		copy(*w, old)
	}
	l := len(*w)
	*w = (*w)[:l+s]
	return unsafe.Pointer(&(*w)[l])
}

// reset clears out the contents of the buffer.
func (w *buffer) reset() {
	for i := range (*w)[:cap(*w)] {
		(*w)[i] = 0
	}
	*w = (*w)[:0]
}

func newBuffer(extra uintptr) buffer {
	const hdrSize = unsafe.Sizeof(outHeader{})
	buf := make(buffer, hdrSize, hdrSize+extra)
	return buf
}

21 vendor/bazil.org/fuse/debug.go generated vendored Normal file
@@ -0,0 +1,21 @@
package fuse

import (
	"runtime"
)

func stack() string {
	buf := make([]byte, 1024)
	return string(buf[:runtime.Stack(buf, false)])
}

func nop(msg interface{}) {}

// Debug is called to output debug messages, including protocol
// traces. The default behavior is to do nothing.
//
// The messages have human-friendly string representations and are
// safe to marshal to JSON.
//
// Implementations must not retain msg.
var Debug func(msg interface{}) = nop
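For example, a caller can route these traces to the standard logger while debugging; this is an illustrative sketch, not part of the vendored file:

    fuse.Debug = func(msg interface{}) {
        log.Printf("fuse: %v", msg)
    }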

17 vendor/bazil.org/fuse/error_darwin.go generated vendored Normal file
@@ -0,0 +1,17 @@
package fuse

import (
	"syscall"
)

const (
	ENOATTR = Errno(syscall.ENOATTR)
)

const (
	errNoXattr = ENOATTR
)

func init() {
	errnoNames[errNoXattr] = "ENOATTR"
}

15 vendor/bazil.org/fuse/error_freebsd.go generated vendored Normal file
@@ -0,0 +1,15 @@
package fuse

import "syscall"

const (
	ENOATTR = Errno(syscall.ENOATTR)
)

const (
	errNoXattr = ENOATTR
)

func init() {
	errnoNames[errNoXattr] = "ENOATTR"
}

17 vendor/bazil.org/fuse/error_linux.go generated vendored Normal file
@@ -0,0 +1,17 @@
package fuse

import (
	"syscall"
)

const (
	ENODATA = Errno(syscall.ENODATA)
)

const (
	errNoXattr = ENODATA
)

func init() {
	errnoNames[errNoXattr] = "ENODATA"
}

31 vendor/bazil.org/fuse/error_std.go generated vendored Normal file
@@ -0,0 +1,31 @@
package fuse

// There is very little commonality in extended attribute errors
// across platforms.
//
// getxattr return value for "extended attribute does not exist" is
// ENOATTR on OS X, and ENODATA on Linux and apparently at least
// NetBSD. There may be a #define ENOATTR on Linux too, but the value
// is ENODATA in the actual syscalls. FreeBSD and OpenBSD have no
// ENODATA, only ENOATTR. ENOATTR is not in any of the standards,
// ENODATA exists but is only used for STREAMs.
//
// Each platform will define an errNoXattr constant, and this file
// will enforce that it implements the right interfaces and hide the
// implementation.
//
// https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/getxattr.2.html
// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013090.html
// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013097.html
// http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
// http://www.freebsd.org/cgi/man.cgi?query=extattr_get_file&sektion=2
// http://nixdoc.net/man-pages/openbsd/man2/extattr_get_file.2.html

// ErrNoXattr is a platform-independent error value meaning the
// extended attribute was not found. It can be used to respond to
// GetxattrRequest and such.
const ErrNoXattr = errNoXattr

var _ error = ErrNoXattr
var _ Errno = ErrNoXattr
var _ ErrorNumber = ErrNoXattr
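As a usage sketch (the node type is hypothetical, not from the vendored code), a filesystem node with no extended attributes can answer Getxattr requests with this value:

    func (n *myNode) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
        return fuse.ErrNoXattr // translated to ENOATTR or ENODATA per platform
    }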

1562 vendor/bazil.org/fuse/fs/serve.go generated vendored Normal file
File diff suppressed because it is too large

97 vendor/bazil.org/fuse/fs/tree.go generated vendored Normal file
@@ -0,0 +1,97 @@
// FUSE directory tree, for servers that wish to use it with the service loop.

package fs

import (
	"context"
	"os"
	pathpkg "path"
	"strings"
	"syscall"

	"bazil.org/fuse"
)

// A Tree implements a basic read-only directory tree for FUSE.
// The Nodes contained in it may still be writable.
type Tree struct {
	tree
}

func (t *Tree) Root() (Node, error) {
	return &t.tree, nil
}

// Add adds the path to the tree, resolving to the given node.
// If path or a prefix of path has already been added to the tree,
// Add panics.
//
// Add is only safe to call before starting to serve requests.
func (t *Tree) Add(path string, node Node) {
	path = pathpkg.Clean("/" + path)[1:]
	elems := strings.Split(path, "/")
	dir := Node(&t.tree)
	for i, elem := range elems {
		dt, ok := dir.(*tree)
		if !ok {
			panic("fuse: Tree.Add for " + strings.Join(elems[:i], "/") + " and " + path)
		}
		n := dt.lookup(elem)
		if n != nil {
			if i+1 == len(elems) {
				panic("fuse: Tree.Add for " + path + " conflicts with " + elem)
			}
			dir = n
		} else {
			if i+1 == len(elems) {
				dt.add(elem, node)
			} else {
				dir = &tree{}
				dt.add(elem, dir)
			}
		}
	}
}

type treeDir struct {
	name string
	node Node
}

type tree struct {
	dir []treeDir
}

func (t *tree) lookup(name string) Node {
	for _, d := range t.dir {
		if d.name == name {
			return d.node
		}
	}
	return nil
}

func (t *tree) add(name string, n Node) {
	t.dir = append(t.dir, treeDir{name, n})
}

func (t *tree) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Mode = os.ModeDir | 0555
	return nil
}

func (t *tree) Lookup(ctx context.Context, name string) (Node, error) {
	n := t.lookup(name)
	if n != nil {
		return n, nil
	}
	return nil, syscall.ENOENT
}

func (t *tree) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
	var out []fuse.Dirent
	for _, d := range t.dir {
		out = append(out, fuse.Dirent{Name: d.name})
	}
	return out, nil
}
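A brief sketch of how a server might use Tree (the mountpoint and myFileNode are hypothetical; error handling is abbreviated):

    c, err := fuse.Mount("/mnt/example")
    if err != nil {
        log.Fatal(err)
    }
    defer c.Close()

    t := &fs.Tree{}
    t.Add("docs/readme", myFileNode) // the intermediate "docs" directory is created implicitly
    if err := fs.Serve(c, t); err != nil {
        log.Fatal(err)
    }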
|
||||
2338
vendor/bazil.org/fuse/fuse.go
generated
vendored
Normal file
2338
vendor/bazil.org/fuse/fuse.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
9
vendor/bazil.org/fuse/fuse_darwin.go
generated
vendored
Normal file
9
vendor/bazil.org/fuse/fuse_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
package fuse
|
||||
|
||||
// Maximum file write size we are prepared to receive from the kernel.
|
||||
//
|
||||
// This value has to be >=16MB or OSXFUSE (3.4.0 observed) will
|
||||
// forcibly close the /dev/fuse file descriptor on a Setxattr with a
|
||||
// 16MB value. See TestSetxattr16MB and
|
||||
// https://github.com/bazil/fuse/issues/42
|
||||
const maxWrite = 16 * 1024 * 1024
|
||||
6
vendor/bazil.org/fuse/fuse_freebsd.go
generated
vendored
Normal file
6
vendor/bazil.org/fuse/fuse_freebsd.go
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
package fuse
|
||||
|
||||
// Maximum file write size we are prepared to receive from the kernel.
|
||||
//
|
||||
// This number is just a guess.
|
||||
const maxWrite = 128 * 1024
|
||||
772 vendor/bazil.org/fuse/fuse_kernel.go generated vendored Normal file
@@ -0,0 +1,772 @@
// See the file LICENSE for copyright and licensing information.

// Derived from FUSE's fuse_kernel.h, which carries this notice:
/*
   This file defines the kernel interface of FUSE
   Copyright (C) 2001-2007  Miklos Szeredi <miklos@szeredi.hu>

   This -- and only this -- header file may also be distributed under
   the terms of the BSD Licence as follows:

   Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:
   1. Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
   FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   SUCH DAMAGE.
*/

package fuse

import (
	"fmt"
	"syscall"
	"unsafe"
)

// The FUSE version implemented by the package.
const (
	protoVersionMinMajor = 7
	protoVersionMinMinor = 8
	protoVersionMaxMajor = 7
	protoVersionMaxMinor = 12
)

const (
	rootID = 1
)

type kstatfs struct {
	Blocks  uint64
	Bfree   uint64
	Bavail  uint64
	Files   uint64
	Ffree   uint64
	Bsize   uint32
	Namelen uint32
	Frsize  uint32
	_       uint32
	Spare   [6]uint32
}

type fileLock struct {
	Start uint64
	End   uint64
	Type  uint32
	Pid   uint32
}

// GetattrFlags are bit flags that can be seen in GetattrRequest.
type GetattrFlags uint32

const (
	// Indicates the handle is valid.
	GetattrFh GetattrFlags = 1 << 0
)

var getattrFlagsNames = []flagName{
	{uint32(GetattrFh), "GetattrFh"},
}

func (fl GetattrFlags) String() string {
	return flagString(uint32(fl), getattrFlagsNames)
}

// The SetattrValid are bit flags describing which fields in the SetattrRequest
// are included in the change.
type SetattrValid uint32

const (
	SetattrMode   SetattrValid = 1 << 0
	SetattrUid    SetattrValid = 1 << 1
	SetattrGid    SetattrValid = 1 << 2
	SetattrSize   SetattrValid = 1 << 3
	SetattrAtime  SetattrValid = 1 << 4
	SetattrMtime  SetattrValid = 1 << 5
	SetattrHandle SetattrValid = 1 << 6

	// Linux only(?)
	SetattrAtimeNow  SetattrValid = 1 << 7
	SetattrMtimeNow  SetattrValid = 1 << 8
	SetattrLockOwner SetattrValid = 1 << 9 // http://www.mail-archive.com/git-commits-head@vger.kernel.org/msg27852.html

	// OS X only
	SetattrCrtime   SetattrValid = 1 << 28
	SetattrChgtime  SetattrValid = 1 << 29
	SetattrBkuptime SetattrValid = 1 << 30
	SetattrFlags    SetattrValid = 1 << 31
)

func (fl SetattrValid) Mode() bool      { return fl&SetattrMode != 0 }
func (fl SetattrValid) Uid() bool       { return fl&SetattrUid != 0 }
func (fl SetattrValid) Gid() bool       { return fl&SetattrGid != 0 }
func (fl SetattrValid) Size() bool      { return fl&SetattrSize != 0 }
func (fl SetattrValid) Atime() bool     { return fl&SetattrAtime != 0 }
func (fl SetattrValid) Mtime() bool     { return fl&SetattrMtime != 0 }
func (fl SetattrValid) Handle() bool    { return fl&SetattrHandle != 0 }
func (fl SetattrValid) AtimeNow() bool  { return fl&SetattrAtimeNow != 0 }
func (fl SetattrValid) MtimeNow() bool  { return fl&SetattrMtimeNow != 0 }
func (fl SetattrValid) LockOwner() bool { return fl&SetattrLockOwner != 0 }
func (fl SetattrValid) Crtime() bool    { return fl&SetattrCrtime != 0 }
func (fl SetattrValid) Chgtime() bool   { return fl&SetattrChgtime != 0 }
func (fl SetattrValid) Bkuptime() bool  { return fl&SetattrBkuptime != 0 }
func (fl SetattrValid) Flags() bool     { return fl&SetattrFlags != 0 }

func (fl SetattrValid) String() string {
	return flagString(uint32(fl), setattrValidNames)
}

var setattrValidNames = []flagName{
	{uint32(SetattrMode), "SetattrMode"},
	{uint32(SetattrUid), "SetattrUid"},
	{uint32(SetattrGid), "SetattrGid"},
	{uint32(SetattrSize), "SetattrSize"},
	{uint32(SetattrAtime), "SetattrAtime"},
	{uint32(SetattrMtime), "SetattrMtime"},
	{uint32(SetattrHandle), "SetattrHandle"},
	{uint32(SetattrAtimeNow), "SetattrAtimeNow"},
	{uint32(SetattrMtimeNow), "SetattrMtimeNow"},
	{uint32(SetattrLockOwner), "SetattrLockOwner"},
	{uint32(SetattrCrtime), "SetattrCrtime"},
	{uint32(SetattrChgtime), "SetattrChgtime"},
	{uint32(SetattrBkuptime), "SetattrBkuptime"},
	{uint32(SetattrFlags), "SetattrFlags"},
}

// Flags that can be seen in OpenRequest.Flags.
const (
	// Access modes. These are not 1-bit flags, but alternatives where
	// only one can be chosen. See the IsReadOnly etc convenience
	// methods.
	OpenReadOnly  OpenFlags = syscall.O_RDONLY
	OpenWriteOnly OpenFlags = syscall.O_WRONLY
	OpenReadWrite OpenFlags = syscall.O_RDWR

	// File was opened in append-only mode, all writes will go to end
	// of file. OS X does not provide this information.
	OpenAppend    OpenFlags = syscall.O_APPEND
	OpenCreate    OpenFlags = syscall.O_CREAT
	OpenDirectory OpenFlags = syscall.O_DIRECTORY
	OpenExclusive OpenFlags = syscall.O_EXCL
	OpenNonblock  OpenFlags = syscall.O_NONBLOCK
	OpenSync      OpenFlags = syscall.O_SYNC
	OpenTruncate  OpenFlags = syscall.O_TRUNC
)

// OpenAccessModeMask is a bitmask that separates the access mode
// from the other flags in OpenFlags.
const OpenAccessModeMask OpenFlags = syscall.O_ACCMODE

// OpenFlags are the O_FOO flags passed to open/create/etc calls. For
// example, os.O_WRONLY | os.O_APPEND.
type OpenFlags uint32

func (fl OpenFlags) String() string {
	// O_RDONLY, O_RWONLY, O_RDWR are not flags
	s := accModeName(fl & OpenAccessModeMask)
	flags := uint32(fl &^ OpenAccessModeMask)
	if flags != 0 {
		s = s + "+" + flagString(flags, openFlagNames)
	}
	return s
}

// Return true if OpenReadOnly is set.
func (fl OpenFlags) IsReadOnly() bool {
	return fl&OpenAccessModeMask == OpenReadOnly
}

// Return true if OpenWriteOnly is set.
func (fl OpenFlags) IsWriteOnly() bool {
	return fl&OpenAccessModeMask == OpenWriteOnly
}

// Return true if OpenReadWrite is set.
func (fl OpenFlags) IsReadWrite() bool {
	return fl&OpenAccessModeMask == OpenReadWrite
}

func accModeName(flags OpenFlags) string {
	switch flags {
	case OpenReadOnly:
		return "OpenReadOnly"
	case OpenWriteOnly:
		return "OpenWriteOnly"
	case OpenReadWrite:
		return "OpenReadWrite"
	default:
		return ""
	}
}

var openFlagNames = []flagName{
	{uint32(OpenAppend), "OpenAppend"},
	{uint32(OpenCreate), "OpenCreate"},
	{uint32(OpenDirectory), "OpenDirectory"},
	{uint32(OpenExclusive), "OpenExclusive"},
	{uint32(OpenNonblock), "OpenNonblock"},
	{uint32(OpenSync), "OpenSync"},
	{uint32(OpenTruncate), "OpenTruncate"},
}

// The OpenResponseFlags are returned in the OpenResponse.
type OpenResponseFlags uint32

const (
	OpenDirectIO    OpenResponseFlags = 1 << 0 // bypass page cache for this open file
	OpenKeepCache   OpenResponseFlags = 1 << 1 // don't invalidate the data cache on open
	OpenNonSeekable OpenResponseFlags = 1 << 2 // mark the file as non-seekable (not supported on OS X or FreeBSD)

	OpenPurgeAttr OpenResponseFlags = 1 << 30 // OS X
	OpenPurgeUBC  OpenResponseFlags = 1 << 31 // OS X
)

func (fl OpenResponseFlags) String() string {
	return flagString(uint32(fl), openResponseFlagNames)
}

var openResponseFlagNames = []flagName{
	{uint32(OpenDirectIO), "OpenDirectIO"},
	{uint32(OpenKeepCache), "OpenKeepCache"},
	{uint32(OpenNonSeekable), "OpenNonSeekable"},
	{uint32(OpenPurgeAttr), "OpenPurgeAttr"},
	{uint32(OpenPurgeUBC), "OpenPurgeUBC"},
}

// The InitFlags are used in the Init exchange.
type InitFlags uint32

const (
	InitAsyncRead     InitFlags = 1 << 0
	InitPosixLocks    InitFlags = 1 << 1
	InitFileOps       InitFlags = 1 << 2
	InitAtomicTrunc   InitFlags = 1 << 3
	InitExportSupport InitFlags = 1 << 4
	InitBigWrites     InitFlags = 1 << 5
	// Do not mask file access modes with umask. Not supported on OS X.
	InitDontMask        InitFlags = 1 << 6
	InitSpliceWrite     InitFlags = 1 << 7
	InitSpliceMove      InitFlags = 1 << 8
	InitSpliceRead      InitFlags = 1 << 9
	InitFlockLocks      InitFlags = 1 << 10
	InitHasIoctlDir     InitFlags = 1 << 11
	InitAutoInvalData   InitFlags = 1 << 12
	InitDoReaddirplus   InitFlags = 1 << 13
	InitReaddirplusAuto InitFlags = 1 << 14
	InitAsyncDIO        InitFlags = 1 << 15
	InitWritebackCache  InitFlags = 1 << 16
	InitNoOpenSupport   InitFlags = 1 << 17

	InitCaseSensitive InitFlags = 1 << 29 // OS X only
	InitVolRename     InitFlags = 1 << 30 // OS X only
	InitXtimes        InitFlags = 1 << 31 // OS X only
)

type flagName struct {
	bit  uint32
	name string
}

var initFlagNames = []flagName{
	{uint32(InitAsyncRead), "InitAsyncRead"},
	{uint32(InitPosixLocks), "InitPosixLocks"},
	{uint32(InitFileOps), "InitFileOps"},
	{uint32(InitAtomicTrunc), "InitAtomicTrunc"},
	{uint32(InitExportSupport), "InitExportSupport"},
	{uint32(InitBigWrites), "InitBigWrites"},
	{uint32(InitDontMask), "InitDontMask"},
	{uint32(InitSpliceWrite), "InitSpliceWrite"},
	{uint32(InitSpliceMove), "InitSpliceMove"},
	{uint32(InitSpliceRead), "InitSpliceRead"},
	{uint32(InitFlockLocks), "InitFlockLocks"},
	{uint32(InitHasIoctlDir), "InitHasIoctlDir"},
	{uint32(InitAutoInvalData), "InitAutoInvalData"},
	{uint32(InitDoReaddirplus), "InitDoReaddirplus"},
	{uint32(InitReaddirplusAuto), "InitReaddirplusAuto"},
	{uint32(InitAsyncDIO), "InitAsyncDIO"},
	{uint32(InitWritebackCache), "InitWritebackCache"},
	{uint32(InitNoOpenSupport), "InitNoOpenSupport"},

	{uint32(InitCaseSensitive), "InitCaseSensitive"},
	{uint32(InitVolRename), "InitVolRename"},
	{uint32(InitXtimes), "InitXtimes"},
}

func (fl InitFlags) String() string {
	return flagString(uint32(fl), initFlagNames)
}

func flagString(f uint32, names []flagName) string {
	var s string

	if f == 0 {
		return "0"
	}

	for _, n := range names {
		if f&n.bit != 0 {
			s += "+" + n.name
			f &^= n.bit
		}
	}
	if f != 0 {
		s += fmt.Sprintf("%+#x", f)
	}
	return s[1:]
}

// The ReleaseFlags are used in the Release exchange.
type ReleaseFlags uint32

const (
	ReleaseFlush ReleaseFlags = 1 << 0
)

func (fl ReleaseFlags) String() string {
	return flagString(uint32(fl), releaseFlagNames)
}

var releaseFlagNames = []flagName{
	{uint32(ReleaseFlush), "ReleaseFlush"},
}

// Opcodes
const (
	opLookup      = 1
	opForget      = 2 // no reply
	opGetattr     = 3
	opSetattr     = 4
	opReadlink    = 5
	opSymlink     = 6
	opMknod       = 8
	opMkdir       = 9
	opUnlink      = 10
	opRmdir       = 11
	opRename      = 12
	opLink        = 13
	opOpen        = 14
	opRead        = 15
	opWrite       = 16
	opStatfs      = 17
	opRelease     = 18
	opFsync       = 20
	opSetxattr    = 21
	opGetxattr    = 22
	opListxattr   = 23
	opRemovexattr = 24
	opFlush       = 25
	opInit        = 26
	opOpendir     = 27
	opReaddir     = 28
	opReleasedir  = 29
	opFsyncdir    = 30
	opGetlk       = 31
	opSetlk       = 32
	opSetlkw      = 33
	opAccess      = 34
	opCreate      = 35
	opInterrupt   = 36
	opBmap        = 37
	opDestroy     = 38
	opIoctl       = 39 // Linux?
	opPoll        = 40 // Linux?

	// OS X
	opSetvolname = 61
	opGetxtimes  = 62
	opExchange   = 63
)

type entryOut struct {
	Nodeid         uint64 // Inode ID
	Generation     uint64 // Inode generation
	EntryValid     uint64 // Cache timeout for the name
	AttrValid      uint64 // Cache timeout for the attributes
	EntryValidNsec uint32
	AttrValidNsec  uint32
	Attr           attr
}

func entryOutSize(p Protocol) uintptr {
	switch {
	case p.LT(Protocol{7, 9}):
		return unsafe.Offsetof(entryOut{}.Attr) + unsafe.Offsetof(entryOut{}.Attr.Blksize)
	default:
		return unsafe.Sizeof(entryOut{})
	}
}

type forgetIn struct {
	Nlookup uint64
}

type getattrIn struct {
	GetattrFlags uint32
	_            uint32
	Fh           uint64
}

type attrOut struct {
	AttrValid     uint64 // Cache timeout for the attributes
	AttrValidNsec uint32
	_             uint32
	Attr          attr
}

func attrOutSize(p Protocol) uintptr {
	switch {
	case p.LT(Protocol{7, 9}):
		return unsafe.Offsetof(attrOut{}.Attr) + unsafe.Offsetof(attrOut{}.Attr.Blksize)
	default:
		return unsafe.Sizeof(attrOut{})
	}
}

// OS X
type getxtimesOut struct {
	Bkuptime     uint64
	Crtime       uint64
	BkuptimeNsec uint32
	CrtimeNsec   uint32
}

type mknodIn struct {
	Mode  uint32
	Rdev  uint32
	Umask uint32
	_     uint32
	// "filename\x00" follows.
}

func mknodInSize(p Protocol) uintptr {
	switch {
	case p.LT(Protocol{7, 12}):
		return unsafe.Offsetof(mknodIn{}.Umask)
	default:
		return unsafe.Sizeof(mknodIn{})
	}
}

type mkdirIn struct {
	Mode  uint32
	Umask uint32
	// filename follows
}

func mkdirInSize(p Protocol) uintptr {
	switch {
	case p.LT(Protocol{7, 12}):
		return unsafe.Offsetof(mkdirIn{}.Umask) + 4
	default:
		return unsafe.Sizeof(mkdirIn{})
	}
}

type renameIn struct {
	Newdir uint64
	// "oldname\x00newname\x00" follows
}

// OS X
type exchangeIn struct {
	Olddir  uint64
	Newdir  uint64
	Options uint64
	// "oldname\x00newname\x00" follows
}

type linkIn struct {
	Oldnodeid uint64
}

type setattrInCommon struct {
	Valid     uint32
	_         uint32
	Fh        uint64
	Size      uint64
	LockOwner uint64 // unused on OS X?
	Atime     uint64
	Mtime     uint64
	Unused2   uint64
	AtimeNsec uint32
	MtimeNsec uint32
	Unused3   uint32
	Mode      uint32
	Unused4   uint32
	Uid       uint32
	Gid       uint32
	Unused5   uint32
}

type openIn struct {
	Flags  uint32
	Unused uint32
}

type openOut struct {
	Fh        uint64
	OpenFlags uint32
	_         uint32
}

type createIn struct {
	Flags uint32
	Mode  uint32
	Umask uint32
	_     uint32
}

func createInSize(p Protocol) uintptr {
	switch {
	case p.LT(Protocol{7, 12}):
		return unsafe.Offsetof(createIn{}.Umask)
	default:
		return unsafe.Sizeof(createIn{})
	}
}

type releaseIn struct {
	Fh           uint64
	Flags        uint32
	ReleaseFlags uint32
	LockOwner    uint32
}

type flushIn struct {
	Fh         uint64
	FlushFlags uint32
	_          uint32
	LockOwner  uint64
}

type readIn struct {
	Fh        uint64
	Offset    uint64
	Size      uint32
	ReadFlags uint32
	LockOwner uint64
	Flags     uint32
	_         uint32
}

func readInSize(p Protocol) uintptr {
	switch {
	case p.LT(Protocol{7, 9}):
		return unsafe.Offsetof(readIn{}.ReadFlags) + 4
	default:
		return unsafe.Sizeof(readIn{})
	}
}

// The ReadFlags are passed in ReadRequest.
type ReadFlags uint32

const (
	// LockOwner field is valid.
	ReadLockOwner ReadFlags = 1 << 1
)

var readFlagNames = []flagName{
	{uint32(ReadLockOwner), "ReadLockOwner"},
}

func (fl ReadFlags) String() string {
	return flagString(uint32(fl), readFlagNames)
}

type writeIn struct {
	Fh         uint64
	Offset     uint64
	Size       uint32
	WriteFlags uint32
	LockOwner  uint64
	Flags      uint32
	_          uint32
}

func writeInSize(p Protocol) uintptr {
	switch {
	case p.LT(Protocol{7, 9}):
		return unsafe.Offsetof(writeIn{}.LockOwner)
	default:
		return unsafe.Sizeof(writeIn{})
	}
}

type writeOut struct {
	Size uint32
	_    uint32
}

// The WriteFlags are passed in WriteRequest.
type WriteFlags uint32

const (
	WriteCache WriteFlags = 1 << 0
	// LockOwner field is valid.
	WriteLockOwner WriteFlags = 1 << 1
)

var writeFlagNames = []flagName{
	{uint32(WriteCache), "WriteCache"},
	{uint32(WriteLockOwner), "WriteLockOwner"},
}

func (fl WriteFlags) String() string {
	return flagString(uint32(fl), writeFlagNames)
}

type statfsOut struct {
	St kstatfs
}

type fsyncIn struct {
	Fh         uint64
	FsyncFlags uint32
	_          uint32
}

type setxattrInCommon struct {
	Size  uint32
	Flags uint32
}

func (setxattrInCommon) position() uint32 {
	return 0
}

type getxattrInCommon struct {
	Size uint32
	_    uint32
}

func (getxattrInCommon) position() uint32 {
	return 0
}

type getxattrOut struct {
	Size uint32
	_    uint32
}

type lkIn struct {
	Fh      uint64
	Owner   uint64
	Lk      fileLock
	LkFlags uint32
	_       uint32
}

func lkInSize(p Protocol) uintptr {
	switch {
	case p.LT(Protocol{7, 9}):
		return unsafe.Offsetof(lkIn{}.LkFlags)
	default:
		return unsafe.Sizeof(lkIn{})
	}
}

type lkOut struct {
	Lk fileLock
}

type accessIn struct {
	Mask uint32
	_    uint32
}

type initIn struct {
	Major        uint32
	Minor        uint32
	MaxReadahead uint32
	Flags        uint32
}

const initInSize = int(unsafe.Sizeof(initIn{}))

type initOut struct {
	Major        uint32
	Minor        uint32
	MaxReadahead uint32
	Flags        uint32
	Unused       uint32
	MaxWrite     uint32
}

type interruptIn struct {
	Unique uint64
}

type bmapIn struct {
	Block     uint64
	BlockSize uint32
	_         uint32
}

type bmapOut struct {
	Block uint64
}

type inHeader struct {
	Len    uint32
	Opcode uint32
	Unique uint64
	Nodeid uint64
	Uid    uint32
	Gid    uint32
	Pid    uint32
	_      uint32
}

const inHeaderSize = int(unsafe.Sizeof(inHeader{}))

type outHeader struct {
	Len    uint32
	Error  int32
	Unique uint64
}

type dirent struct {
	Ino     uint64
	Off     uint64
	Namelen uint32
	Type    uint32
	Name    [0]byte
}

const direntSize = 8 + 8 + 4 + 4

const (
	notifyCodePoll       int32 = 1
	notifyCodeInvalInode int32 = 2
	notifyCodeInvalEntry int32 = 3
)

type notifyInvalInodeOut struct {
	Ino uint64
	Off int64
	Len int64
}

type notifyInvalEntryOut struct {
	Parent  uint64
	Namelen uint32
	_       uint32
}
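The flagName tables above drive every String method in this file: flagString collects a "+"-joined name for each set bit and renders any leftover bits in hex, returning "0" for an empty value. For illustration (this snippet is not part of the vendored code):

	fl := fuse.OpenReadWrite | fuse.OpenAppend
	fmt.Println(fl.String())                // "OpenReadWrite+OpenAppend"
	fmt.Println(fuse.OpenReadOnly.String()) // "OpenReadOnly" (access modes are alternatives, not bits)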
88 vendor/bazil.org/fuse/fuse_kernel_darwin.go generated vendored Normal file
@@ -0,0 +1,88 @@
package fuse

import (
	"time"
)

type attr struct {
	Ino        uint64
	Size       uint64
	Blocks     uint64
	Atime      uint64
	Mtime      uint64
	Ctime      uint64
	Crtime_    uint64 // OS X only
	AtimeNsec  uint32
	MtimeNsec  uint32
	CtimeNsec  uint32
	CrtimeNsec uint32 // OS X only
	Mode       uint32
	Nlink      uint32
	Uid        uint32
	Gid        uint32
	Rdev       uint32
	Flags_     uint32 // OS X only; see chflags(2)
	Blksize    uint32
	padding    uint32
}

func (a *attr) SetCrtime(s uint64, ns uint32) {
	a.Crtime_, a.CrtimeNsec = s, ns
}

func (a *attr) SetFlags(f uint32) {
	a.Flags_ = f
}

type setattrIn struct {
	setattrInCommon

	// OS X only
	Bkuptime_    uint64
	Chgtime_     uint64
	Crtime       uint64
	BkuptimeNsec uint32
	ChgtimeNsec  uint32
	CrtimeNsec   uint32
	Flags_       uint32 // see chflags(2)
}

func (in *setattrIn) BkupTime() time.Time {
	return time.Unix(int64(in.Bkuptime_), int64(in.BkuptimeNsec))
}

func (in *setattrIn) Chgtime() time.Time {
	return time.Unix(int64(in.Chgtime_), int64(in.ChgtimeNsec))
}

func (in *setattrIn) Flags() uint32 {
	return in.Flags_
}

func openFlags(flags uint32) OpenFlags {
	return OpenFlags(flags)
}

type getxattrIn struct {
	getxattrInCommon

	// OS X only
	Position uint32
	Padding  uint32
}

func (g *getxattrIn) position() uint32 {
	return g.Position
}

type setxattrIn struct {
	setxattrInCommon

	// OS X only
	Position uint32
	Padding  uint32
}

func (s *setxattrIn) position() uint32 {
	return s.Position
}
62 vendor/bazil.org/fuse/fuse_kernel_freebsd.go generated vendored Normal file
@@ -0,0 +1,62 @@
package fuse

import "time"

type attr struct {
	Ino       uint64
	Size      uint64
	Blocks    uint64
	Atime     uint64
	Mtime     uint64
	Ctime     uint64
	AtimeNsec uint32
	MtimeNsec uint32
	CtimeNsec uint32
	Mode      uint32
	Nlink     uint32
	Uid       uint32
	Gid       uint32
	Rdev      uint32
	Blksize   uint32
	padding   uint32
}

func (a *attr) Crtime() time.Time {
	return time.Time{}
}

func (a *attr) SetCrtime(s uint64, ns uint32) {
	// ignored on freebsd
}

func (a *attr) SetFlags(f uint32) {
	// ignored on freebsd
}

type setattrIn struct {
	setattrInCommon
}

func (in *setattrIn) BkupTime() time.Time {
	return time.Time{}
}

func (in *setattrIn) Chgtime() time.Time {
	return time.Time{}
}

func (in *setattrIn) Flags() uint32 {
	return 0
}

func openFlags(flags uint32) OpenFlags {
	return OpenFlags(flags)
}

type getxattrIn struct {
	getxattrInCommon
}

type setxattrIn struct {
	setxattrInCommon
}
70 vendor/bazil.org/fuse/fuse_kernel_linux.go generated vendored Normal file
@@ -0,0 +1,70 @@
package fuse

import "time"

type attr struct {
	Ino       uint64
	Size      uint64
	Blocks    uint64
	Atime     uint64
	Mtime     uint64
	Ctime     uint64
	AtimeNsec uint32
	MtimeNsec uint32
	CtimeNsec uint32
	Mode      uint32
	Nlink     uint32
	Uid       uint32
	Gid       uint32
	Rdev      uint32
	Blksize   uint32
	_         uint32
}

func (a *attr) Crtime() time.Time {
	return time.Time{}
}

func (a *attr) SetCrtime(s uint64, ns uint32) {
	// Ignored on Linux.
}

func (a *attr) SetFlags(f uint32) {
	// Ignored on Linux.
}

type setattrIn struct {
	setattrInCommon
}

func (in *setattrIn) BkupTime() time.Time {
	return time.Time{}
}

func (in *setattrIn) Chgtime() time.Time {
	return time.Time{}
}

func (in *setattrIn) Flags() uint32 {
	return 0
}

func openFlags(flags uint32) OpenFlags {
	// on amd64, the 32-bit O_LARGEFILE flag is always seen;
	// on i386, the flag probably depends on the app
	// requesting, but in any case should be utterly
	// uninteresting to us here; our kernel protocol messages
	// are not directly related to the client app's kernel
	// API/ABI
	flags &^= 0x8000

	return OpenFlags(flags)
}

type getxattrIn struct {
	getxattrInCommon
}

type setxattrIn struct {
	setxattrInCommon
}
1 vendor/bazil.org/fuse/fuse_kernel_std.go generated vendored Normal file
@@ -0,0 +1 @@
package fuse
7 vendor/bazil.org/fuse/fuse_linux.go generated vendored Normal file
@@ -0,0 +1,7 @@
package fuse

// Maximum file write size we are prepared to receive from the kernel.
//
// Linux 4.2.0 has been observed to cap this value at 128kB
// (FUSE_MAX_PAGES_PER_REQ=32, 4kB pages).
const maxWrite = 128 * 1024
20 vendor/bazil.org/fuse/fuseutil/fuseutil.go generated vendored Normal file
@@ -0,0 +1,20 @@
package fuseutil // import "bazil.org/fuse/fuseutil"

import (
	"bazil.org/fuse"
)

// HandleRead handles a read request assuming that data is the entire file content.
// It adjusts the amount returned in resp according to req.Offset and req.Size.
func HandleRead(req *fuse.ReadRequest, resp *fuse.ReadResponse, data []byte) {
	if req.Offset >= int64(len(data)) {
		data = nil
	} else {
		data = data[req.Offset:]
	}
	if len(data) > req.Size {
		data = data[:req.Size]
	}
	n := copy(resp.Data[:req.Size], data)
	resp.Data = resp.Data[:n]
}
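A hedged usage sketch (myFile and its contents field are hypothetical, not taken from this diff): HandleRead lets a node that holds its whole content in memory implement its read handler in one line, leaving the offset and size clipping to the helper:

	func (f *myFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
		// resp.Data becomes the window of f.contents selected by req.Offset/req.Size, clipped at EOF
		fuseutil.HandleRead(req, resp, f.contents)
		return nil
	}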
8 vendor/bazil.org/fuse/go.mod generated vendored Normal file
@@ -0,0 +1,8 @@
module bazil.org/fuse

go 1.13

require (
	github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c
	golang.org/x/sys v0.0.0-20191210023423-ac6580df4449
)
4 vendor/bazil.org/fuse/go.sum generated vendored Normal file
@@ -0,0 +1,4 @@
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ=
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
38 vendor/bazil.org/fuse/mount.go generated vendored Normal file
@@ -0,0 +1,38 @@
package fuse

import (
	"bufio"
	"errors"
	"io"
	"log"
	"sync"
)

var (
	// ErrOSXFUSENotFound is returned from Mount when the OSXFUSE
	// installation is not detected.
	//
	// Only happens on OS X. Make sure OSXFUSE is installed, or see
	// OSXFUSELocations for customization.
	ErrOSXFUSENotFound = errors.New("cannot locate OSXFUSE")
)

func neverIgnoreLine(line string) bool {
	return false
}

func lineLogger(wg *sync.WaitGroup, prefix string, ignore func(line string) bool, r io.ReadCloser) {
	defer wg.Done()

	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := scanner.Text()
		if ignore(line) {
			continue
		}
		log.Printf("%s: %s", prefix, line)
	}
	if err := scanner.Err(); err != nil {
		log.Printf("%s, error reading: %v", prefix, err)
	}
}
208 vendor/bazil.org/fuse/mount_darwin.go generated vendored Normal file
@@ -0,0 +1,208 @@
package fuse

import (
	"errors"
	"fmt"
	"log"
	"os"
	"os/exec"
	"path"
	"strconv"
	"strings"
	"sync"
	"syscall"
)

var (
	errNoAvail   = errors.New("no available fuse devices")
	errNotLoaded = errors.New("osxfuse is not loaded")
)

func loadOSXFUSE(bin string) error {
	cmd := exec.Command(bin)
	cmd.Dir = "/"
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	err := cmd.Run()
	return err
}

func openOSXFUSEDev(devPrefix string) (*os.File, error) {
	var f *os.File
	var err error
	for i := uint64(0); ; i++ {
		path := devPrefix + strconv.FormatUint(i, 10)
		f, err = os.OpenFile(path, os.O_RDWR, 0000)
		if os.IsNotExist(err) {
			if i == 0 {
				// not even the first device was found -> fuse is not loaded
				return nil, errNotLoaded
			}

			// we've run out of kernel-provided devices
			return nil, errNoAvail
		}

		if err2, ok := err.(*os.PathError); ok && err2.Err == syscall.EBUSY {
			// try the next one
			continue
		}

		if err != nil {
			return nil, err
		}
		return f, nil
	}
}

func handleMountOSXFUSE(helperName string, errCh chan<- error) func(line string) (ignore bool) {
	var noMountpointPrefix = helperName + `: `
	const noMountpointSuffix = `: No such file or directory`
	return func(line string) (ignore bool) {
		if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) {
			// re-extract it from the error message in case some layer
			// changed the path
			mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)]
			err := &MountpointDoesNotExistError{
				Path: mountpoint,
			}
			select {
			case errCh <- err:
				return true
			default:
				// not the first error; fall back to logging it
				return false
			}
		}

		return false
	}
}

// isBoringMountOSXFUSEError returns whether the Wait error is
// uninteresting; exit status 64 is.
func isBoringMountOSXFUSEError(err error) bool {
	if err, ok := err.(*exec.ExitError); ok && err.Exited() {
		if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 64 {
			return true
		}
	}
	return false
}

func callMount(bin string, daemonVar string, dir string, conf *mountConfig, f *os.File, ready chan<- struct{}, errp *error) error {
	for k, v := range conf.options {
		if strings.Contains(k, ",") || strings.Contains(v, ",") {
			// Silly limitation but the mount helper does not
			// understand any escaping. See TestMountOptionCommaError.
			return fmt.Errorf("mount options cannot contain commas on darwin: %q=%q", k, v)
		}
	}
	cmd := exec.Command(
		bin,
		"-o", conf.getOptions(),
		// Tell osxfuse-kext how large our buffer is. It must split
		// writes larger than this into multiple writes.
		//
		// OSXFUSE seems to ignore InitResponse.MaxWrite, and uses
		// this instead.
		"-o", "iosize="+strconv.FormatUint(maxWrite, 10),
		// refers to fd passed in cmd.ExtraFiles
		"3",
		dir,
	)
	cmd.ExtraFiles = []*os.File{f}
	cmd.Env = os.Environ()
	// OSXFUSE <3.3.0
	cmd.Env = append(cmd.Env, "MOUNT_FUSEFS_CALL_BY_LIB=")
	// OSXFUSE >=3.3.0
	cmd.Env = append(cmd.Env, "MOUNT_OSXFUSE_CALL_BY_LIB=")

	daemon := os.Args[0]
	if daemonVar != "" {
		cmd.Env = append(cmd.Env, daemonVar+"="+daemon)
	}

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return fmt.Errorf("setting up mount_osxfusefs stderr: %v", err)
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return fmt.Errorf("setting up mount_osxfusefs stderr: %v", err)
	}

	if err := cmd.Start(); err != nil {
		return fmt.Errorf("mount_osxfusefs: %v", err)
	}
	helperErrCh := make(chan error, 1)
	go func() {
		var wg sync.WaitGroup
		wg.Add(2)
		go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout)
		helperName := path.Base(bin)
		go lineLogger(&wg, "mount helper error", handleMountOSXFUSE(helperName, helperErrCh), stderr)
		wg.Wait()
		if err := cmd.Wait(); err != nil {
			// see if we have a better error to report
			select {
			case helperErr := <-helperErrCh:
				// log the Wait error if it's not what we expected
				if !isBoringMountOSXFUSEError(err) {
					log.Printf("mount helper failed: %v", err)
				}
				// and now return what we grabbed from stderr as the real
				// error
				*errp = helperErr
				close(ready)
				return
			default:
				// nope, fall back to generic message
			}

			*errp = fmt.Errorf("mount_osxfusefs: %v", err)
			close(ready)
			return
		}

		*errp = nil
		close(ready)
	}()
	return nil
}

func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*os.File, error) {
	locations := conf.osxfuseLocations
	if locations == nil {
		locations = []OSXFUSEPaths{
			OSXFUSELocationV3,
			OSXFUSELocationV2,
		}
	}
	for _, loc := range locations {
		if _, err := os.Stat(loc.Mount); os.IsNotExist(err) {
			// try the other locations
			continue
		}

		f, err := openOSXFUSEDev(loc.DevicePrefix)
		if err == errNotLoaded {
			err = loadOSXFUSE(loc.Load)
			if err != nil {
				return nil, err
			}
			// try again
			f, err = openOSXFUSEDev(loc.DevicePrefix)
		}
		if err != nil {
			return nil, err
		}
		err = callMount(loc.Mount, loc.DaemonVar, dir, conf, f, ready, errp)
		if err != nil {
			f.Close()
			return nil, err
		}
		return f, nil
	}
	return nil, ErrOSXFUSENotFound
}
111 vendor/bazil.org/fuse/mount_freebsd.go generated vendored Normal file
@@ -0,0 +1,111 @@
package fuse

import (
	"fmt"
	"log"
	"os"
	"os/exec"
	"strings"
	"sync"
	"syscall"
)

func handleMountFusefsStderr(errCh chan<- error) func(line string) (ignore bool) {
	return func(line string) (ignore bool) {
		const (
			noMountpointPrefix = `mount_fusefs: `
			noMountpointSuffix = `: No such file or directory`
		)
		if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) {
			// re-extract it from the error message in case some layer
			// changed the path
			mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)]
			err := &MountpointDoesNotExistError{
				Path: mountpoint,
			}
			select {
			case errCh <- err:
				return true
			default:
				// not the first error; fall back to logging it
				return false
			}
		}

		return false
	}
}

// isBoringMountFusefsError returns whether the Wait error is
// uninteresting; exit status 1 is.
func isBoringMountFusefsError(err error) bool {
	if err, ok := err.(*exec.ExitError); ok && err.Exited() {
		if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 1 {
			return true
		}
	}
	return false
}

func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*os.File, error) {
	for k, v := range conf.options {
		if strings.Contains(k, ",") || strings.Contains(v, ",") {
			// Silly limitation but the mount helper does not
			// understand any escaping. See TestMountOptionCommaError.
			return nil, fmt.Errorf("mount options cannot contain commas on FreeBSD: %q=%q", k, v)
		}
	}

	f, err := os.OpenFile("/dev/fuse", os.O_RDWR, 0000)
	if err != nil {
		*errp = err
		return nil, err
	}

	cmd := exec.Command(
		"/sbin/mount_fusefs",
		"--safe",
		"-o", conf.getOptions(),
		"3",
		dir,
	)
	cmd.ExtraFiles = []*os.File{f}

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("setting up mount_fusefs stderr: %v", err)
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, fmt.Errorf("setting up mount_fusefs stderr: %v", err)
	}

	if err := cmd.Start(); err != nil {
		return nil, fmt.Errorf("mount_fusefs: %v", err)
	}
	helperErrCh := make(chan error, 1)
	var wg sync.WaitGroup
	wg.Add(2)
	go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout)
	go lineLogger(&wg, "mount helper error", handleMountFusefsStderr(helperErrCh), stderr)
	wg.Wait()
	if err := cmd.Wait(); err != nil {
		// see if we have a better error to report
		select {
		case helperErr := <-helperErrCh:
			// log the Wait error if it's not what we expected
			if !isBoringMountFusefsError(err) {
				log.Printf("mount helper failed: %v", err)
			}
			// and now return what we grabbed from stderr as the real
			// error
			return nil, helperErr
		default:
			// nope, fall back to generic message
		}
		return nil, fmt.Errorf("mount_fusefs: %v", err)
	}

	close(ready)
	return f, nil
}
150 vendor/bazil.org/fuse/mount_linux.go generated vendored Normal file
@@ -0,0 +1,150 @@
package fuse

import (
	"fmt"
	"log"
	"net"
	"os"
	"os/exec"
	"strings"
	"sync"
	"syscall"
)

func handleFusermountStderr(errCh chan<- error) func(line string) (ignore bool) {
	return func(line string) (ignore bool) {
		if line == `fusermount: failed to open /etc/fuse.conf: Permission denied` {
			// Silence this particular message, it occurs way too
			// commonly and isn't very relevant to whether the mount
			// succeeds or not.
			return true
		}

		const (
			noMountpointPrefix = `fusermount: failed to access mountpoint `
			noMountpointSuffix = `: No such file or directory`
		)
		if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) {
			// re-extract it from the error message in case some layer
			// changed the path
			mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)]
			err := &MountpointDoesNotExistError{
				Path: mountpoint,
			}
			select {
			case errCh <- err:
				return true
			default:
				// not the first error; fall back to logging it
				return false
			}
		}

		return false
	}
}

// isBoringFusermountError returns whether the Wait error is
// uninteresting; exit status 1 is.
func isBoringFusermountError(err error) bool {
	if err, ok := err.(*exec.ExitError); ok && err.Exited() {
		if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 1 {
			return true
		}
	}
	return false
}

func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (fusefd *os.File, err error) {
	// linux mount is never delayed
	close(ready)

	fds, err := syscall.Socketpair(syscall.AF_FILE, syscall.SOCK_STREAM, 0)
	if err != nil {
		return nil, fmt.Errorf("socketpair error: %v", err)
	}

	writeFile := os.NewFile(uintptr(fds[0]), "fusermount-child-writes")
	defer writeFile.Close()

	readFile := os.NewFile(uintptr(fds[1]), "fusermount-parent-reads")
	defer readFile.Close()

	cmd := exec.Command(
		"fusermount",
		"-o", conf.getOptions(),
		"--",
		dir,
	)
	cmd.Env = append(os.Environ(), "_FUSE_COMMFD=3")

	cmd.ExtraFiles = []*os.File{writeFile}

	var wg sync.WaitGroup
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("setting up fusermount stderr: %v", err)
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, fmt.Errorf("setting up fusermount stderr: %v", err)
	}

	if err := cmd.Start(); err != nil {
		return nil, fmt.Errorf("fusermount: %v", err)
	}
	helperErrCh := make(chan error, 1)
	wg.Add(2)
	go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout)
	go lineLogger(&wg, "mount helper error", handleFusermountStderr(helperErrCh), stderr)
	wg.Wait()
	if err := cmd.Wait(); err != nil {
		// see if we have a better error to report
		select {
		case helperErr := <-helperErrCh:
			// log the Wait error if it's not what we expected
			if !isBoringFusermountError(err) {
				log.Printf("mount helper failed: %v", err)
			}
			// and now return what we grabbed from stderr as the real
			// error
			return nil, helperErr
		default:
			// nope, fall back to generic message
		}

		return nil, fmt.Errorf("fusermount: %v", err)
	}

	c, err := net.FileConn(readFile)
	if err != nil {
		return nil, fmt.Errorf("FileConn from fusermount socket: %v", err)
	}
	defer c.Close()

	uc, ok := c.(*net.UnixConn)
	if !ok {
		return nil, fmt.Errorf("unexpected FileConn type; expected UnixConn, got %T", c)
	}

	buf := make([]byte, 32) // expect 1 byte
	oob := make([]byte, 32) // expect 24 bytes
	_, oobn, _, _, err := uc.ReadMsgUnix(buf, oob)
	scms, err := syscall.ParseSocketControlMessage(oob[:oobn])
	if err != nil {
		return nil, fmt.Errorf("ParseSocketControlMessage: %v", err)
	}
	if len(scms) != 1 {
		return nil, fmt.Errorf("expected 1 SocketControlMessage; got scms = %#v", scms)
	}
	scm := scms[0]
	gotFds, err := syscall.ParseUnixRights(&scm)
	if err != nil {
		return nil, fmt.Errorf("syscall.ParseUnixRights: %v", err)
	}
	if len(gotFds) != 1 {
		return nil, fmt.Errorf("wanted 1 fd; got %#v", gotFds)
	}
	f := os.NewFile(uintptr(gotFds[0]), "/dev/fuse")
	return f, nil
}
296 vendor/bazil.org/fuse/options.go generated vendored Normal file
@@ -0,0 +1,296 @@
package fuse

import (
	"errors"
	"strings"
)

func dummyOption(conf *mountConfig) error {
	return nil
}

// mountConfig holds the configuration for a mount operation.
// Use it by passing MountOption values to Mount.
type mountConfig struct {
	options          map[string]string
	maxReadahead     uint32
	initFlags        InitFlags
	osxfuseLocations []OSXFUSEPaths
}

func escapeComma(s string) string {
	s = strings.Replace(s, `\`, `\\`, -1)
	s = strings.Replace(s, `,`, `\,`, -1)
	return s
}

// getOptions makes a string of options suitable for passing to FUSE
// mount flag `-o`. Returns an empty string if no options were set.
// Any platform specific adjustments should happen before the call.
func (m *mountConfig) getOptions() string {
	var opts []string
	for k, v := range m.options {
		k = escapeComma(k)
		if v != "" {
			k += "=" + escapeComma(v)
		}
		opts = append(opts, k)
	}
	return strings.Join(opts, ",")
}

type mountOption func(*mountConfig) error

// MountOption is passed to Mount to change the behavior of the mount.
type MountOption mountOption

// FSName sets the file system name (also called source) that is
// visible in the list of mounted file systems.
//
// FreeBSD ignores this option.
func FSName(name string) MountOption {
	return func(conf *mountConfig) error {
		conf.options["fsname"] = name
		return nil
	}
}

// Subtype sets the subtype of the mount. The main type is always
// `fuse`. The type in a list of mounted file systems will look like
// `fuse.foo`.
//
// OS X ignores this option.
// FreeBSD ignores this option.
func Subtype(fstype string) MountOption {
	return func(conf *mountConfig) error {
		conf.options["subtype"] = fstype
		return nil
	}
}

// LocalVolume sets the volume to be local (instead of network),
// changing the behavior of Finder, Spotlight, and such.
//
// OS X only. Others ignore this option.
func LocalVolume() MountOption {
	return localVolume
}

// VolumeName sets the volume name shown in Finder.
//
// OS X only. Others ignore this option.
func VolumeName(name string) MountOption {
	return volumeName(name)
}

// NoAppleDouble makes OSXFUSE disallow files with names used by OS X
// to store extended attributes on file systems that do not support
// them natively.
//
// Such file names are:
//
//     ._*
//     .DS_Store
//
// OS X only. Others ignore this option.
func NoAppleDouble() MountOption {
	return noAppleDouble
}

// NoAppleXattr makes OSXFUSE disallow extended attributes with the
// prefix "com.apple.". This disables persistent Finder state and
// other such information.
//
// OS X only. Others ignore this option.
func NoAppleXattr() MountOption {
	return noAppleXattr
}

// NoBrowse makes OSXFUSE mark the volume as non-browsable, so that
// Finder won't automatically browse it.
//
// OS X only. Others ignore this option.
func NoBrowse() MountOption {
	return noBrowse
}

// ExclCreate causes O_EXCL flag to be set for only "truly" exclusive creates,
// i.e. create calls for which the initiator explicitly set the O_EXCL flag.
//
// OSXFUSE expects all create calls to return EEXIST in case the file
// already exists, regardless of whether O_EXCL was specified or not.
// To ensure this behavior, it normally sets OpenExclusive for all
// Create calls, regardless of whether the original call had it set.
// For distributed filesystems, that may force every file create to be
// a distributed consensus action, causing undesirable delays.
//
// This option makes the FUSE filesystem see the original flag value,
// and better decide when to ensure global consensus.
//
// Note that returning EEXIST on existing file create is still
// expected with OSXFUSE, regardless of the presence of the
// OpenExclusive flag.
//
// For more information, see
// https://github.com/osxfuse/osxfuse/issues/209
//
// OS X only. Others ignore this options.
// Requires OSXFUSE 3.4.1 or newer.
func ExclCreate() MountOption {
	return exclCreate
}

// DaemonTimeout sets the time in seconds between a request and a reply before
// the FUSE mount is declared dead.
//
// OS X and FreeBSD only. Others ignore this option.
func DaemonTimeout(name string) MountOption {
	return daemonTimeout(name)
}

// AllowOther allows other users to access the file system.
func AllowOther() MountOption {
	return func(conf *mountConfig) error {
		conf.options["allow_other"] = ""
		return nil
	}
}

// AllowDev enables interpreting character or block special devices on the
// filesystem.
func AllowDev() MountOption {
	return func(conf *mountConfig) error {
		conf.options["dev"] = ""
		return nil
	}
}

// AllowSUID allows set-user-identifier or set-group-identifier bits to take
// effect.
func AllowSUID() MountOption {
	return func(conf *mountConfig) error {
		conf.options["suid"] = ""
		return nil
	}
}

// DefaultPermissions makes the kernel enforce access control based on
// the file mode (as in chmod).
//
// Without this option, the Node itself decides what is and is not
// allowed. This is normally ok because FUSE file systems cannot be
// accessed by other users without AllowOther.
//
// FreeBSD ignores this option.
func DefaultPermissions() MountOption {
	return func(conf *mountConfig) error {
		conf.options["default_permissions"] = ""
		return nil
	}
}

// ReadOnly makes the mount read-only.
func ReadOnly() MountOption {
	return func(conf *mountConfig) error {
		conf.options["ro"] = ""
		return nil
	}
}

// MaxReadahead sets the number of bytes that can be prefetched for
// sequential reads. The kernel can enforce a maximum value lower than
// this.
//
// This setting makes the kernel perform speculative reads that do not
// originate from any client process. This usually tremendously
// improves read performance.
func MaxReadahead(n uint32) MountOption {
	return func(conf *mountConfig) error {
		conf.maxReadahead = n
		return nil
	}
}

// AsyncRead enables multiple outstanding read requests for the same
// handle. Without this, there is at most one request in flight at a
// time.
func AsyncRead() MountOption {
	return func(conf *mountConfig) error {
		conf.initFlags |= InitAsyncRead
		return nil
	}
}

// WritebackCache enables the kernel to buffer writes before sending
// them to the FUSE server. Without this, writethrough caching is
// used.
func WritebackCache() MountOption {
	return func(conf *mountConfig) error {
		conf.initFlags |= InitWritebackCache
		return nil
	}
}

// OSXFUSEPaths describes the paths used by an installed OSXFUSE
// version. See OSXFUSELocationV3 for typical values.
type OSXFUSEPaths struct {
	// Prefix for the device file. At mount time, an incrementing
	// number is suffixed until a free FUSE device is found.
	DevicePrefix string
	// Path of the load helper, used to load the kernel extension if
	// no device files are found.
	Load string
	// Path of the mount helper, used for the actual mount operation.
	Mount string
	// Environment variable used to pass the path to the executable
	// calling the mount helper.
	DaemonVar string
}

// Default paths for OSXFUSE. See OSXFUSELocations.
var (
	OSXFUSELocationV3 = OSXFUSEPaths{
		DevicePrefix: "/dev/osxfuse",
		Load:         "/Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse",
		Mount:        "/Library/Filesystems/osxfuse.fs/Contents/Resources/mount_osxfuse",
		DaemonVar:    "MOUNT_OSXFUSE_DAEMON_PATH",
	}
	OSXFUSELocationV2 = OSXFUSEPaths{
		DevicePrefix: "/dev/osxfuse",
		Load:         "/Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs",
		Mount:        "/Library/Filesystems/osxfusefs.fs/Support/mount_osxfusefs",
		DaemonVar:    "MOUNT_FUSEFS_DAEMON_PATH",
	}
)

// OSXFUSELocations sets where to look for OSXFUSE files. The
// arguments are all the possible locations. The previous locations
// are replaced.
//
// Without this option, OSXFUSELocationV3 and OSXFUSELocationV2 are
// used.
//
// OS X only. Others ignore this option.
func OSXFUSELocations(paths ...OSXFUSEPaths) MountOption {
	return func(conf *mountConfig) error {
		if len(paths) == 0 {
			return errors.New("must specify at least one location for OSXFUSELocations")
		}
		// replace previous values, but make a copy so there's no
		// worries about caller mutating their slice
		conf.osxfuseLocations = append(conf.osxfuseLocations[:0], paths...)
		return nil
	}
}

// AllowNonEmptyMount allows the mounting over a non-empty directory.
//
// The files in it will be shadowed by the freshly created mount. By
// default these mounts are rejected to prevent accidental covering up
// of data, which could for example prevent automatic backup.
func AllowNonEmptyMount() MountOption {
	return func(conf *mountConfig) error {
		conf.options["nonempty"] = ""
		return nil
	}
}
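Each MountOption is just a function that mutates mountConfig, so options compose freely at the call site. A sketch under the assumption that Mount (defined in fuse.go, whose diff is suppressed above) takes a mountpoint plus variadic options; the mountpoint path here is hypothetical:

	c, err := fuse.Mount(
		"/mnt/example",
		fuse.FSName("examplefs"),
		fuse.Subtype("examplefs"),
		fuse.ReadOnly(),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()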
40 vendor/bazil.org/fuse/options_darwin.go generated vendored Normal file
@@ -0,0 +1,40 @@
package fuse

func localVolume(conf *mountConfig) error {
	conf.options["local"] = ""
	return nil
}

func volumeName(name string) MountOption {
	return func(conf *mountConfig) error {
		conf.options["volname"] = name
		return nil
	}
}

func daemonTimeout(name string) MountOption {
	return func(conf *mountConfig) error {
		conf.options["daemon_timeout"] = name
		return nil
	}
}

func noAppleXattr(conf *mountConfig) error {
	conf.options["noapplexattr"] = ""
	return nil
}

func noAppleDouble(conf *mountConfig) error {
	conf.options["noappledouble"] = ""
	return nil
}

func exclCreate(conf *mountConfig) error {
	conf.options["excl_create"] = ""
	return nil
}

func noBrowse(conf *mountConfig) error {
	conf.options["nobrowse"] = ""
	return nil
}
32 vendor/bazil.org/fuse/options_freebsd.go generated vendored Normal file
@@ -0,0 +1,32 @@
package fuse

func localVolume(conf *mountConfig) error {
	return nil
}

func volumeName(name string) MountOption {
	return dummyOption
}

func daemonTimeout(name string) MountOption {
	return func(conf *mountConfig) error {
		conf.options["timeout"] = name
		return nil
	}
}

func noAppleXattr(conf *mountConfig) error {
	return nil
}

func noAppleDouble(conf *mountConfig) error {
	return nil
}

func exclCreate(conf *mountConfig) error {
	return nil
}

func noBrowse(conf *mountConfig) error {
	return nil
}
29 vendor/bazil.org/fuse/options_linux.go generated vendored Normal file
@@ -0,0 +1,29 @@
package fuse

func localVolume(conf *mountConfig) error {
	return nil
}

func volumeName(name string) MountOption {
	return dummyOption
}

func daemonTimeout(name string) MountOption {
	return dummyOption
}

func noAppleXattr(conf *mountConfig) error {
	return nil
}

func noAppleDouble(conf *mountConfig) error {
	return nil
}

func exclCreate(conf *mountConfig) error {
	return nil
}

func noBrowse(conf *mountConfig) error {
	return nil
}
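These three platform files (darwin, freebsd, linux) all define the same lowercase helpers, so the portable option code can call volumeName, daemonTimeout and friends without build-tag clutter; a platform that doesn't support an option simply returns a no-op. The dummyOption they return is defined elsewhere in the package, but its required shape is clear from the call sites; a sketch under that assumption:

	// Sketch only: the no-op a platform returns for unsupported options.
	func dummyOption(conf *mountConfig) error {
		return nil // accept and silently ignore the option
	}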
75 vendor/bazil.org/fuse/protocol.go generated vendored Normal file
@@ -0,0 +1,75 @@
package fuse

import (
	"fmt"
)

// Protocol is a FUSE protocol version number.
type Protocol struct {
	Major uint32
	Minor uint32
}

func (p Protocol) String() string {
	return fmt.Sprintf("%d.%d", p.Major, p.Minor)
}

// LT returns whether a is less than b.
func (a Protocol) LT(b Protocol) bool {
	return a.Major < b.Major ||
		(a.Major == b.Major && a.Minor < b.Minor)
}

// GE returns whether a is greater than or equal to b.
func (a Protocol) GE(b Protocol) bool {
	return a.Major > b.Major ||
		(a.Major == b.Major && a.Minor >= b.Minor)
}

func (a Protocol) is79() bool {
	return a.GE(Protocol{7, 9})
}

// HasAttrBlockSize returns whether Attr.BlockSize is respected by the
// kernel.
func (a Protocol) HasAttrBlockSize() bool {
	return a.is79()
}

// HasReadWriteFlags returns whether ReadRequest/WriteRequest
// fields Flags and FileFlags are valid.
func (a Protocol) HasReadWriteFlags() bool {
	return a.is79()
}

// HasGetattrFlags returns whether GetattrRequest field Flags is
// valid.
func (a Protocol) HasGetattrFlags() bool {
	return a.is79()
}

func (a Protocol) is710() bool {
	return a.GE(Protocol{7, 10})
}

// HasOpenNonSeekable returns whether OpenResponse field Flags flag
// OpenNonSeekable is supported.
func (a Protocol) HasOpenNonSeekable() bool {
	return a.is710()
}

func (a Protocol) is712() bool {
	return a.GE(Protocol{7, 12})
}

// HasUmask returns whether CreateRequest/MkdirRequest/MknodRequest
// field Umask is valid.
func (a Protocol) HasUmask() bool {
	return a.is712()
}

// HasInvalidate returns whether InvalidateNode/InvalidateEntry are
// supported.
func (a Protocol) HasInvalidate() bool {
	return a.is712()
}
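The Has* predicates exist so connection code can gate behaviour on the protocol version negotiated with the kernel instead of sprinkling raw version comparisons around. An illustrative sketch of that pattern (the fillAttr helper and the 4096 value are invented for the example; only Protocol and Attr.BlockSize come from the library):

	// Illustrative only: set a field the kernel will honor on protocol >= 7.9.
	func fillAttr(proto fuse.Protocol, attr *fuse.Attr) {
		if proto.HasAttrBlockSize() {
			attr.BlockSize = 4096 // ignored by kernels older than 7.9
		}
	}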
6 vendor/bazil.org/fuse/unmount.go generated vendored Normal file
@@ -0,0 +1,6 @@
package fuse

// Unmount tries to unmount the filesystem mounted at dir.
func Unmount(dir string) error {
	return unmount(dir)
}
21 vendor/bazil.org/fuse/unmount_linux.go generated vendored Normal file
@@ -0,0 +1,21 @@
package fuse

import (
	"bytes"
	"errors"
	"os/exec"
)

func unmount(dir string) error {
	cmd := exec.Command("fusermount", "-u", dir)
	output, err := cmd.CombinedOutput()
	if err != nil {
		if len(output) > 0 {
			output = bytes.TrimRight(output, "\n")
			msg := err.Error() + ": " + string(output)
			err = errors.New(msg)
		}
		return err
	}
	return nil
}
17 vendor/bazil.org/fuse/unmount_std.go generated vendored Normal file
@@ -0,0 +1,17 @@
// +build !linux

package fuse

import (
	"os"
	"syscall"
)

func unmount(dir string) error {
	err := syscall.Unmount(dir, 0)
	if err != nil {
		err = &os.PathError{Op: "unmount", Path: dir, Err: err}
		return err
	}
	return nil
}
202 vendor/cloud.google.com/go/LICENSE generated vendored Normal file
@@ -0,0 +1,202 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
518 vendor/cloud.google.com/go/compute/metadata/metadata.go generated vendored Normal file
@@ -0,0 +1,518 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package metadata provides access to Google Compute Engine (GCE)
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
package metadata // import "cloud.google.com/go/compute/metadata"

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"os"
	"runtime"
	"strings"
	"sync"
	"time"
)

const (
	// metadataIP is the documented metadata server IP address.
	metadataIP = "169.254.169.254"

	// metadataHostEnv is the environment variable specifying the
	// GCE metadata hostname. If empty, the default value of
	// metadataIP ("169.254.169.254") is used instead.
	// This variable name is not defined by any spec, as far as
	// I know; it was made up for the Go package.
	metadataHostEnv = "GCE_METADATA_HOST"

	userAgent = "gcloud-golang/0.1"
)

type cachedValue struct {
	k    string
	trim bool
	mu   sync.Mutex
	v    string
}

var (
	projID  = &cachedValue{k: "project/project-id", trim: true}
	projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
	instID  = &cachedValue{k: "instance/id", trim: true}
)

var defaultClient = &Client{hc: &http.Client{
	Transport: &http.Transport{
		Dial: (&net.Dialer{
			Timeout:   2 * time.Second,
			KeepAlive: 30 * time.Second,
		}).Dial,
	},
}}

// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string

func (suffix NotDefinedError) Error() string {
	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}

func (c *cachedValue) get(cl *Client) (v string, err error) {
	defer c.mu.Unlock()
	c.mu.Lock()
	if c.v != "" {
		return c.v, nil
	}
	if c.trim {
		v, err = cl.getTrimmed(c.k)
	} else {
		v, err = cl.Get(c.k)
	}
	if err == nil {
		c.v = v
	}
	return
}

var (
	onGCEOnce sync.Once
	onGCE     bool
)

// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
	onGCEOnce.Do(initOnGCE)
	return onGCE
}

func initOnGCE() {
	onGCE = testOnGCE()
}

func testOnGCE() bool {
	// The user explicitly said they're on GCE, so trust them.
	if os.Getenv(metadataHostEnv) != "" {
		return true
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	resc := make(chan bool, 2)

	// Try two strategies in parallel.
	// See https://github.com/googleapis/google-cloud-go/issues/194
	go func() {
		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
		req.Header.Set("User-Agent", userAgent)
		res, err := defaultClient.hc.Do(req.WithContext(ctx))
		if err != nil {
			resc <- false
			return
		}
		defer res.Body.Close()
		resc <- res.Header.Get("Metadata-Flavor") == "Google"
	}()

	go func() {
		addrs, err := net.DefaultResolver.LookupHost(ctx, "metadata.google.internal")
		if err != nil || len(addrs) == 0 {
			resc <- false
			return
		}
		resc <- strsContains(addrs, metadataIP)
	}()

	tryHarder := systemInfoSuggestsGCE()
	if tryHarder {
		res := <-resc
		if res {
			// The first strategy succeeded, so let's use it.
			return true
		}
		// Wait for either the DNS or metadata server probe to
		// contradict the other one and say we are running on
		// GCE. Give it a lot of time to do so, since the system
		// info already suggests we're running on a GCE BIOS.
		timer := time.NewTimer(5 * time.Second)
		defer timer.Stop()
		select {
		case res = <-resc:
			return res
		case <-timer.C:
			// Too slow. Who knows what this system is.
			return false
		}
	}

	// There's no hint from the system info that we're running on
	// GCE, so use the first probe's result as truth, whether it's
	// true or false. The goal here is to optimize for speed for
	// users who are NOT running on GCE. We can't assume that
	// either a DNS lookup or an HTTP request to a blackholed IP
	// address is fast. Worst case this should return when the
	// metaClient's Transport.ResponseHeaderTimeout or
	// Transport.Dial.Timeout fires (in two seconds).
	return <-resc
}

// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
	if runtime.GOOS != "linux" {
		// We don't have any non-Linux clues available, at least yet.
		return false
	}
	slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
	name := strings.TrimSpace(string(slurp))
	return name == "Google" || name == "Google Compute Engine"
}

// Subscribe calls Client.Subscribe on the default client.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
	return defaultClient.Subscribe(suffix, fn)
}

// Get calls Client.Get on the default client.
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }

// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return defaultClient.ProjectID() }

// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }

// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) { return defaultClient.InternalIP() }

// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }

// Email calls Client.Email on the default client.
func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) }

// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) { return defaultClient.Hostname() }

// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }

// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) { return defaultClient.InstanceID() }

// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) { return defaultClient.InstanceName() }

// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) { return defaultClient.Zone() }

// InstanceAttributes calls Client.InstanceAttributes on the default client.
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }

// ProjectAttributes calls Client.ProjectAttributes on the default client.
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }

// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
func InstanceAttributeValue(attr string) (string, error) {
	return defaultClient.InstanceAttributeValue(attr)
}

// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
func ProjectAttributeValue(attr string) (string, error) {
	return defaultClient.ProjectAttributeValue(attr)
}

// Scopes calls Client.Scopes on the default client.
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }

func strsContains(ss []string, s string) bool {
	for _, v := range ss {
		if v == s {
			return true
		}
	}
	return false
}

// A Client provides metadata.
type Client struct {
	hc *http.Client
}

// NewClient returns a Client that can be used to fetch metadata.
// Returns the client that uses the specified http.Client for HTTP requests.
// If nil is specified, returns the default client.
func NewClient(c *http.Client) *Client {
	if c == nil {
		return defaultClient
	}

	return &Client{hc: c}
}

// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
	// Using a fixed IP makes it very difficult to spoof the metadata service in
	// a container, which is an important use-case for local testing of cloud
	// deployments. To enable spoofing of the metadata service, the environment
	// variable GCE_METADATA_HOST is first inspected to decide where metadata
	// requests shall go.
	host := os.Getenv(metadataHostEnv)
	if host == "" {
		// Using 169.254.169.254 instead of "metadata" here because Go
		// binaries built with the "netgo" tag and without cgo won't
		// know the search suffix for "metadata" is
		// ".google.internal", and this IP address is documented as
		// being stable anyway.
		host = metadataIP
	}
	u := "http://" + host + "/computeMetadata/v1/" + suffix
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return "", "", err
	}
	req.Header.Set("Metadata-Flavor", "Google")
	req.Header.Set("User-Agent", userAgent)
	res, err := c.hc.Do(req)
	if err != nil {
		return "", "", err
	}
	defer res.Body.Close()
	if res.StatusCode == http.StatusNotFound {
		return "", "", NotDefinedError(suffix)
	}
	all, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", "", err
	}
	if res.StatusCode != 200 {
		return "", "", &Error{Code: res.StatusCode, Message: string(all)}
	}
	return string(all), res.Header.Get("Etag"), nil
}

// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func (c *Client) Get(suffix string) (string, error) {
	val, _, err := c.getETag(suffix)
	return val, err
}

func (c *Client) getTrimmed(suffix string) (s string, err error) {
	s, err = c.Get(suffix)
	s = strings.TrimSpace(s)
	return
}

func (c *Client) lines(suffix string) ([]string, error) {
	j, err := c.Get(suffix)
	if err != nil {
		return nil, err
	}
	s := strings.Split(strings.TrimSpace(j), "\n")
	for i := range s {
		s[i] = strings.TrimSpace(s[i])
	}
	return s, nil
}

// ProjectID returns the current instance's project ID string.
func (c *Client) ProjectID() (string, error) { return projID.get(c) }

// NumericProjectID returns the current instance's numeric project ID.
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }

// InstanceID returns the current VM's numeric instance ID.
func (c *Client) InstanceID() (string, error) { return instID.get(c) }

// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
	return c.getTrimmed("instance/network-interfaces/0/ip")
}

// Email returns the email address associated with the service account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Email(serviceAccount string) (string, error) {
	if serviceAccount == "" {
		serviceAccount = "default"
	}
	return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email")
}

// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
	return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}

// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
	return c.getTrimmed("instance/hostname")
}

// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func (c *Client) InstanceTags() ([]string, error) {
	var s []string
	j, err := c.Get("instance/tags")
	if err != nil {
		return nil, err
	}
	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
		return nil, err
	}
	return s, nil
}

// InstanceName returns the current VM's instance ID string.
func (c *Client) InstanceName() (string, error) {
	return c.getTrimmed("instance/name")
}

// Zone returns the current VM's zone, such as "us-central1-b".
func (c *Client) Zone() (string, error) {
	zone, err := c.getTrimmed("instance/zone")
	// zone is of the form "projects/<projNum>/zones/<zoneName>".
	if err != nil {
		return "", err
	}
	return zone[strings.LastIndex(zone, "/")+1:], nil
}

// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }

// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }

// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
	return c.Get("instance/attributes/" + attr)
}

// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
	return c.Get("project/attributes/" + attr)
}

// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
	if serviceAccount == "" {
		serviceAccount = "default"
	}
	return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
}

// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
	const failedSubscribeSleep = time.Second * 5

	// First check to see if the metadata value exists at all.
	val, lastETag, err := c.getETag(suffix)
	if err != nil {
		return err
	}

	if err := fn(val, true); err != nil {
		return err
	}

	ok := true
	if strings.ContainsRune(suffix, '?') {
		suffix += "&wait_for_change=true&last_etag="
	} else {
		suffix += "?wait_for_change=true&last_etag="
	}
	for {
		val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
		if err != nil {
			if _, deleted := err.(NotDefinedError); !deleted {
				time.Sleep(failedSubscribeSleep)
				continue // Retry on other errors.
			}
			ok = false
		}
		lastETag = etag

		if err := fn(val, ok); err != nil || !ok {
			return err
		}
	}
}

// Error contains an error response from the server.
type Error struct {
	// Code is the HTTP response status code.
	Code int
	// Message is the server response message.
	Message string
}

func (e *Error) Error() string {
	return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
}
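The package thus offers the same surface twice: package-level helpers bound to defaultClient, and a Client you can construct with NewClient for custom transports. A small, self-contained usage sketch (it assumes it runs on GCE, or against a spoofed GCE_METADATA_HOST as the getETag comment describes):

	package main

	import (
		"fmt"
		"log"

		"cloud.google.com/go/compute/metadata"
	)

	func main() {
		if !metadata.OnGCE() {
			log.Println("not on GCE; metadata lookups would fail")
			return
		}
		proj, err := metadata.ProjectID() // cached after the first successful fetch
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("running in project", proj)
	}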
21 vendor/github.com/Azure/azure-pipeline-go/LICENSE generated vendored Normal file
@@ -0,0 +1,21 @@
    MIT License

    Copyright (c) Microsoft Corporation. All rights reserved.

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE
284 vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go generated vendored Normal file
@@ -0,0 +1,284 @@
package pipeline

import (
	"context"
	"net"
	"net/http"
	"os"
	"time"

	"github.com/mattn/go-ieproxy"
)

// The Factory interface represents an object that can create its Policy object. Each HTTP request sent
// requires that this Factory create a new instance of its Policy object.
type Factory interface {
	New(next Policy, po *PolicyOptions) Policy
}

// FactoryFunc is an adapter that allows the use of an ordinary function as a Factory interface.
type FactoryFunc func(next Policy, po *PolicyOptions) PolicyFunc

// New calls f(next,po).
func (f FactoryFunc) New(next Policy, po *PolicyOptions) Policy {
	return f(next, po)
}

// The Policy interface represents a mutable Policy object created by a Factory. The object can mutate/process
// the HTTP request and then forward it on to the next Policy object in the linked-list. The returned
// Response goes backward through the linked-list for additional processing.
// NOTE: Request is passed by value so changes do not change the caller's version of
// the request. However, Request has some fields that reference mutable objects (not strings).
// These references are copied; a deep copy is not performed. Specifically, this means that
// you should avoid modifying the objects referred to by these fields: URL, Header, Body,
// GetBody, TransferEncoding, Form, MultipartForm, Trailer, TLS, Cancel, and Response.
type Policy interface {
	Do(ctx context.Context, request Request) (Response, error)
}

// PolicyFunc is an adapter that allows the use of an ordinary function as a Policy interface.
type PolicyFunc func(ctx context.Context, request Request) (Response, error)

// Do calls f(ctx, request).
func (f PolicyFunc) Do(ctx context.Context, request Request) (Response, error) {
	return f(ctx, request)
}

// Options configures a Pipeline's behavior.
type Options struct {
	HTTPSender Factory // If sender is nil, then the pipeline's default client is used to send the HTTP requests.
	Log        LogOptions
}

// LogLevel tells a logger the minimum level to log. When code reports a log entry,
// the LogLevel indicates the level of the log entry. The logger only records entries
// whose level is at least the level it was told to log. See the Log* constants.
// For example, if a logger is configured with LogError, then LogError, LogPanic,
// and LogFatal entries will be logged; lower level entries are ignored.
type LogLevel uint32

const (
	// LogNone tells a logger not to log any entries passed to it.
	LogNone LogLevel = iota

	// LogFatal tells a logger to log all LogFatal entries passed to it.
	LogFatal

	// LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it.
	LogPanic

	// LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it.
	LogError

	// LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogWarning

	// LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogInfo

	// LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogDebug
)

// LogOptions configures the pipeline's logging mechanism & level filtering.
type LogOptions struct {
	Log func(level LogLevel, message string)

	// ShouldLog is called periodically allowing you to return whether the specified LogLevel should be logged or not.
	// An application can return different values over its lifetime; this allows the application to dynamically
	// alter what is logged. NOTE: This method can be called by multiple goroutines simultaneously so make sure
	// you implement it in a goroutine-safe way. If nil, nothing is logged (the equivalent of returning LogNone).
	// Usually, the function will be implemented simply like this: return level <= LogWarning
	ShouldLog func(level LogLevel) bool
}

type pipeline struct {
	factories []Factory
	options   Options
}

// The Pipeline interface represents an ordered list of Factory objects and an object implementing the HTTPSender interface.
// You construct a Pipeline by calling the pipeline.NewPipeline function. To send an HTTP request, call pipeline.NewRequest
// and then call Pipeline's Do method passing a context, the request, and a method-specific Factory (or nil). Passing a
// method-specific Factory allows this one call to Do to inject a Policy into the linked-list. The policy is injected where
// the MethodFactoryMarker (see the pipeline.MethodFactoryMarker function) is in the slice of Factory objects.
//
// When Do is called, the Pipeline object asks each Factory object to construct its Policy object and adds each Policy to a linked-list.
// Then, Do sends the Context and Request through all the Policy objects. The final Policy object sends the request over the network
// (via the HTTPSender object passed to NewPipeline) and the response is returned backwards through all the Policy objects.
// Since Pipeline and Factory objects are goroutine-safe, you typically create 1 Pipeline object and reuse it to make many HTTP requests.
type Pipeline interface {
	Do(ctx context.Context, methodFactory Factory, request Request) (Response, error)
}

// NewPipeline creates a new goroutine-safe Pipeline object from the slice of Factory objects and the specified options.
func NewPipeline(factories []Factory, o Options) Pipeline {
	if o.HTTPSender == nil {
		o.HTTPSender = newDefaultHTTPClientFactory()
	}
	if o.Log.Log == nil {
		o.Log.Log = func(LogLevel, string) {} // No-op logger
	}
	return &pipeline{factories: factories, options: o}
}

// Do is called for each and every HTTP request. It tells each Factory to create its own (mutable) Policy object
// replacing a MethodFactoryMarker factory (if it exists) with the methodFactory passed in. Then, the Context and Request
// are sent through the pipeline of Policy objects (which can transform the Request's URL/query parameters/headers) and
// ultimately sends the transformed HTTP request over the network.
func (p *pipeline) Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) {
	response, err := p.newPolicies(methodFactory).Do(ctx, request)
	request.close()
	return response, err
}

func (p *pipeline) newPolicies(methodFactory Factory) Policy {
	// The last Policy is the one that actually sends the request over the wire and gets the response.
	// It is overridable via the Options' HTTPSender field.
	po := &PolicyOptions{pipeline: p} // One object shared by all policy objects
	next := p.options.HTTPSender.New(nil, po)

	// Walk over the slice of Factory objects in reverse (from wire to API)
	markers := 0
	for i := len(p.factories) - 1; i >= 0; i-- {
		factory := p.factories[i]
		if _, ok := factory.(methodFactoryMarker); ok {
			markers++
			if markers > 1 {
				panic("MethodFactoryMarker can only appear once in the pipeline")
			}
			if methodFactory != nil {
				// Replace MethodFactoryMarker with passed-in methodFactory
				next = methodFactory.New(next, po)
			}
		} else {
			// Use the slice's Factory to construct its Policy
			next = factory.New(next, po)
		}
	}

	// Each Factory has created its Policy
	if markers == 0 && methodFactory != nil {
		panic("Non-nil methodFactory requires MethodFactoryMarker in the pipeline")
	}
	return next // Return head of the Policy object linked-list
}

// A PolicyOptions represents optional information that can be used by a node in the
// linked-list of Policy objects. A PolicyOptions is passed to the Factory's New method
// which passes it (if desired) to the Policy object it creates. Today, the Policy object
// uses the options to perform logging. But, in the future, this could be used for more.
type PolicyOptions struct {
	pipeline *pipeline
}

// ShouldLog returns true if the specified log level should be logged.
func (po *PolicyOptions) ShouldLog(level LogLevel) bool {
	if po.pipeline.options.Log.ShouldLog != nil {
		return po.pipeline.options.Log.ShouldLog(level)
	}
	return false
}

// Log logs a string to the Pipeline's Logger.
func (po *PolicyOptions) Log(level LogLevel, msg string) {
	if !po.ShouldLog(level) {
		return // Short circuit message formatting if we're not logging it
	}

	// We are logging it, ensure trailing newline
	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
		msg += "\n" // Ensure trailing newline
	}
	po.pipeline.options.Log.Log(level, msg)

	// If logger doesn't handle fatal/panic, we'll do it here.
	if level == LogFatal {
		os.Exit(1)
	} else if level == LogPanic {
		panic(msg)
	}
}

var pipelineHTTPClient = newDefaultHTTPClient()

func newDefaultHTTPClient() *http.Client {
	// We want the Transport to have a large connection pool
	return &http.Client{
		Transport: &http.Transport{
			Proxy: ieproxy.GetProxyFunc(),
			// We use Dial instead of DialContext as DialContext has been reported to cause slower performance.
			Dial /*Context*/ : (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
				DualStack: true,
			}).Dial, /*Context*/
			MaxIdleConns:           0, // No limit
			MaxIdleConnsPerHost:    100,
			IdleConnTimeout:        90 * time.Second,
			TLSHandshakeTimeout:    10 * time.Second,
			ExpectContinueTimeout:  1 * time.Second,
			DisableKeepAlives:      false,
			DisableCompression:     false,
			MaxResponseHeaderBytes: 0,
			//ResponseHeaderTimeout: time.Duration{},
			//ExpectContinueTimeout: time.Duration{},
		},
	}
}

// newDefaultHTTPClientFactory creates a DefaultHTTPClientPolicyFactory object that sends HTTP requests to Go's default http.Client.
func newDefaultHTTPClientFactory() Factory {
	return FactoryFunc(func(next Policy, po *PolicyOptions) PolicyFunc {
		return func(ctx context.Context, request Request) (Response, error) {
			r, err := pipelineHTTPClient.Do(request.WithContext(ctx))
			if err != nil {
				err = NewError(err, "HTTP request failed")
			}
			return NewHTTPResponse(r), err
		}
	})
}

var mfm = methodFactoryMarker{} // Singleton

// MethodFactoryMarker returns a special marker Factory object. When Pipeline's Do method is called, any
// MethodMarkerFactory object is replaced with the specified methodFactory object. If nil is passed for Do's
// methodFactory parameter, then the MethodFactoryMarker is ignored as the linked-list of Policy objects is created.
func MethodFactoryMarker() Factory {
	return mfm
}

type methodFactoryMarker struct {
}

func (methodFactoryMarker) New(next Policy, po *PolicyOptions) Policy {
	panic("methodFactoryMarker policy should have been replaced with a method policy")
}

// LogSanitizer can be implemented to clean secrets from lines logged by ForceLog.
// By default no implementation is provided here, because pipeline may be used in many different
// contexts, so the correct implementation is context-dependent.
type LogSanitizer interface {
	SanitizeLogMessage(raw string) string
}

var sanitizer LogSanitizer
var enableForceLog bool = true

// SetLogSanitizer can be called to supply a custom LogSanitizer.
// There is no threadsafety or locking on the underlying variable,
// so call this function just once at startup of your application
// (Don't later try to change the sanitizer on the fly).
func SetLogSanitizer(s LogSanitizer) {
	sanitizer = s
}

// SetForceLogEnabled can be used to disable ForceLog.
// There is no threadsafety or locking on the underlying variable,
// so call this function just once at startup of your application
// (Don't later try to change the setting on the fly).
func SetForceLogEnabled(enable bool) {
	enableForceLog = enable
}
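Putting core.go together: factories run in slice order from the API side down to the wire, and the default sender sits at the end unless Options.HTTPSender overrides it. A hedged end-to-end sketch with one header-stamping policy (NewRequest and the Response() accessor come from other files of this package that are not part of this diff; the header name and URL are illustrative):

	package main

	import (
		"context"
		"fmt"
		"log"
		"net/url"

		"github.com/Azure/azure-pipeline-go/pipeline"
	)

	func main() {
		// A Factory whose Policy stamps a header before forwarding the request.
		stamp := pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
			return func(ctx context.Context, req pipeline.Request) (pipeline.Response, error) {
				req.Header.Set("X-Example-Stamp", "demo") // mutate, then pass down the linked-list
				return next.Do(ctx, req)
			}
		})

		p := pipeline.NewPipeline([]pipeline.Factory{stamp}, pipeline.Options{})

		u, _ := url.Parse("https://example.com/")
		req, err := pipeline.NewRequest("GET", *u, nil)
		if err != nil {
			log.Fatal(err)
		}
		resp, err := p.Do(context.Background(), nil, req) // nil methodFactory: no marker in the slice
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("status:", resp.Response().StatusCode)
	}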
14 vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go generated vendored Normal file
@@ -0,0 +1,14 @@
package pipeline

// ForceLog should rarely be used. It forcibly logs an entry to the
// Windows Event Log (on Windows) or to the SysLog (on Linux)
func ForceLog(level LogLevel, msg string) {
	if !enableForceLog {
		return
	}
	if sanitizer != nil {
		msg = sanitizer.SanitizeLogMessage(msg)
	}
	forceLog(level, msg)
}
33 vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go generated vendored Normal file
@@ -0,0 +1,33 @@
// +build !windows,!nacl,!plan9

package pipeline

import (
	"log"
	"log/syslog"
)

// forceLog should rarely be used. It forcibly logs an entry to the
// Windows Event Log (on Windows) or to the SysLog (on Linux)
func forceLog(level LogLevel, msg string) {
	if defaultLogger == nil {
		return // Return fast if we failed to create the logger.
	}
	// We are logging it, ensure trailing newline
	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
		msg += "\n" // Ensure trailing newline
	}
	switch level {
	case LogFatal:
		defaultLogger.Fatal(msg)
	case LogPanic:
		defaultLogger.Panic(msg)
	case LogError, LogWarning, LogInfo:
		defaultLogger.Print(msg)
	}
}

var defaultLogger = func() *log.Logger {
	l, _ := syslog.NewLogger(syslog.LOG_USER|syslog.LOG_WARNING, log.LstdFlags)
	return l
}()
61 vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go generated vendored Normal file
@@ -0,0 +1,61 @@
package pipeline

import (
	"os"
	"syscall"
	"unsafe"
)

// forceLog should rarely be used. It forcibly logs an entry to the
// Windows Event Log (on Windows) or to the SysLog (on Linux)
func forceLog(level LogLevel, msg string) {
	var el eventType
	switch level {
	case LogError, LogFatal, LogPanic:
		el = elError
	case LogWarning:
		el = elWarning
	case LogInfo:
		el = elInfo
	}
	// We are logging it, ensure trailing newline
	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
		msg += "\n" // Ensure trailing newline
	}
	reportEvent(el, 0, msg)
}

type eventType int16

const (
	elSuccess eventType = 0
	elError   eventType = 1
	elWarning eventType = 2
	elInfo    eventType = 4
)

var reportEvent = func() func(eventType eventType, eventID int32, msg string) {
	advAPI32 := syscall.MustLoadDLL("advapi32.dll") // lower case to tie in with Go's sysdll registration
	registerEventSource := advAPI32.MustFindProc("RegisterEventSourceW")

	sourceName, _ := os.Executable()
	sourceNameUTF16, _ := syscall.UTF16PtrFromString(sourceName)
	handle, _, lastErr := registerEventSource.Call(uintptr(0), uintptr(unsafe.Pointer(sourceNameUTF16)))
	if lastErr == nil { // On error, logging is a no-op
		return func(eventType eventType, eventID int32, msg string) {}
	}
	reportEvent := advAPI32.MustFindProc("ReportEventW")
	return func(eventType eventType, eventID int32, msg string) {
		s, _ := syscall.UTF16PtrFromString(msg)
		_, _, _ = reportEvent.Call(
			uintptr(handle),             // HANDLE  hEventLog
			uintptr(eventType),          // WORD    wType
			uintptr(0),                  // WORD    wCategory
			uintptr(eventID),            // DWORD   dwEventID
			uintptr(0),                  // PSID    lpUserSid
			uintptr(1),                  // WORD    wNumStrings
			uintptr(0),                  // DWORD   dwDataSize
			uintptr(unsafe.Pointer(&s)), // LPCTSTR *lpStrings
			uintptr(0))                  // LPVOID  lpRawData
	}
}()
161
vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go
generated
vendored
Normal file
@@ -0,0 +1,161 @@
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.

/*
Package pipeline implements an HTTP request/response middleware pipeline whose
policy objects mutate an HTTP request's URL, query parameters, and/or headers before
the request is sent over the wire.

Not all policy objects mutate an HTTP request; some policy objects simply impact the
flow of requests/responses by performing operations such as logging, retry policies,
timeouts, failure injection, and deserialization of response payloads.

Implementing the Policy Interface

To implement a policy, define a struct that implements the pipeline.Policy interface's Do method. Your Do
method is called when an HTTP request wants to be sent over the network. Your Do method can perform any
operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, and/or query
parameters, inject a failure, etc. Your Do method must then forward the HTTP request to the next Policy object
in a linked-list ensuring that the remaining Policy objects perform their work. Ultimately, the last Policy
object sends the HTTP request over the network (by calling the HTTPSender's Do method).

When an HTTP response comes back, each Policy object in the linked-list gets a chance to process the response
(in reverse order). The Policy object can log the response, retry the operation if it failed due to a transient
failure or timeout, deserialize the response body, etc. Ultimately, the last Policy object returns the HTTP
response to the code that initiated the original HTTP request.

Here is a template for how to define a pipeline.Policy object:

	type myPolicy struct {
		node PolicyNode
		// TODO: Add configuration/setting fields here (if desired)...
	}

	func (p *myPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
		// TODO: Mutate/process the HTTP request here...
		response, err := p.node.Do(ctx, request) // Forward HTTP request to next Policy & get HTTP response
		// TODO: Mutate/process the HTTP response here...
		return response, err // Return response/error to previous Policy
	}

Implementing the Factory Interface

Each Policy struct definition requires a factory struct definition that implements the pipeline.Factory interface's New
method. The New method is called when application code wants to initiate a new HTTP request. Factory's New method is
passed a pipeline.PolicyNode object which contains a reference to the owning pipeline.Pipeline object (discussed later) and
a reference to the next Policy object in the linked list. The New method should create its corresponding Policy object
passing it the PolicyNode and any other configuration/settings fields appropriate for the specific Policy object.

Here is a template for how to define a pipeline.Factory object:

	// NOTE: Once created & initialized, Factory objects should be goroutine-safe (ex: immutable);
	// this allows reuse (efficient use of memory) and makes these objects usable by multiple goroutines concurrently.
	type myPolicyFactory struct {
		// TODO: Add any configuration/setting fields if desired...
	}

	func (f *myPolicyFactory) New(node pipeline.PolicyNode) Policy {
		return &myPolicy{node: node} // TODO: Also initialize any configuration/setting fields here (if desired)...
	}

Using your Factory and Policy objects via a Pipeline

To use the Factory and Policy objects, an application constructs a slice of Factory objects and passes
this slice to the pipeline.NewPipeline function.

	func NewPipeline(factories []pipeline.Factory, sender pipeline.HTTPSender) Pipeline

This function also requires an object implementing the HTTPSender interface. For simple scenarios,
passing nil for HTTPSender causes a standard Go http.Client object to be created and used to actually
send the HTTP response over the network. For more advanced scenarios, you can pass your own HTTPSender
object in. This allows sharing of http.Client objects or the use of custom-configured http.Client objects
or other objects that can simulate the network requests for testing purposes.

Now that you have a pipeline.Pipeline object, you can create a pipeline.Request object (which is a simple
wrapper around Go's standard http.Request object) and pass it to Pipeline's Do method along with passing a
context.Context for cancelling the HTTP request (if desired).

	type Pipeline interface {
		Do(ctx context.Context, methodFactory pipeline.Factory, request pipeline.Request) (pipeline.Response, error)
	}

Do iterates over the slice of Factory objects and tells each one to create its corresponding
Policy object. After the linked-list of Policy objects has been created, Do calls the first
Policy object passing it the Context & HTTP request parameters. These parameters now flow through
all the Policy objects giving each object a chance to look at and/or mutate the HTTP request.
The last Policy object sends the message over the network.

When the network operation completes, the HTTP response and error return values pass
back through the same Policy objects in reverse order. Most Policy objects ignore the
response/error but some log the result, retry the operation (depending on the exact
reason the operation failed), or deserialize the response's body. Your own Policy
objects can do whatever they like when processing outgoing requests or incoming responses.

Note that after an I/O request runs to completion, the Policy objects for that request
are garbage collected. However, Pipeline objects (like Factory objects) are goroutine-safe allowing
them to be created once and reused over many I/O operations. This allows for efficient use of
memory and also makes them safely usable by multiple goroutines concurrently.

Inserting a Method-Specific Factory into the Linked-List of Policy Objects

While Pipeline and Factory objects can be reused over many different operations, it is
common to have special behavior for a specific operation/method. For example, a method
may need to deserialize the response's body to an instance of a specific data type.
To accommodate this, the Pipeline's Do method takes an additional method-specific
Factory object. The Do method tells this Factory to create a Policy object and
injects this method-specific Policy object into the linked-list of Policy objects.

When creating a Pipeline object, the slice of Factory objects passed must have 1
(and only 1) entry marking where the method-specific Factory should be injected.
The Factory marker is obtained by calling the pipeline.MethodFactoryMarker() function:

	func MethodFactoryMarker() pipeline.Factory

Creating an HTTP Request Object

The HTTP request object passed to Pipeline's Do method is not Go's http.Request struct.
Instead, it is a pipeline.Request struct which is a simple wrapper around Go's standard
http.Request. You create a pipeline.Request object by calling the pipeline.NewRequest function:

	func NewRequest(method string, url url.URL, options pipeline.RequestOptions) (request pipeline.Request, err error)

To this function, you must pass a pipeline.RequestOptions that looks like this:

	type RequestOptions struct {
		// The readable and seekable stream to be sent to the server as the request's body.
		Body io.ReadSeeker

		// The callback method (if not nil) to be invoked to report progress as the stream is uploaded in the HTTP request.
		Progress ProgressReceiver
	}

The method and struct ensure that the request's body stream is a read/seekable stream.
A seekable stream is required so that upon retry, the final Policy object can seek
the stream back to the beginning before retrying the network request and re-uploading the
body. In addition, you can associate a ProgressReceiver callback function which will be
invoked periodically to report progress while bytes are being read from the body stream
and sent over the network.

Processing the HTTP Response

When an HTTP response comes in from the network, a reference to Go's http.Response struct is
embedded in a struct that implements the pipeline.Response interface:

	type Response interface {
		Response() *http.Response
	}

This interface is returned through all the Policy objects. Each Policy object can call the Response
interface's Response method to examine (or mutate) the embedded http.Response object.

A Policy object can internally define another struct (implementing the pipeline.Response interface)
that embeds an http.Response and adds additional fields and return this structure to other Policy
objects. This allows a Policy object to deserialize the body to some other struct and return the
original http.Response and the additional struct back through the Policy chain. Other Policy objects
can see the Response but cannot see the additional struct with the deserialized body. After all the
Policy objects have returned, the pipeline.Response interface is returned by Pipeline's Do method.
The caller of this method can perform a type assertion attempting to get back to the struct type
really returned by the Policy object. If the type assertion is successful, the caller now has
access to both the http.Response and the deserialized struct object.
*/
package pipeline
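To make the templates above concrete, here is a minimal sketch of a custom logging policy wired into a pipeline. It follows the signatures documented in this doc comment and the NewRequest signature from request.go in this same diff; the loggingPolicy and loggingFactory names are illustrative, the nil method-specific Factory is an assumption for the simple case, and the doc comment may lag the package's current constructor API.

	type loggingPolicy struct {
		node pipeline.PolicyNode
	}

	func (p *loggingPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
		log.Printf("-> %s %s", request.Method, request.URL) // inspect the outgoing request
		response, err := p.node.Do(ctx, request)            // forward to the next Policy
		if response != nil {
			log.Printf("<- %s", response.Response().Status) // inspect the incoming response
		}
		return response, err
	}

	type loggingFactory struct{}

	func (loggingFactory) New(node pipeline.PolicyNode) pipeline.Policy {
		return &loggingPolicy{node: node}
	}

	// Build the pipeline once (nil HTTPSender selects a default http.Client per the text above),
	// then issue a request through it.
	p := pipeline.NewPipeline([]pipeline.Factory{loggingFactory{}}, nil)
	u, _ := url.Parse("https://example.com/")
	req, _ := pipeline.NewRequest("GET", *u, nil) // body parameter per request.go in this diff
	resp, err := p.Do(context.Background(), nil, req)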
181
vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go
generated
vendored
Normal file
@@ -0,0 +1,181 @@
package pipeline

import (
	"fmt"
	"runtime"
)

type causer interface {
	Cause() error
}

func errorWithPC(msg string, pc uintptr) string {
	s := ""
	if fn := runtime.FuncForPC(pc); fn != nil {
		file, line := fn.FileLine(pc)
		s = fmt.Sprintf("-> %v, %v:%v\n", fn.Name(), file, line)
	}
	s += msg + "\n\n"
	return s
}

func getPC(callersToSkip int) uintptr {
	// Get the PC of Initialize method's caller.
	pc := [1]uintptr{}
	_ = runtime.Callers(callersToSkip, pc[:])
	return pc[0]
}

// ErrorNode can be an embedded field in a private error object. This field
// adds Program Counter support and a 'cause' (reference to a preceding error).
// When initializing an error type with this embedded field, initialize the
// ErrorNode field by calling ErrorNode{}.Initialize(cause).
type ErrorNode struct {
	pc    uintptr // Represents a Program Counter that you can get symbols for.
	cause error   // Refers to the preceding error (or nil)
}

// Error returns a string with the PC's symbols or "" if the PC is invalid.
// When defining a new error type, have its Error method call this one passing
// it the string representation of the error.
func (e *ErrorNode) Error(msg string) string {
	s := errorWithPC(msg, e.pc)
	if e.cause != nil {
		s += e.cause.Error() + "\n"
	}
	return s
}

// Cause returns the error that preceded this error.
func (e *ErrorNode) Cause() error { return e.cause }

// Temporary returns true if the error occurred due to a temporary condition.
func (e ErrorNode) Temporary() bool {
	type temporary interface {
		Temporary() bool
	}

	for err := e.cause; err != nil; {
		if t, ok := err.(temporary); ok {
			return t.Temporary()
		}

		if cause, ok := err.(causer); ok {
			err = cause.Cause()
		} else {
			err = nil
		}
	}
	return false
}

// Timeout returns true if the error occurred due to time expiring.
func (e ErrorNode) Timeout() bool {
	type timeout interface {
		Timeout() bool
	}

	for err := e.cause; err != nil; {
		if t, ok := err.(timeout); ok {
			return t.Timeout()
		}

		if cause, ok := err.(causer); ok {
			err = cause.Cause()
		} else {
			err = nil
		}
	}
	return false
}

// Initialize is used to initialize an embedded ErrorNode field.
// It captures the caller's program counter and saves the cause (preceding error).
// To initialize the field, use "ErrorNode{}.Initialize(cause, 3)". A callersToSkip
// value of 3 is very common; but, depending on your code nesting, you may need
// a different value.
func (ErrorNode) Initialize(cause error, callersToSkip int) ErrorNode {
	pc := getPC(callersToSkip)
	return ErrorNode{pc: pc, cause: cause}
}

// Cause walks all the preceding errors and returns the originating error.
func Cause(err error) error {
	for err != nil {
		cause, ok := err.(causer)
		if !ok {
			break
		}
		err = cause.Cause()
	}
	return err
}

// ErrorNodeNoCause can be an embedded field in a private error object. This field
// adds Program Counter support.
// When initializing an error type with this embedded field, initialize the
// ErrorNodeNoCause field by calling ErrorNodeNoCause{}.Initialize().
type ErrorNodeNoCause struct {
	pc uintptr // Represents a Program Counter that you can get symbols for.
}

// Error returns a string with the PC's symbols or "" if the PC is invalid.
// When defining a new error type, have its Error method call this one passing
// it the string representation of the error.
func (e *ErrorNodeNoCause) Error(msg string) string {
	return errorWithPC(msg, e.pc)
}

// Temporary returns true if the error occurred due to a temporary condition.
func (e ErrorNodeNoCause) Temporary() bool {
	return false
}

// Timeout returns true if the error occurred due to time expiring.
func (e ErrorNodeNoCause) Timeout() bool {
	return false
}

// Initialize is used to initialize an embedded ErrorNodeNoCause field.
// It captures the caller's program counter.
// To initialize the field, use "ErrorNodeNoCause{}.Initialize(3)". A callersToSkip
// value of 3 is very common; but, depending on your code nesting, you may need
// a different value.
func (ErrorNodeNoCause) Initialize(callersToSkip int) ErrorNodeNoCause {
	pc := getPC(callersToSkip)
	return ErrorNodeNoCause{pc: pc}
}

// NewError creates a simple string error (like errors.New). But, this
// error also captures the caller's Program Counter and the preceding error (if provided).
func NewError(cause error, msg string) error {
	if cause != nil {
		return &pcError{
			ErrorNode: ErrorNode{}.Initialize(cause, 3),
			msg:       msg,
		}
	}
	return &pcErrorNoCause{
		ErrorNodeNoCause: ErrorNodeNoCause{}.Initialize(3),
		msg:              msg,
	}
}

// pcError is a simple string error (like errors.New) with an ErrorNode (PC & cause).
type pcError struct {
	ErrorNode
	msg string
}

// Error satisfies the error interface. It shows the error with Program Counter
// symbols and calls Error on the preceding error so you can see the full error chain.
func (e *pcError) Error() string { return e.ErrorNode.Error(e.msg) }

// pcErrorNoCause is a simple string error (like errors.New) with an ErrorNodeNoCause (PC).
type pcErrorNoCause struct {
	ErrorNodeNoCause
	msg string
}

// Error satisfies the error interface. It shows the error with Program Counter symbols.
func (e *pcErrorNoCause) Error() string { return e.ErrorNodeNoCause.Error(e.msg) }
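As a brief usage sketch of the NewError and Cause helpers above (assuming only the API shown in this file; the import path is the vendored one from this diff):

	root := errors.New("connection reset")
	wrapped := pipeline.NewError(root, "upload failed") // records the caller's PC and the cause

	fmt.Println(wrapped)                 // "-> pkg.func, file:line" lines followed by both messages
	fmt.Println(pipeline.Cause(wrapped)) // walks the causer chain back to "connection reset"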
82
vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go
generated
vendored
Normal file
@@ -0,0 +1,82 @@
package pipeline

import "io"

// ********** The following is common between the request body AND the response body.

// ProgressReceiver defines the signature of a callback function invoked as progress is reported.
type ProgressReceiver func(bytesTransferred int64)

// ********** The following are specific to the request body (a ReadSeekCloser)

// This struct is used when sending a body to the network
type requestBodyProgress struct {
	requestBody io.ReadSeeker // Seeking is required to support retries
	pr          ProgressReceiver
}

// NewRequestBodyProgress adds progress reporting to an HTTP request's body stream.
func NewRequestBodyProgress(requestBody io.ReadSeeker, pr ProgressReceiver) io.ReadSeeker {
	if pr == nil {
		panic("pr must not be nil")
	}
	return &requestBodyProgress{requestBody: requestBody, pr: pr}
}

// Read reads a block of data from an inner stream and reports progress
func (rbp *requestBodyProgress) Read(p []byte) (n int, err error) {
	n, err = rbp.requestBody.Read(p)
	if err != nil {
		return
	}
	// Invokes the user's callback method to report progress
	position, err := rbp.requestBody.Seek(0, io.SeekCurrent)
	if err != nil {
		panic(err)
	}
	rbp.pr(position)
	return
}

func (rbp *requestBodyProgress) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
	return rbp.requestBody.Seek(offset, whence)
}

// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it.
func (rbp *requestBodyProgress) Close() error {
	if c, ok := rbp.requestBody.(io.Closer); ok {
		return c.Close()
	}
	return nil
}

// ********** The following are specific to the response body (a ReadCloser)

// This struct is used when receiving a body from the network
type responseBodyProgress struct {
	responseBody io.ReadCloser
	pr           ProgressReceiver
	offset       int64
}

// NewResponseBodyProgress adds progress reporting to an HTTP response's body stream.
func NewResponseBodyProgress(responseBody io.ReadCloser, pr ProgressReceiver) io.ReadCloser {
	if pr == nil {
		panic("pr must not be nil")
	}
	return &responseBodyProgress{responseBody: responseBody, pr: pr, offset: 0}
}

// Read reads a block of data from an inner stream and reports progress
func (rbp *responseBodyProgress) Read(p []byte) (n int, err error) {
	n, err = rbp.responseBody.Read(p)
	rbp.offset += int64(n)

	// Invokes the user's callback method to report progress
	rbp.pr(rbp.offset)
	return
}

func (rbp *responseBodyProgress) Close() error {
	return rbp.responseBody.Close()
}
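A short sketch of the request-side wrapper above: wrap an in-memory body so every Read reports the stream's absolute position. The io.Copy to ioutil.Discard simply stands in for the HTTP transport draining the body; everything else uses only the functions defined in this file.

	body := bytes.NewReader(make([]byte, 1<<20)) // 1 MiB payload
	withProgress := pipeline.NewRequestBodyProgress(body, func(bytesTransferred int64) {
		fmt.Printf("sent %d bytes\n", bytesTransferred)
	})
	_, _ = io.Copy(ioutil.Discard, withProgress) // callback fires after each Read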
147
vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go
generated
vendored
Normal file
@@ -0,0 +1,147 @@
package pipeline

import (
	"io"
	"net/http"
	"net/url"
	"strconv"
)

// Request is a thin wrapper over an http.Request. The wrapper provides several helper methods.
type Request struct {
	*http.Request
}

// NewRequest initializes a new HTTP request object with any desired options.
func NewRequest(method string, url url.URL, body io.ReadSeeker) (request Request, err error) {
	// Note: the url is passed by value so that any pipeline operations that modify it do so on a copy.

	// This code to construct an http.Request is copied from http.NewRequest(); we intentionally omitted removeEmptyPort for now.
	request.Request = &http.Request{
		Method:     method,
		URL:        &url,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     make(http.Header),
		Host:       url.Host,
	}

	if body != nil {
		err = request.SetBody(body)
	}
	return
}

// SetBody sets the body and content length; it assumes body is not nil.
func (r Request) SetBody(body io.ReadSeeker) error {
	size, err := body.Seek(0, io.SeekEnd)
	if err != nil {
		return err
	}

	body.Seek(0, io.SeekStart)
	r.ContentLength = size
	r.Header["Content-Length"] = []string{strconv.FormatInt(size, 10)}

	if size != 0 {
		r.Body = &retryableRequestBody{body: body}
		r.GetBody = func() (io.ReadCloser, error) {
			_, err := body.Seek(0, io.SeekStart)
			if err != nil {
				return nil, err
			}
			return r.Body, nil
		}
	} else {
		// in case the body is an empty stream, we need to use http.NoBody to explicitly provide no content
		r.Body = http.NoBody
		r.GetBody = func() (io.ReadCloser, error) {
			return http.NoBody, nil
		}

		// close the user-provided empty body
		if c, ok := body.(io.Closer); ok {
			c.Close()
		}
	}

	return nil
}

// Copy makes a copy of an http.Request. Specifically, it makes a deep copy
// of its Method, URL, Host, Proto(Major/Minor), Header, ContentLength, Close,
// RemoteAddr, and RequestURI. Copy makes a shallow copy of the Body, GetBody, TLS,
// Cancel, Response, and ctx fields. Copy panics if any of these fields are
// not nil: TransferEncoding, Form, PostForm, MultipartForm, or Trailer.
func (r Request) Copy() Request {
	if r.TransferEncoding != nil || r.Form != nil || r.PostForm != nil || r.MultipartForm != nil || r.Trailer != nil {
		panic("Can't make a deep copy of the http.Request because at least one of the following is not nil:" +
			"TransferEncoding, Form, PostForm, MultipartForm, or Trailer.")
	}
	copy := *r.Request          // Copy the request
	urlCopy := *(r.Request.URL) // Copy the URL
	copy.URL = &urlCopy
	copy.Header = http.Header{} // Copy the header
	for k, vs := range r.Header {
		for _, value := range vs {
			copy.Header.Add(k, value)
		}
	}
	return Request{Request: &copy} // Return the copy
}

func (r Request) close() error {
	if r.Body != nil && r.Body != http.NoBody {
		c, ok := r.Body.(*retryableRequestBody)
		if !ok {
			panic("unexpected request body type (should be *retryableRequestBody)")
		}
		return c.realClose()
	}
	return nil
}

// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
func (r Request) RewindBody() error {
	if r.Body != nil && r.Body != http.NoBody {
		s, ok := r.Body.(io.Seeker)
		if !ok {
			panic("unexpected request body type (should be io.Seeker)")
		}

		// Reset the stream back to the beginning
		_, err := s.Seek(0, io.SeekStart)
		return err
	}
	return nil
}

// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)

// This struct is used when sending a body to the network
type retryableRequestBody struct {
	body io.ReadSeeker // Seeking is required to support retries
}

// Read reads a block of data from the inner stream
func (b *retryableRequestBody) Read(p []byte) (n int, err error) {
	return b.body.Read(p)
}

func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
	return b.body.Seek(offset, whence)
}

func (b *retryableRequestBody) Close() error {
	// We don't want the underlying transport to close the request body on transient failures so this is a no-op.
	// The pipeline closes the request body upon success.
	return nil
}

func (b *retryableRequestBody) realClose() error {
	if c, ok := b.body.(io.Closer); ok {
		return c.Close()
	}
	return nil
}
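A sketch of the helpers above: build a Request around a seekable body, then rewind it the way a retry policy would before resending. strings.NewReader satisfies io.ReadSeeker, so SetBody installs the retryableRequestBody wrapper shown in this file.

	u, _ := url.Parse("https://example.com/upload")
	req, err := pipeline.NewRequest("PUT", *u, strings.NewReader("payload"))
	if err != nil {
		// handle the error
	}
	// ...a first attempt consumes req.Body...
	if err := req.RewindBody(); err != nil { // seek back to offset 0 before retrying
		// handle the error
	}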
74
vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
package pipeline

import (
	"bytes"
	"fmt"
	"net/http"
	"sort"
	"strings"
)

// The Response interface exposes an http.Response object as it returns through the pipeline of Policy objects.
// This ensures that Policy objects have access to the HTTP response. However, the object this interface encapsulates
// might be a struct with additional fields that is created by a Policy object (typically a method-specific Factory).
// The method that injected the method-specific Factory gets this returned Response and performs a type assertion
// to the expected struct and returns the struct to its caller.
type Response interface {
	Response() *http.Response
}

// This is the default struct that has the http.Response.
// A method can replace this struct with its own struct containing an http.Response
// field and any other additional fields.
type httpResponse struct {
	response *http.Response
}

// NewHTTPResponse is typically called by a Policy object to return a Response object.
func NewHTTPResponse(response *http.Response) Response {
	return &httpResponse{response: response}
}

// This method satisfies the public Response interface's Response method
func (r httpResponse) Response() *http.Response {
	return r.response
}

// WriteRequestWithResponse appends a formatted HTTP request into a Buffer. If response and/or err are
// not nil, then these are also written into the Buffer.
func WriteRequestWithResponse(b *bytes.Buffer, request *http.Request, response *http.Response, err error) {
	// Write the request into the buffer.
	fmt.Fprint(b, " "+request.Method+" "+request.URL.String()+"\n")
	writeHeader(b, request.Header)
	if response != nil {
		fmt.Fprintln(b, " --------------------------------------------------------------------------------")
		fmt.Fprint(b, " RESPONSE Status: "+response.Status+"\n")
		writeHeader(b, response.Header)
	}
	if err != nil {
		fmt.Fprintln(b, " --------------------------------------------------------------------------------")
		fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n")
	}
}

// writeHeader appends an HTTP request's or response's header into a Buffer.
func writeHeader(b *bytes.Buffer, header map[string][]string) {
	if len(header) == 0 {
		b.WriteString(" (no headers)\n")
		return
	}
	keys := make([]string, 0, len(header))
	// Alphabetize the headers
	for k := range header {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		// Redact the value of any Authorization header to prevent security information from persisting in logs
		value := interface{}("REDACTED")
		if !strings.EqualFold(k, "Authorization") {
			value = header[k]
		}
		fmt.Fprintf(b, " %s: %+v\n", k, value)
	}
}
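A quick sketch of WriteRequestWithResponse above, showing the Authorization redaction; only the function defined in this file and the standard library are assumed.

	httpReq, _ := http.NewRequest("GET", "https://example.com/", nil)
	httpReq.Header.Set("Authorization", "Bearer secret")
	var buf bytes.Buffer
	pipeline.WriteRequestWithResponse(&buf, httpReq, nil, nil)
	fmt.Print(buf.String()) // the Authorization value prints as REDACTED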
9
vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
package pipeline

const (
	// UserAgent is the string to be used in the user agent string when making requests.
	UserAgent = "azure-pipeline-go/" + Version

	// Version is the semantic version (see http://semver.org) of the pipeline package.
	Version = "0.2.1"
)
21
vendor/github.com/Azure/azure-storage-blob-go/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) Microsoft Corporation. All rights reserved.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE
65
vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go
generated
vendored
Normal file
@@ -0,0 +1,65 @@
package azblob

import (
	"time"
)

// ModifiedAccessConditions identifies standard HTTP access conditions which you optionally set.
type ModifiedAccessConditions struct {
	IfModifiedSince   time.Time
	IfUnmodifiedSince time.Time
	IfMatch           ETag
	IfNoneMatch       ETag
}

// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac ModifiedAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) {
	if !ac.IfModifiedSince.IsZero() {
		ims = &ac.IfModifiedSince
	}
	if !ac.IfUnmodifiedSince.IsZero() {
		ius = &ac.IfUnmodifiedSince
	}
	if ac.IfMatch != ETagNone {
		ime = &ac.IfMatch
	}
	if ac.IfNoneMatch != ETagNone {
		inme = &ac.IfNoneMatch
	}
	return
}

// ContainerAccessConditions identifies container-specific access conditions which you optionally set.
type ContainerAccessConditions struct {
	ModifiedAccessConditions
	LeaseAccessConditions
}

// BlobAccessConditions identifies blob-specific access conditions which you optionally set.
type BlobAccessConditions struct {
	ModifiedAccessConditions
	LeaseAccessConditions
}

// LeaseAccessConditions identifies lease access conditions for a container or blob which you optionally set.
type LeaseAccessConditions struct {
	LeaseID string
}

// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac LeaseAccessConditions) pointers() (leaseID *string) {
	if ac.LeaseID != "" {
		leaseID = &ac.LeaseID
	}
	return
}

/*
// getInt32 is for internal infrastructure. It is used with access condition values where
// 0 (the default setting) is meaningful. The library interprets 0 as "do not send the header"
// and the privately-stored field in the access condition object is stored as +1 higher than desired.
// This method returns true if the value is > 0 (explicitly set), and the stored value - 1 (the desired value).
func getInt32(value int32) (bool, int32) {
	return value > 0, value - 1
}
*/
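Illustrative only: composing the condition types above for a request that should succeed only while a blob keeps a known ETag under an active lease. The etag and leaseID values are hypothetical placeholders obtained from an earlier response.

	func conditions(etag azblob.ETag, leaseID string) azblob.BlobAccessConditions {
		return azblob.BlobAccessConditions{
			ModifiedAccessConditions: azblob.ModifiedAccessConditions{IfMatch: etag}, // fail if the blob changed
			LeaseAccessConditions:    azblob.LeaseAccessConditions{LeaseID: leaseID}, // fail without the lease
		}
	}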
8009
vendor/github.com/Azure/azure-storage-blob-go/azblob/blob.json
generated
vendored
Normal file
File diff suppressed because it is too large
238
vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go
generated
vendored
Normal file
@@ -0,0 +1,238 @@
package azblob

import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"sync"

	guuid "github.com/google/uuid"
)

// blockWriter provides methods to upload blocks that represent a file to a server and commit them.
// This allows us to provide a local implementation that fakes the server for hermetic testing.
type blockWriter interface {
	StageBlock(context.Context, string, io.ReadSeeker, LeaseAccessConditions, []byte) (*BlockBlobStageBlockResponse, error)
	CommitBlockList(context.Context, []string, BlobHTTPHeaders, Metadata, BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error)
}

// copyFromReader copies a source io.Reader to blob storage using concurrent uploads.
// TODO(someone): The existing model provides a buffer size and buffer limit as limiting factors. The buffer size is probably
// useless other than needing to be above some number, as the network stack is going to hack up the buffer over some size. The
// max buffers is providing a cap on how much memory we use (by multiplying it times the buffer size) and how many go routines can upload
// at a time. I think having a single max memory dial would be more efficient. We can choose an internal buffer size that works
// well, 4 MiB or 8 MiB, and autoscale to as many goroutines within the memory limit. This gives a single dial to tweak and we can
// choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model).
// We can even provide a utility to dial this number in for customer networks to optimize their copies.
func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamToBlockBlobOptions) (*BlockBlobCommitBlockListResponse, error) {
	o.defaults()

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	cp := &copier{
		ctx:    ctx,
		cancel: cancel,
		reader: from,
		to:     to,
		id:     newID(),
		o:      o,
		ch:     make(chan copierChunk, 1),
		errCh:  make(chan error, 1),
		buffers: sync.Pool{
			New: func() interface{} {
				return make([]byte, o.BufferSize)
			},
		},
	}

	// Start the pool of concurrent writers.
	cp.wg.Add(o.MaxBuffers)
	for i := 0; i < o.MaxBuffers; i++ {
		go cp.writer()
	}

	// Send all our chunks until we get an error.
	var err error
	for {
		if err = cp.sendChunk(); err != nil {
			break
		}
	}
	// If the error is not EOF, then we have a problem.
	if err != nil && !errors.Is(err, io.EOF) {
		return nil, err
	}

	// Close out our upload.
	if err := cp.close(); err != nil {
		return nil, err
	}

	return cp.result, nil
}

// copier streams a file via chunks in parallel from a reader representing a file.
// Do not use directly, instead use copyFromReader().
type copier struct {
	// ctx holds the context of a copier. It is normally a faux pas to store a Context in a struct. In this case,
	// the copier has the lifetime of a function call, so it's fine.
	ctx    context.Context
	cancel context.CancelFunc

	// reader is the source to be written to storage.
	reader io.Reader
	// to is the location we are writing our chunks to.
	to blockWriter

	id *id
	o  UploadStreamToBlockBlobOptions

	// num is the current chunk we are on.
	num int32
	// ch is used to pass the next chunk of data from our reader to one of the writers.
	ch chan copierChunk
	// errCh is used to hold the first error from our concurrent writers.
	errCh chan error
	// wg provides a count of how many writers we are waiting to finish.
	wg sync.WaitGroup
	// buffers provides a pool of chunks that can be reused.
	buffers sync.Pool

	// result holds the final result from blob storage after we have submitted all chunks.
	result *BlockBlobCommitBlockListResponse
}

type copierChunk struct {
	buffer []byte
	id     string
}

// getErr returns an error by priority. First, if a function set an error, it returns that error. Next, if the Context has an error
// it returns that error. Otherwise it is nil. getErr supports only returning an error once per copier.
func (c *copier) getErr() error {
	select {
	case err := <-c.errCh:
		return err
	default:
	}
	return c.ctx.Err()
}

// sendChunk reads data from our internal reader, creates a chunk, and sends it to be written via a channel.
// sendChunk returns io.EOF when the reader returns an io.EOF or io.ErrUnexpectedEOF.
func (c *copier) sendChunk() error {
	if err := c.getErr(); err != nil {
		return err
	}

	buffer := c.buffers.Get().([]byte)
	n, err := io.ReadFull(c.reader, buffer)
	switch {
	case err == nil && n == 0:
		return nil
	case err == nil:
		c.ch <- copierChunk{
			buffer: buffer[0:n],
			id:     c.id.next(),
		}
		return nil
	case err != nil && (err == io.EOF || err == io.ErrUnexpectedEOF) && n == 0:
		return io.EOF
	}

	if err == io.EOF || err == io.ErrUnexpectedEOF {
		c.ch <- copierChunk{
			buffer: buffer[0:n],
			id:     c.id.next(),
		}
		return io.EOF
	}
	if err := c.getErr(); err != nil {
		return err
	}
	return err
}

// writer writes chunks sent on a channel.
func (c *copier) writer() {
	defer c.wg.Done()

	for chunk := range c.ch {
		if err := c.write(chunk); err != nil {
			if !errors.Is(err, context.Canceled) {
				select {
				case c.errCh <- err:
					c.cancel()
				default:
				}
				return
			}
		}
	}
}

// write uploads a chunk to blob storage.
func (c *copier) write(chunk copierChunk) error {
	defer c.buffers.Put(chunk.buffer)

	if err := c.ctx.Err(); err != nil {
		return err
	}

	_, err := c.to.StageBlock(c.ctx, chunk.id, bytes.NewReader(chunk.buffer), LeaseAccessConditions{}, nil)
	if err != nil {
		return fmt.Errorf("write error: %w", err)
	}
	return nil
}

// close commits our blocks to blob storage and closes our writer.
func (c *copier) close() error {
	close(c.ch)
	c.wg.Wait()

	if err := c.getErr(); err != nil {
		return err
	}

	var err error
	c.result, err = c.to.CommitBlockList(c.ctx, c.id.issued(), c.o.BlobHTTPHeaders, c.o.Metadata, c.o.AccessConditions)
	return err
}

// id allows the creation of unique IDs based on UUID4 + an int32. This autoincrements.
type id struct {
	u   [64]byte
	num uint32
	all []string
}

// newID constructs a new id.
func newID() *id {
	uu := guuid.New()
	u := [64]byte{}
	copy(u[:], uu[:])
	return &id{u: u}
}

// next returns the next ID. This is not thread-safe.
func (id *id) next() string {
	defer func() { id.num++ }()

	binary.BigEndian.PutUint32((id.u[len(guuid.UUID{}):]), id.num)
	str := base64.StdEncoding.EncodeToString(id.u[:])
	id.all = append(id.all, str)

	return str
}

// issued returns all ids that have been issued. This returned value shares the internal slice so it is not safe to modify the return.
// The value is only valid until the next time next() is called.
func (id *id) issued() []string {
	return id.all
}
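A standalone sketch of the block-ID scheme implemented by the id type above: a UUID prefix plus a big-endian counter in a 64-byte buffer, base64-encoded, so every staged block gets a unique ID of equal length (a requirement for block lists).

	var u [64]byte
	uu := guuid.New()
	copy(u[:], uu[:])
	for num := uint32(0); num < 3; num++ {
		binary.BigEndian.PutUint32(u[len(guuid.UUID{}):], num)
		fmt.Println(base64.StdEncoding.EncodeToString(u[:])) // three distinct, equal-length IDs
	}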
396
vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go
generated
vendored
Normal file
@@ -0,0 +1,396 @@
|
||||
package azblob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"bytes"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"errors"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// CommonResponse returns the headers common to all blob REST API responses.
|
||||
type CommonResponse interface {
|
||||
// ETag returns the value for header ETag.
|
||||
ETag() ETag
|
||||
|
||||
// LastModified returns the value for header Last-Modified.
|
||||
LastModified() time.Time
|
||||
|
||||
// RequestID returns the value for header x-ms-request-id.
|
||||
RequestID() string
|
||||
|
||||
// Date returns the value for header Date.
|
||||
Date() time.Time
|
||||
|
||||
// Version returns the value for header x-ms-version.
|
||||
Version() string
|
||||
|
||||
// Response returns the raw HTTP response object.
|
||||
Response() *http.Response
|
||||
}
|
||||
|
||||
// UploadToBlockBlobOptions identifies options used by the UploadBufferToBlockBlob and UploadFileToBlockBlob functions.
|
||||
type UploadToBlockBlobOptions struct {
|
||||
// BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes.
|
||||
BlockSize int64
|
||||
|
||||
// Progress is a function that is invoked periodically as bytes are sent to the BlockBlobURL.
|
||||
// Note that the progress reporting is not always increasing; it can go down when retrying a request.
|
||||
Progress pipeline.ProgressReceiver
|
||||
|
||||
// BlobHTTPHeaders indicates the HTTP headers to be associated with the blob.
|
||||
BlobHTTPHeaders BlobHTTPHeaders
|
||||
|
||||
// Metadata indicates the metadata to be associated with the blob when PutBlockList is called.
|
||||
Metadata Metadata
|
||||
|
||||
// AccessConditions indicates the access conditions for the block blob.
|
||||
AccessConditions BlobAccessConditions
|
||||
|
||||
// Parallelism indicates the maximum number of blocks to upload in parallel (0=default)
|
||||
Parallelism uint16
|
||||
}
|
||||
|
||||
// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob.
|
||||
func UploadBufferToBlockBlob(ctx context.Context, b []byte,
|
||||
blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
|
||||
bufferSize := int64(len(b))
|
||||
if o.BlockSize == 0 {
|
||||
// If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error
|
||||
if bufferSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks {
|
||||
return nil, errors.New("buffer is too large to upload to a block blob")
|
||||
}
|
||||
// If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request
|
||||
if bufferSize <= BlockBlobMaxUploadBlobBytes {
|
||||
o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
|
||||
} else {
|
||||
o.BlockSize = bufferSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks
|
||||
if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB
|
||||
o.BlockSize = BlobDefaultDownloadBlockSize
|
||||
}
|
||||
// StageBlock will be called with blockSize blocks and a Parallelism of (BufferSize / BlockSize).
|
||||
}
|
||||
}
|
||||
|
||||
if bufferSize <= BlockBlobMaxUploadBlobBytes {
|
||||
// If the size can fit in 1 Upload call, do it this way
|
||||
var body io.ReadSeeker = bytes.NewReader(b)
|
||||
if o.Progress != nil {
|
||||
body = pipeline.NewRequestBodyProgress(body, o.Progress)
|
||||
}
|
||||
return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
|
||||
}
|
||||
|
||||
var numBlocks = uint16(((bufferSize - 1) / o.BlockSize) + 1)
|
||||
|
||||
blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
|
||||
progress := int64(0)
|
||||
progressLock := &sync.Mutex{}
|
||||
|
||||
err := DoBatchTransfer(ctx, BatchTransferOptions{
|
||||
OperationName: "UploadBufferToBlockBlob",
|
||||
TransferSize: bufferSize,
|
||||
ChunkSize: o.BlockSize,
|
||||
Parallelism: o.Parallelism,
|
||||
Operation: func(offset int64, count int64, ctx context.Context) error {
|
||||
// This function is called once per block.
|
||||
// It is passed this block's offset within the buffer and its count of bytes
|
||||
// Prepare to read the proper block/section of the buffer
|
||||
var body io.ReadSeeker = bytes.NewReader(b[offset : offset+count])
|
||||
blockNum := offset / o.BlockSize
|
||||
if o.Progress != nil {
|
||||
blockProgress := int64(0)
|
||||
body = pipeline.NewRequestBodyProgress(body,
|
||||
func(bytesTransferred int64) {
|
||||
diff := bytesTransferred - blockProgress
|
||||
blockProgress = bytesTransferred
|
||||
progressLock.Lock() // 1 goroutine at a time gets a progress report
|
||||
progress += diff
|
||||
o.Progress(progress)
|
||||
progressLock.Unlock()
|
||||
})
|
||||
}
|
||||
|
||||
// Block IDs are unique values to avoid issue if 2+ clients are uploading blocks
|
||||
// at the same time causing PutBlockList to get a mix of blocks from all the clients.
|
||||
blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes())
|
||||
_, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil)
|
||||
return err
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// All put blocks were successful, call Put Block List to finalize the blob
|
||||
return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
|
||||
}
|
||||
|
||||
// UploadFileToBlockBlob uploads a file in blocks to a block blob.
|
||||
func UploadFileToBlockBlob(ctx context.Context, file *os.File,
|
||||
blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
|
||||
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := mmf{} // Default to an empty slice; used for 0-size file
|
||||
if stat.Size() != 0 {
|
||||
m, err = newMMF(file, false, 0, int(stat.Size()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer m.unmap()
|
||||
}
|
||||
return UploadBufferToBlockBlob(ctx, m, blockBlobURL, o)
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
|
||||
|
||||
// DownloadFromBlobOptions identifies options used by the DownloadBlobToBuffer and DownloadBlobToFile functions.
|
||||
type DownloadFromBlobOptions struct {
|
||||
// BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize.
|
||||
BlockSize int64
|
||||
|
||||
// Progress is a function that is invoked periodically as bytes are received.
|
||||
Progress pipeline.ProgressReceiver
|
||||
|
||||
// AccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
|
||||
AccessConditions BlobAccessConditions
|
||||
|
||||
// Parallelism indicates the maximum number of blocks to download in parallel (0=default)
|
||||
Parallelism uint16
|
||||
|
||||
// RetryReaderOptionsPerBlock is used when downloading each block.
|
||||
RetryReaderOptionsPerBlock RetryReaderOptions
|
||||
}
|
||||
|
||||
// downloadBlobToBuffer downloads an Azure blob to a buffer with parallel.
|
||||
func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
|
||||
b []byte, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error {
|
||||
if o.BlockSize == 0 {
|
||||
o.BlockSize = BlobDefaultDownloadBlockSize
|
||||
}
|
||||
|
||||
if count == CountToEnd { // If size not specified, calculate it
|
||||
if initialDownloadResponse != nil {
|
||||
count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it
|
||||
} else {
|
||||
// If we don't have the length at all, get it
|
||||
dr, err := blobURL.Download(ctx, 0, CountToEnd, o.AccessConditions, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
count = dr.ContentLength() - offset
|
||||
}
|
||||
}
|
||||
|
||||
// Prepare and do parallel download.
|
||||
progress := int64(0)
|
||||
progressLock := &sync.Mutex{}
|
||||
|
||||
err := DoBatchTransfer(ctx, BatchTransferOptions{
|
||||
OperationName: "downloadBlobToBuffer",
|
||||
TransferSize: count,
|
||||
ChunkSize: o.BlockSize,
|
||||
Parallelism: o.Parallelism,
|
||||
Operation: func(chunkStart int64, count int64, ctx context.Context) error {
|
||||
dr, err := blobURL.Download(ctx, chunkStart+offset, count, o.AccessConditions, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
body := dr.Body(o.RetryReaderOptionsPerBlock)
|
||||
if o.Progress != nil {
|
||||
rangeProgress := int64(0)
|
||||
body = pipeline.NewResponseBodyProgress(
|
||||
body,
|
||||
func(bytesTransferred int64) {
|
||||
diff := bytesTransferred - rangeProgress
|
||||
rangeProgress = bytesTransferred
|
||||
progressLock.Lock()
|
||||
progress += diff
|
||||
o.Progress(progress)
|
||||
progressLock.Unlock()
|
||||
})
|
||||
}
|
||||
_, err = io.ReadFull(body, b[chunkStart:chunkStart+count])
|
||||
body.Close()
|
||||
return err
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DownloadBlobToBuffer downloads an Azure blob to a buffer with parallel.
|
||||
// Offset and count are optional, pass 0 for both to download the entire blob.
|
||||
func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
|
||||
b []byte, o DownloadFromBlobOptions) error {
|
||||
return downloadBlobToBuffer(ctx, blobURL, offset, count, b, o, nil)
|
||||
}
|
||||
|
||||
// DownloadBlobToFile downloads an Azure blob to a local file.
|
||||
// The file would be truncated if the size doesn't match.
|
||||
// Offset and count are optional, pass 0 for both to download the entire blob.
|
||||
func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64,
|
||||
file *os.File, o DownloadFromBlobOptions) error {
|
||||
// 1. Calculate the size of the destination file
|
||||
var size int64
|
||||
|
||||
if count == CountToEnd {
|
||||
// Try to get Azure blob's size
|
||||
props, err := blobURL.GetProperties(ctx, o.AccessConditions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
size = props.ContentLength() - offset
|
||||
} else {
|
||||
size = count
|
||||
}
|
||||
|
||||
// 2. Compare and try to resize local file's size if it doesn't match Azure blob's size.
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if stat.Size() != size {
|
||||
if err = file.Truncate(size); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if size > 0 {
|
||||
// 3. Set mmap and call downloadBlobToBuffer.
|
||||
m, err := newMMF(file, true, 0, int(size))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer m.unmap()
|
||||
return downloadBlobToBuffer(ctx, blobURL, offset, size, m, o, nil)
|
||||
} else { // if the blob's size is 0, there is no need in downloading it
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// BatchTransferOptions identifies options used by DoBatchTransfer.
|
||||
type BatchTransferOptions struct {
|
||||
TransferSize int64
|
||||
ChunkSize int64
|
||||
Parallelism uint16
|
||||
Operation func(offset int64, chunkSize int64, ctx context.Context) error
|
||||
OperationName string
|
||||
}
|
||||
|
||||
// DoBatchTransfer helps to execute operations in a batch manner.
|
||||
// Can be used by users to customize batch works (for other scenarios that the SDK does not provide)
|
||||
func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error {
|
||||
if o.ChunkSize == 0 {
|
||||
return errors.New("ChunkSize cannot be 0")
|
||||
}
|
||||
|
||||
if o.Parallelism == 0 {
|
||||
o.Parallelism = 5 // default Parallelism
|
||||
}
|
||||
|
||||
// Prepare and do parallel operations.
|
||||
numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1)
|
||||
operationChannel := make(chan func() error, o.Parallelism) // Create the channel that release 'Parallelism' goroutines concurrently
|
||||
operationResponseChannel := make(chan error, numChunks) // Holds each response
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
// Create the goroutines that process each operation (in parallel).
|
||||
for g := uint16(0); g < o.Parallelism; g++ {
|
||||
//grIndex := g
|
||||
go func() {
|
||||
for f := range operationChannel {
|
||||
err := f()
|
||||
operationResponseChannel <- err
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Add each chunk's operation to the channel.
|
||||
for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
|
||||
curChunkSize := o.ChunkSize
|
||||
|
||||
if chunkNum == numChunks-1 { // Last chunk
|
||||
curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total
|
||||
}
|
||||
offset := int64(chunkNum) * o.ChunkSize
|
||||
|
||||
operationChannel <- func() error {
|
||||
return o.Operation(offset, curChunkSize, ctx)
|
||||
}
|
||||
}
|
||||
close(operationChannel)
|
||||
|
||||
// Wait for the operations to complete.
|
||||
var firstErr error = nil
|
||||
for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
|
||||
responseError := <-operationResponseChannel
|
||||
// record the first error (the original error which should cause the other chunks to fail with canceled context)
|
||||
if responseError != nil && firstErr == nil {
|
||||
cancel() // As soon as any operation fails, cancel all remaining operation calls
|
||||
firstErr = responseError
|
||||
}
|
||||
}
|
||||
return firstErr
|
||||
}
////////////////////////////////////////////////////////////////////////////////////////////////

const _1MiB = 1024 * 1024

// UploadStreamToBlockBlobOptions is options for UploadStreamToBlockBlob.
type UploadStreamToBlockBlobOptions struct {
	// BufferSize sizes the buffer used to read data from source. If < 1 MiB, defaults to 1 MiB.
	BufferSize int
	// MaxBuffers defines the number of simultaneous uploads that will be performed to upload the file.
	MaxBuffers       int
	BlobHTTPHeaders  BlobHTTPHeaders
	Metadata         Metadata
	AccessConditions BlobAccessConditions
}

func (u *UploadStreamToBlockBlobOptions) defaults() {
	if u.MaxBuffers == 0 {
		u.MaxBuffers = 1
	}

	if u.BufferSize < _1MiB {
		u.BufferSize = _1MiB
	}
}

// UploadStreamToBlockBlob copies the file held in io.Reader to the Blob at blockBlobURL.
// A Context deadline or cancellation will cause this to error.
func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL,
	o UploadStreamToBlockBlobOptions) (CommonResponse, error) {
	o.defaults()

	result, err := copyFromReader(ctx, reader, blockBlobURL, o)
	if err != nil {
		return nil, err
	}

	return result, nil
}

// UploadStreamOptions (defunct) was used internally. This will be removed or made private in a future version.
type UploadStreamOptions struct {
	BufferSize int
	MaxBuffers int
}
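A hedged usage sketch for UploadStreamToBlockBlob (the account, container, and blob names are placeholders, and the pipeline is assumed to have been built elsewhere with the package's NewPipeline):

package example

import (
	"context"
	"io"
	"net/url"

	"github.com/Azure/azure-pipeline-go/pipeline"
	"github.com/Azure/azure-storage-blob-go/azblob"
)

// uploadStream streams src into a block blob using 4 buffers of 2 MiB each.
func uploadStream(ctx context.Context, p pipeline.Pipeline, src io.Reader) error {
	u, err := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/myblob")
	if err != nil {
		return err
	}
	bb := azblob.NewBlockBlobURL(*u, p)
	_, err = azblob.UploadStreamToBlockBlob(ctx, src, bb, azblob.UploadStreamToBlockBlobOptions{
		BufferSize: 2 * 1024 * 1024, // values below 1 MiB are raised to 1 MiB by defaults()
		MaxBuffers: 4,               // number of buffers filled and uploaded concurrently
	})
	return err
}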
153
vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go
generated
vendored
Normal file
@@ -0,0 +1,153 @@
package azblob

import (
	"net"
	"net/url"
	"strings"
)

const (
	snapshot           = "snapshot"
	SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
)

// A BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an
// existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL().
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
type BlobURLParts struct {
	Scheme              string // Ex: "https://"
	Host                string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80"
	IPEndpointStyleInfo IPEndpointStyleInfo
	ContainerName       string // "" if no container
	BlobName            string // "" if no blob
	Snapshot            string // "" if not a snapshot
	SAS                 SASQueryParameters
	UnparsedParams      string
}

// IPEndpointStyleInfo is used for IP endpoint style URLs when working with the Azure storage emulator.
// Ex: "https://10.132.141.33/accountname/containername"
type IPEndpointStyleInfo struct {
	AccountName string // "" if not using IP endpoint style
}

// isIPEndpointStyle checks if the URL's host is an IP address, in which case the storage account endpoint is composed as:
// http(s)://IP(:port)/storageaccount/container/...
// Like url.URL's Host field, host may be either "host" or "host:port".
func isIPEndpointStyle(host string) bool {
	if host == "" {
		return false
	}
	if h, _, err := net.SplitHostPort(host); err == nil {
		host = h
	}
	// For IPv6, SplitHostPort can fail because it cannot find a port.
	// In this case, eliminate the '[' and ']' in the URL.
	// For details about IPv6 URLs, please refer to https://tools.ietf.org/html/rfc2732
	if host[0] == '[' && host[len(host)-1] == ']' {
		host = host[1 : len(host)-1]
	}
	return net.ParseIP(host) != nil
}

// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other
// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object.
func NewBlobURLParts(u url.URL) BlobURLParts {
	up := BlobURLParts{
		Scheme: u.Scheme,
		Host:   u.Host,
	}

	// Find the container & blob names (if any)
	if u.Path != "" {
		path := u.Path
		if path[0] == '/' {
			path = path[1:] // If path starts with a slash, remove it
		}
		if isIPEndpointStyle(up.Host) {
			if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob
				up.IPEndpointStyleInfo.AccountName = path
			} else {
				up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes
				path = path[accountEndIndex+1:]                             // path refers to portion after the account name now (container & blob names)
			}
		}

		containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists)
		if containerEndIndex == -1 {                  // Slash not found; path has container name & no blob name
			up.ContainerName = path
		} else {
			up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes
			up.BlobName = path[containerEndIndex+1:]    // The blob name is after the container slash
		}
	}

	// Convert the query parameters to a case-sensitive map & trim whitespace
	paramsMap := u.Query()

	up.Snapshot = "" // Assume no snapshot
	if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok {
		up.Snapshot = snapshotStr[0]
		// If we recognized the query parameter, remove it from the map
		delete(paramsMap, snapshot)
	}
	up.SAS = newSASQueryParameters(paramsMap, true)
	up.UnparsedParams = paramsMap.Encode()
	return up
}

type caseInsensitiveValues url.Values // map[string][]string

func (values caseInsensitiveValues) Get(key string) ([]string, bool) {
	key = strings.ToLower(key)
	for k, v := range values {
		if strings.ToLower(k) == key {
			return v, true
		}
	}
	return []string{}, false
}

// URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery
// field contains the SAS, snapshot, and unparsed query parameters.
func (up BlobURLParts) URL() url.URL {
	path := ""
	if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" {
		path += "/" + up.IPEndpointStyleInfo.AccountName
	}
	// Concatenate container & blob names (if they exist)
	if up.ContainerName != "" {
		path += "/" + up.ContainerName
		if up.BlobName != "" {
			path += "/" + up.BlobName
		}
	}

	rawQuery := up.UnparsedParams

	// If no snapshot is initially provided, fill it in from the SAS query properties to help the user
	if up.Snapshot == "" && !up.SAS.snapshotTime.IsZero() {
		up.Snapshot = up.SAS.snapshotTime.Format(SnapshotTimeFormat)
	}

	// Concatenate blob snapshot query parameter (if it exists)
	if up.Snapshot != "" {
		if len(rawQuery) > 0 {
			rawQuery += "&"
		}
		rawQuery += snapshot + "=" + up.Snapshot
	}
	sas := up.SAS.Encode()
	if sas != "" {
		if len(rawQuery) > 0 {
			rawQuery += "&"
		}
		rawQuery += sas
	}
	u := url.URL{
		Scheme:   up.Scheme,
		Host:     up.Host,
		Path:     path,
		RawQuery: rawQuery,
	}
	return u
}
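NewBlobURLParts and URL() round-trip: parse, edit a field, and rebuild. A minimal sketch with a hypothetical URL (not part of the vendored file); note that unrecognized parameters such as foo=bar survive via UnparsedParams:

package main

import (
	"fmt"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	raw, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/dir/file.txt" +
		"?snapshot=2020-04-01T00:00:00.0000000Z&foo=bar")
	parts := azblob.NewBlobURLParts(*raw)
	fmt.Println(parts.ContainerName, parts.BlobName, parts.Snapshot)
	// mycontainer dir/file.txt 2020-04-01T00:00:00.0000000Z

	parts.Snapshot = "" // point at the base blob rather than the snapshot
	base := parts.URL() // the snapshot parameter is dropped; foo=bar is preserved
	fmt.Println(base.String())
}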
256
vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
generated
vendored
Normal file
@@ -0,0 +1,256 @@
package azblob

import (
	"bytes"
	"fmt"
	"strings"
	"time"
)

// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
type BlobSASSignatureValues struct {
	Version            string      `param:"sv"`  // If not specified, this defaults to SASVersion
	Protocol           SASProtocol `param:"spr"` // See the SASProtocol* constants
	StartTime          time.Time   `param:"st"`  // Not specified if IsZero
	ExpiryTime         time.Time   `param:"se"`  // Not specified if IsZero
	SnapshotTime       time.Time
	Permissions        string  `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String()
	IPRange            IPRange `param:"sip"`
	Identifier         string  `param:"si"`
	ContainerName      string
	BlobName           string // Use "" to create a Container SAS
	CacheControl       string // rscc
	ContentDisposition string // rscd
	ContentEncoding    string // rsce
	ContentLanguage    string // rscl
	ContentType        string // rsct
}

// NewSASQueryParameters uses an account's StorageAccountCredential to sign these signature values, producing
// the proper SAS query parameters.
// See: StorageAccountCredential. Compatible with both UserDelegationCredential and SharedKeyCredential.
func (v BlobSASSignatureValues) NewSASQueryParameters(credential StorageAccountCredential) (SASQueryParameters, error) {
	resource := "c"
	if credential == nil {
		return SASQueryParameters{}, fmt.Errorf("cannot sign SAS query without StorageAccountCredential")
	}

	if !v.SnapshotTime.IsZero() {
		resource = "bs"
		// Make sure the permission characters are in the correct order
		perms := &BlobSASPermissions{}
		if err := perms.Parse(v.Permissions); err != nil {
			return SASQueryParameters{}, err
		}
		v.Permissions = perms.String()
	} else if v.BlobName == "" {
		// Make sure the permission characters are in the correct order
		perms := &ContainerSASPermissions{}
		if err := perms.Parse(v.Permissions); err != nil {
			return SASQueryParameters{}, err
		}
		v.Permissions = perms.String()
	} else {
		resource = "b"
		// Make sure the permission characters are in the correct order
		perms := &BlobSASPermissions{}
		if err := perms.Parse(v.Permissions); err != nil {
			return SASQueryParameters{}, err
		}
		v.Permissions = perms.String()
	}
	if v.Version == "" {
		v.Version = SASVersion
	}
	startTime, expiryTime, snapshotTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime)

	signedIdentifier := v.Identifier

	udk := credential.getUDKParams()

	if udk != nil {
		udkStart, udkExpiry, _ := FormatTimesForSASSigning(udk.SignedStart, udk.SignedExpiry, time.Time{})
		// The signedIdentifier and the user delegation key occupy the same slot in the
		// string to sign, so for a user delegation SAS the key's fields are packed into that slot.
		signedIdentifier = strings.Join([]string{
			udk.SignedOid,
			udk.SignedTid,
			udkStart,
			udkExpiry,
			udk.SignedService,
			udk.SignedVersion,
		}, "\n")
	}

	// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
	stringToSign := strings.Join([]string{
		v.Permissions,
		startTime,
		expiryTime,
		getCanonicalName(credential.AccountName(), v.ContainerName, v.BlobName),
		signedIdentifier,
		v.IPRange.String(),
		string(v.Protocol),
		v.Version,
		resource,
		snapshotTime,         // signed timestamp
		v.CacheControl,       // rscc
		v.ContentDisposition, // rscd
		v.ContentEncoding,    // rsce
		v.ContentLanguage,    // rscl
		v.ContentType},       // rsct
		"\n")

	signature := credential.ComputeHMACSHA256(stringToSign)

	p := SASQueryParameters{
		// Common SAS parameters
		version:     v.Version,
		protocol:    v.Protocol,
		startTime:   v.StartTime,
		expiryTime:  v.ExpiryTime,
		permissions: v.Permissions,
		ipRange:     v.IPRange,

		// Container/Blob-specific SAS parameters
		resource:           resource,
		identifier:         v.Identifier,
		cacheControl:       v.CacheControl,
		contentDisposition: v.ContentDisposition,
		contentEncoding:    v.ContentEncoding,
		contentLanguage:    v.ContentLanguage,
		contentType:        v.ContentType,
		snapshotTime:       v.SnapshotTime,

		// Calculated SAS signature
		signature: signature,
	}

	// User delegation SAS specific parameters
	if udk != nil {
		p.signedOid = udk.SignedOid
		p.signedTid = udk.SignedTid
		p.signedStart = udk.SignedStart
		p.signedExpiry = udk.SignedExpiry
		p.signedService = udk.SignedService
		p.signedVersion = udk.SignedVersion
	}

	return p, nil
}

// getCanonicalName computes the canonical name for a container or blob resource for SAS signing.
func getCanonicalName(account string, containerName string, blobName string) string {
	// Container: "/blob/account/containername"
	// Blob: "/blob/account/containername/blobname"
	elements := []string{"/blob/", account, "/", containerName}
	if blobName != "" {
		elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1))
	}
	return strings.Join(elements, "")
}

// The ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
type ContainerSASPermissions struct {
	Read, Add, Create, Write, Delete, List bool
}

// String produces the SAS permissions string for an Azure Storage container.
// Call this method to set BlobSASSignatureValues's Permissions field.
func (p ContainerSASPermissions) String() string {
	var b bytes.Buffer
	if p.Read {
		b.WriteRune('r')
	}
	if p.Add {
		b.WriteRune('a')
	}
	if p.Create {
		b.WriteRune('c')
	}
	if p.Write {
		b.WriteRune('w')
	}
	if p.Delete {
		b.WriteRune('d')
	}
	if p.List {
		b.WriteRune('l')
	}
	return b.String()
}

// Parse initializes the ContainerSASPermissions's fields from a string.
func (p *ContainerSASPermissions) Parse(s string) error {
	*p = ContainerSASPermissions{} // Clear the flags
	for _, r := range s {
		switch r {
		case 'r':
			p.Read = true
		case 'a':
			p.Add = true
		case 'c':
			p.Create = true
		case 'w':
			p.Write = true
		case 'd':
			p.Delete = true
		case 'l':
			p.List = true
		default:
			return fmt.Errorf("Invalid permission: '%v'", r)
		}
	}
	return nil
}

// The BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
type BlobSASPermissions struct{ Read, Add, Create, Write, Delete bool }

// String produces the SAS permissions string for an Azure Storage blob.
// Call this method to set BlobSASSignatureValues's Permissions field.
func (p BlobSASPermissions) String() string {
	var b bytes.Buffer
	if p.Read {
		b.WriteRune('r')
	}
	if p.Add {
		b.WriteRune('a')
	}
	if p.Create {
		b.WriteRune('c')
	}
	if p.Write {
		b.WriteRune('w')
	}
	if p.Delete {
		b.WriteRune('d')
	}
	return b.String()
}

// Parse initializes the BlobSASPermissions's fields from a string.
func (p *BlobSASPermissions) Parse(s string) error {
	*p = BlobSASPermissions{} // Clear the flags
	for _, r := range s {
		switch r {
		case 'r':
			p.Read = true
		case 'a':
			p.Add = true
		case 'c':
			p.Create = true
		case 'w':
			p.Write = true
		case 'd':
			p.Delete = true
		default:
			return fmt.Errorf("Invalid permission: '%v'", r)
		}
	}
	return nil
}
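A hedged sketch of generating a read-only blob SAS with these signature values (account name/key, container, and blob are placeholders; this assumes the vendored version of NewSharedKeyCredential that returns an error, and that SharedKeyCredential satisfies StorageAccountCredential as the comment above states):

package example

import (
	"fmt"
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func makeBlobSAS(accountName, accountKey string) (string, error) {
	cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		return "", err
	}
	qp, err := azblob.BlobSASSignatureValues{
		Protocol:      azblob.SASProtocolHTTPS,
		ExpiryTime:    time.Now().UTC().Add(4 * time.Hour),
		ContainerName: "mycontainer",
		BlobName:      "myblob", // "" here would produce a container SAS instead
		Permissions:   azblob.BlobSASPermissions{Read: true}.String(),
	}.NewSASQueryParameters(cred)
	if err != nil {
		return "", err
	}
	// Append the encoded SAS to the blob URL to get a shareable link.
	return fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/myblob?%s",
		accountName, qp.Encode()), nil
}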
195
vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go
generated
vendored
Normal file
@@ -0,0 +1,195 @@
package azblob

// https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes

// ServiceCode values indicate a service failure.
const (
	// ServiceCodeAppendPositionConditionNotMet means the append position condition specified was not met.
	ServiceCodeAppendPositionConditionNotMet ServiceCodeType = "AppendPositionConditionNotMet"

	// ServiceCodeBlobAlreadyExists means the specified blob already exists.
	ServiceCodeBlobAlreadyExists ServiceCodeType = "BlobAlreadyExists"

	// ServiceCodeBlobNotFound means the specified blob does not exist.
	ServiceCodeBlobNotFound ServiceCodeType = "BlobNotFound"

	// ServiceCodeBlobOverwritten means the blob has been recreated since the previous snapshot was taken.
	ServiceCodeBlobOverwritten ServiceCodeType = "BlobOverwritten"

	// ServiceCodeBlobTierInadequateForContentLength means the specified blob tier size limit cannot be less than content length.
	ServiceCodeBlobTierInadequateForContentLength ServiceCodeType = "BlobTierInadequateForContentLength"

	// ServiceCodeBlockCountExceedsLimit means the committed block count cannot exceed the maximum limit of 50,000 blocks
	// or that the uncommitted block count cannot exceed the maximum limit of 100,000 blocks.
	ServiceCodeBlockCountExceedsLimit ServiceCodeType = "BlockCountExceedsLimit"

	// ServiceCodeBlockListTooLong means the block list may not contain more than 50,000 blocks.
	ServiceCodeBlockListTooLong ServiceCodeType = "BlockListTooLong"

	// ServiceCodeCannotChangeToLowerTier means that a higher blob tier has already been explicitly set.
	ServiceCodeCannotChangeToLowerTier ServiceCodeType = "CannotChangeToLowerTier"

	// ServiceCodeCannotVerifyCopySource means that the service could not verify the copy source within the specified time.
	// Examine the HTTP status code and message for more information about the failure.
	ServiceCodeCannotVerifyCopySource ServiceCodeType = "CannotVerifyCopySource"

	// ServiceCodeContainerAlreadyExists means the specified container already exists.
	ServiceCodeContainerAlreadyExists ServiceCodeType = "ContainerAlreadyExists"

	// ServiceCodeContainerBeingDeleted means the specified container is being deleted.
	ServiceCodeContainerBeingDeleted ServiceCodeType = "ContainerBeingDeleted"

	// ServiceCodeContainerDisabled means the specified container has been disabled by the administrator.
	ServiceCodeContainerDisabled ServiceCodeType = "ContainerDisabled"

	// ServiceCodeContainerNotFound means the specified container does not exist.
	ServiceCodeContainerNotFound ServiceCodeType = "ContainerNotFound"

	// ServiceCodeContentLengthLargerThanTierLimit means the blob's content length cannot exceed its tier limit.
	ServiceCodeContentLengthLargerThanTierLimit ServiceCodeType = "ContentLengthLargerThanTierLimit"

	// ServiceCodeCopyAcrossAccountsNotSupported means the copy source account and destination account must be the same.
	ServiceCodeCopyAcrossAccountsNotSupported ServiceCodeType = "CopyAcrossAccountsNotSupported"

	// ServiceCodeCopyIDMismatch means the specified copy ID did not match the copy ID for the pending copy operation.
	ServiceCodeCopyIDMismatch ServiceCodeType = "CopyIdMismatch"

	// ServiceCodeFeatureVersionMismatch means the type of blob in the container is unrecognized by this version or
	// that the operation for AppendBlob requires at least version 2015-02-21.
	ServiceCodeFeatureVersionMismatch ServiceCodeType = "FeatureVersionMismatch"

	// ServiceCodeIncrementalCopyBlobMismatch means the specified source blob is different than the copy source of the existing incremental copy blob.
	ServiceCodeIncrementalCopyBlobMismatch ServiceCodeType = "IncrementalCopyBlobMismatch"

	// ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob.
	ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"

	// ServiceCodeIncrementalCopySourceMustBeSnapshot means the source for incremental copy request must be a snapshot.
	ServiceCodeIncrementalCopySourceMustBeSnapshot ServiceCodeType = "IncrementalCopySourceMustBeSnapshot"

	// ServiceCodeInfiniteLeaseDurationRequired means the lease ID matched, but the specified lease must be an infinite-duration lease.
	ServiceCodeInfiniteLeaseDurationRequired ServiceCodeType = "InfiniteLeaseDurationRequired"

	// ServiceCodeInvalidBlobOrBlock means the specified blob or block content is invalid.
	ServiceCodeInvalidBlobOrBlock ServiceCodeType = "InvalidBlobOrBlock"

	// ServiceCodeInvalidBlobType means the blob type is invalid for this operation.
	ServiceCodeInvalidBlobType ServiceCodeType = "InvalidBlobType"

	// ServiceCodeInvalidBlockID means the specified block ID is invalid. The block ID must be Base64-encoded.
	ServiceCodeInvalidBlockID ServiceCodeType = "InvalidBlockId"

	// ServiceCodeInvalidBlockList means the specified block list is invalid.
	ServiceCodeInvalidBlockList ServiceCodeType = "InvalidBlockList"

	// ServiceCodeInvalidOperation means an invalid operation against a blob snapshot.
	ServiceCodeInvalidOperation ServiceCodeType = "InvalidOperation"

	// ServiceCodeInvalidPageRange means the page range specified is invalid.
	ServiceCodeInvalidPageRange ServiceCodeType = "InvalidPageRange"

	// ServiceCodeInvalidSourceBlobType means the copy source blob type is invalid for this operation.
	ServiceCodeInvalidSourceBlobType ServiceCodeType = "InvalidSourceBlobType"

	// ServiceCodeInvalidSourceBlobURL means the source URL for incremental copy request must be valid Azure Storage blob URL.
	ServiceCodeInvalidSourceBlobURL ServiceCodeType = "InvalidSourceBlobUrl"

	// ServiceCodeInvalidVersionForPageBlobOperation means that all operations on page blobs require at least version 2009-09-19.
	ServiceCodeInvalidVersionForPageBlobOperation ServiceCodeType = "InvalidVersionForPageBlobOperation"

	// ServiceCodeLeaseAlreadyPresent means there is already a lease present.
	ServiceCodeLeaseAlreadyPresent ServiceCodeType = "LeaseAlreadyPresent"

	// ServiceCodeLeaseAlreadyBroken means the lease has already been broken and cannot be broken again.
	ServiceCodeLeaseAlreadyBroken ServiceCodeType = "LeaseAlreadyBroken"

	// ServiceCodeLeaseIDMismatchWithBlobOperation means the lease ID specified did not match the lease ID for the blob.
	ServiceCodeLeaseIDMismatchWithBlobOperation ServiceCodeType = "LeaseIdMismatchWithBlobOperation"

	// ServiceCodeLeaseIDMismatchWithContainerOperation means the lease ID specified did not match the lease ID for the container.
	ServiceCodeLeaseIDMismatchWithContainerOperation ServiceCodeType = "LeaseIdMismatchWithContainerOperation"

	// ServiceCodeLeaseIDMismatchWithLeaseOperation means the lease ID specified did not match the lease ID for the blob/container.
	ServiceCodeLeaseIDMismatchWithLeaseOperation ServiceCodeType = "LeaseIdMismatchWithLeaseOperation"

	// ServiceCodeLeaseIDMissing means there is currently a lease on the blob/container and no lease ID was specified in the request.
	ServiceCodeLeaseIDMissing ServiceCodeType = "LeaseIdMissing"

	// ServiceCodeLeaseIsBreakingAndCannotBeAcquired means the lease ID matched, but the lease is currently in breaking state and cannot be acquired until it is broken.
	ServiceCodeLeaseIsBreakingAndCannotBeAcquired ServiceCodeType = "LeaseIsBreakingAndCannotBeAcquired"

	// ServiceCodeLeaseIsBreakingAndCannotBeChanged means the lease ID matched, but the lease is currently in breaking state and cannot be changed.
	ServiceCodeLeaseIsBreakingAndCannotBeChanged ServiceCodeType = "LeaseIsBreakingAndCannotBeChanged"

	// ServiceCodeLeaseIsBrokenAndCannotBeRenewed means the lease ID matched, but the lease has been broken explicitly and cannot be renewed.
	ServiceCodeLeaseIsBrokenAndCannotBeRenewed ServiceCodeType = "LeaseIsBrokenAndCannotBeRenewed"

	// ServiceCodeLeaseLost means a lease ID was specified, but the lease for the blob/container has expired.
	ServiceCodeLeaseLost ServiceCodeType = "LeaseLost"

	// ServiceCodeLeaseNotPresentWithBlobOperation means there is currently no lease on the blob.
	ServiceCodeLeaseNotPresentWithBlobOperation ServiceCodeType = "LeaseNotPresentWithBlobOperation"

	// ServiceCodeLeaseNotPresentWithContainerOperation means there is currently no lease on the container.
	ServiceCodeLeaseNotPresentWithContainerOperation ServiceCodeType = "LeaseNotPresentWithContainerOperation"

	// ServiceCodeLeaseNotPresentWithLeaseOperation means there is currently no lease on the blob/container.
	ServiceCodeLeaseNotPresentWithLeaseOperation ServiceCodeType = "LeaseNotPresentWithLeaseOperation"

	// ServiceCodeMaxBlobSizeConditionNotMet means the max blob size condition specified was not met.
	ServiceCodeMaxBlobSizeConditionNotMet ServiceCodeType = "MaxBlobSizeConditionNotMet"

	// ServiceCodeNoPendingCopyOperation means there is currently no pending copy operation.
	ServiceCodeNoPendingCopyOperation ServiceCodeType = "NoPendingCopyOperation"

	// ServiceCodeOperationNotAllowedOnIncrementalCopyBlob means the specified operation is not allowed on an incremental copy blob.
	ServiceCodeOperationNotAllowedOnIncrementalCopyBlob ServiceCodeType = "OperationNotAllowedOnIncrementalCopyBlob"

	// ServiceCodePendingCopyOperation means there is currently a pending copy operation.
	ServiceCodePendingCopyOperation ServiceCodeType = "PendingCopyOperation"

	// ServiceCodePreviousSnapshotCannotBeNewer means the prevsnapshot query parameter value cannot be newer than snapshot query parameter value.
	ServiceCodePreviousSnapshotCannotBeNewer ServiceCodeType = "PreviousSnapshotCannotBeNewer"

	// ServiceCodePreviousSnapshotNotFound means the previous snapshot is not found.
	ServiceCodePreviousSnapshotNotFound ServiceCodeType = "PreviousSnapshotNotFound"

	// ServiceCodePreviousSnapshotOperationNotSupported means that differential Get Page Ranges is not supported on the previous snapshot.
	ServiceCodePreviousSnapshotOperationNotSupported ServiceCodeType = "PreviousSnapshotOperationNotSupported"

	// ServiceCodeSequenceNumberConditionNotMet means the sequence number condition specified was not met.
	ServiceCodeSequenceNumberConditionNotMet ServiceCodeType = "SequenceNumberConditionNotMet"

	// ServiceCodeSequenceNumberIncrementTooLarge means the sequence number increment cannot be performed because it would result in overflow of the sequence number.
	ServiceCodeSequenceNumberIncrementTooLarge ServiceCodeType = "SequenceNumberIncrementTooLarge"

	// ServiceCodeSnapshotCountExceeded means the snapshot count against this blob has been exceeded.
	ServiceCodeSnapshotCountExceeded ServiceCodeType = "SnapshotCountExceeded"

	// ServiceCodeSnaphotOperationRateExceeded means the rate of snapshot operations against this blob has been exceeded.
	ServiceCodeSnaphotOperationRateExceeded ServiceCodeType = "SnaphotOperationRateExceeded"

	// ServiceCodeSnapshotsPresent means this operation is not permitted while the blob has snapshots.
	ServiceCodeSnapshotsPresent ServiceCodeType = "SnapshotsPresent"

	// ServiceCodeSourceConditionNotMet means the source condition specified using HTTP conditional header(s) is not met.
	ServiceCodeSourceConditionNotMet ServiceCodeType = "SourceConditionNotMet"

	// ServiceCodeSystemInUse means this blob is in use by the system.
	ServiceCodeSystemInUse ServiceCodeType = "SystemInUse"

	// ServiceCodeTargetConditionNotMet means the target condition specified using HTTP conditional header(s) is not met.
	ServiceCodeTargetConditionNotMet ServiceCodeType = "TargetConditionNotMet"

	// ServiceCodeUnauthorizedBlobOverwrite means this request is not authorized to perform blob overwrites.
	ServiceCodeUnauthorizedBlobOverwrite ServiceCodeType = "UnauthorizedBlobOverwrite"

	// ServiceCodeBlobBeingRehydrated means this operation is not permitted because the blob is being rehydrated.
	ServiceCodeBlobBeingRehydrated ServiceCodeType = "BlobBeingRehydrated"

	// ServiceCodeBlobArchived means this operation is not permitted on an archived blob.
	ServiceCodeBlobArchived ServiceCodeType = "BlobArchived"

	// ServiceCodeBlobNotArchived means this blob is currently not in the archived state.
	ServiceCodeBlobNotArchived ServiceCodeType = "BlobNotArchived"
)
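These constants are typically compared against the ServiceCode() of a returned StorageError (the error interface defined elsewhere in this vendored package). A hedged sketch of an existence check built on ServiceCodeBlobNotFound:

package example

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func blobExists(ctx context.Context, b azblob.BlobURL) (bool, error) {
	_, err := b.GetProperties(ctx, azblob.BlobAccessConditions{})
	if err == nil {
		return true, nil
	}
	// StorageError carries the service-reported error code.
	if stgErr, ok := err.(azblob.StorageError); ok &&
		stgErr.ServiceCode() == azblob.ServiceCodeBlobNotFound {
		return false, nil
	}
	return false, err // network failure or a different service code
}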
8
vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
package azblob

// StorageAccountCredential is a wrapper interface for SharedKeyCredential and UserDelegationCredential
type StorageAccountCredential interface {
	AccountName() string
	ComputeHMACSHA256(message string) (base64String string)
	getUDKParams() *UserDelegationKey
}
138
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go
generated
vendored
Normal file
@@ -0,0 +1,138 @@
package azblob

import (
	"context"
	"io"
	"net/url"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

const (
	// AppendBlobMaxAppendBlockBytes indicates the maximum number of bytes that can be sent in a call to AppendBlock.
	AppendBlobMaxAppendBlockBytes = 4 * 1024 * 1024 // 4MB

	// AppendBlobMaxBlocks indicates the maximum number of blocks allowed in an append blob.
	AppendBlobMaxBlocks = 50000
)

// AppendBlobURL defines a set of operations applicable to append blobs.
type AppendBlobURL struct {
	BlobURL
	abClient appendBlobClient
}

// NewAppendBlobURL creates an AppendBlobURL object using the specified URL and request policy pipeline.
func NewAppendBlobURL(url url.URL, p pipeline.Pipeline) AppendBlobURL {
	blobClient := newBlobClient(url, p)
	abClient := newAppendBlobClient(url, p)
	return AppendBlobURL{BlobURL: BlobURL{blobClient: blobClient}, abClient: abClient}
}

// WithPipeline creates a new AppendBlobURL object identical to the source but with the specific request policy pipeline.
func (ab AppendBlobURL) WithPipeline(p pipeline.Pipeline) AppendBlobURL {
	return NewAppendBlobURL(ab.blobClient.URL(), p)
}

// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL {
	p := NewBlobURLParts(ab.URL())
	p.Snapshot = snapshot
	return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
}

// GetAccountInfo returns account information for the storage account containing this blob.
func (ab AppendBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
	return ab.blobClient.GetAccountInfo(ctx)
}

// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*AppendBlobCreateResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
	return ab.abClient.Create(ctx, 0, nil,
		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
		&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
		nil, nil, EncryptionAlgorithmNone, // CPK
		ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, nil)
}

// AppendBlock appends a new block of data from a stream to the end of the existing append blob.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers()
	count, err := validateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return nil, err
	}
	return ab.abClient.AppendBlock(ctx, body, count, nil,
		transactionalMD5,
		nil, // CRC
		ac.LeaseAccessConditions.pointers(),
		ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
		nil, nil, EncryptionAlgorithmNone, // CPK
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// AppendBlockFromURL copies a new block of data from a source URL to the end of the existing append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
func (ab AppendBlobURL) AppendBlockFromURL(ctx context.Context, sourceURL url.URL, offset int64, count int64, destinationAccessConditions AppendBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockFromURLResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers()
	sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
	ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := destinationAccessConditions.AppendPositionAccessConditions.pointers()
	return ab.abClient.AppendBlockFromURL(ctx, sourceURL.String(), 0, httpRange{offset: offset, count: count}.pointers(),
		transactionalMD5, nil, nil, nil,
		nil, nil, EncryptionAlgorithmNone, // CPK
		destinationAccessConditions.LeaseAccessConditions.pointers(),
		ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
}

// AppendBlobAccessConditions identifies access conditions which you optionally set on append blob operations.
type AppendBlobAccessConditions struct {
	ModifiedAccessConditions
	LeaseAccessConditions
	AppendPositionAccessConditions
}

// AppendPositionAccessConditions identifies append blob-specific access conditions which you optionally set.
type AppendPositionAccessConditions struct {
	// IfAppendPositionEqual ensures that the AppendBlock operation succeeds
	// only if the append position is equal to a value.
	// IfAppendPositionEqual=0 means no 'IfAppendPositionEqual' header specified.
	// IfAppendPositionEqual>0 means 'IfAppendPositionEqual' header specified with its value
	// IfAppendPositionEqual==-1 means 'IfAppendPositionEqual' header specified with a value of 0
	IfAppendPositionEqual int64

	// IfMaxSizeLessThanOrEqual ensures that the AppendBlock operation succeeds
	// only if the append blob's size is less than or equal to a value.
	// IfMaxSizeLessThanOrEqual=0 means no 'IfMaxSizeLessThanOrEqual' header specified.
	// IfMaxSizeLessThanOrEqual>0 means 'IfMaxSizeLessThanOrEqual' header specified with its value
	// IfMaxSizeLessThanOrEqual==-1 means 'IfMaxSizeLessThanOrEqual' header specified with a value of 0
	IfMaxSizeLessThanOrEqual int64
}

// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac AppendPositionAccessConditions) pointers() (iape *int64, imsltoe *int64) {
	var zero int64 // defaults to 0
	switch ac.IfAppendPositionEqual {
	case -1:
		iape = &zero
	case 0:
		iape = nil
	default:
		iape = &ac.IfAppendPositionEqual
	}

	switch ac.IfMaxSizeLessThanOrEqual {
	case -1:
		imsltoe = &zero
	case 0:
		imsltoe = nil
	default:
		imsltoe = &ac.IfMaxSizeLessThanOrEqual
	}
	return
}
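A hedged sketch tying Create, AppendBlock, and the service codes together: create the append blob if it does not exist yet, then append a line (the helper name is hypothetical; AppendBlock requires an io.ReadSeeker positioned at 0, which strings.NewReader satisfies):

package example

import (
	"context"
	"strings"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func appendLine(ctx context.Context, ab azblob.AppendBlobURL, line string) error {
	// Create the blob; tolerate the case where it already exists.
	_, err := ab.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
	if err != nil {
		stgErr, ok := err.(azblob.StorageError)
		if !ok || stgErr.ServiceCode() != azblob.ServiceCodeBlobAlreadyExists {
			return err
		}
	}
	_, err = ab.AppendBlock(ctx, strings.NewReader(line+"\n"),
		azblob.AppendBlobAccessConditions{}, nil)
	return err
}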
225
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go
generated
vendored
Normal file
@@ -0,0 +1,225 @@
package azblob

import (
	"context"
	"net/url"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

// A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
type BlobURL struct {
	blobClient blobClient
}

// NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline.
func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL {
	blobClient := newBlobClient(url, p)
	return BlobURL{blobClient: blobClient}
}

// URL returns the URL endpoint used by the BlobURL object.
func (b BlobURL) URL() url.URL {
	return b.blobClient.URL()
}

// String returns the URL as a string.
func (b BlobURL) String() string {
	u := b.URL()
	return u.String()
}

// GetAccountInfo returns account information for the storage account containing this blob.
func (b BlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
	return b.blobClient.GetAccountInfo(ctx)
}

// WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline.
func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL {
	return NewBlobURL(b.blobClient.URL(), p)
}

// WithSnapshot creates a new BlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (b BlobURL) WithSnapshot(snapshot string) BlobURL {
	p := NewBlobURLParts(b.URL())
	p.Snapshot = snapshot
	return NewBlobURL(p.URL(), b.blobClient.Pipeline())
}

// ToAppendBlobURL creates an AppendBlobURL using the source's URL and pipeline.
func (b BlobURL) ToAppendBlobURL() AppendBlobURL {
	return NewAppendBlobURL(b.URL(), b.blobClient.Pipeline())
}

// ToBlockBlobURL creates a BlockBlobURL using the source's URL and pipeline.
func (b BlobURL) ToBlockBlobURL() BlockBlobURL {
	return NewBlockBlobURL(b.URL(), b.blobClient.Pipeline())
}

// ToPageBlobURL creates a PageBlobURL using the source's URL and pipeline.
func (b BlobURL) ToPageBlobURL() PageBlobURL {
	return NewPageBlobURL(b.URL(), b.blobClient.Pipeline())
}

// Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// Passing azblob.CountToEnd (0) for count will download the blob from the offset to the end.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool) (*DownloadResponse, error) {
	var xRangeGetContentMD5 *bool
	if rangeGetContentMD5 {
		xRangeGetContentMD5 = &rangeGetContentMD5
	}
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	dr, err := b.blobClient.Download(ctx, nil, nil,
		httpRange{offset: offset, count: count}.pointers(),
		ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5, nil,
		nil, nil, EncryptionAlgorithmNone, // CPK
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
	if err != nil {
		return nil, err
	}
	return &DownloadResponse{
		b:       b,
		r:       dr,
		ctx:     ctx,
		getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: dr.ETag()},
	}, err
}

// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
// Note that deleting a blob also deletes all its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return b.blobClient.Delete(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) {
	return b.blobClient.Undelete(ctx, nil, nil)
}

// SetTier sets the tier on a blob. The operation is allowed on a page
// blob in a premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
// does not update the blob's ETag.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAccessConditions) (*BlobSetTierResponse, error) {
	return b.blobClient.SetTier(ctx, tier, nil, RehydratePriorityNone, nil, lac.pointers())
}

// GetProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*BlobGetPropertiesResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(),
		nil, nil, EncryptionAlgorithmNone, // CPK
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return b.blobClient.SetHTTPHeaders(ctx, nil,
		&h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage,
		ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		&h.ContentDisposition, nil)
}

// SetMetadata changes a blob's metadata.
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobSetMetadataResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(),
		nil, nil, EncryptionAlgorithmNone, // CPK
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// CreateSnapshot creates a read-only snapshot of a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobCreateSnapshotResponse, error) {
	// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter
	// because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this
	// performance hit.
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return b.blobClient.CreateSnapshot(ctx, nil, metadata,
		nil, nil, EncryptionAlgorithmNone, // CPK
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil)
}

// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between
// 15 and 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*BlobAcquireLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
	return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// RenewLease renews the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobRenewLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
	return b.blobClient.RenewLease(ctx, leaseID, nil,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// ReleaseLease releases the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobReleaseLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
	return b.blobClient.ReleaseLease(ctx, leaseID, nil,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
// constant to break a fixed-duration lease when it expires or an infinite lease immediately.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac ModifiedAccessConditions) (*BlobBreakLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
	return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds),
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// ChangeLease changes the blob's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*BlobChangeLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
	return b.blobClient.ChangeLease(ctx, leaseID, proposedID,
		nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// LeaseBreakNaturally tells ContainerURL's or BlobURL's BreakLease method to break the lease using service semantics.
const LeaseBreakNaturally = -1

func leasePeriodPointer(period int32) (p *int32) {
	if period != LeaseBreakNaturally {
		p = &period
	}
	return p
}

// StartCopyFromURL copies the data at the source URL to a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions) (*BlobStartCopyFromURLResponse, error) {
	srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
	dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
	dstLeaseID := dstac.LeaseAccessConditions.pointers()

	return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata,
		AccessTierNone, RehydratePriorityNone, srcIfModifiedSince, srcIfUnmodifiedSince,
		srcIfMatchETag, srcIfNoneMatchETag,
		dstIfModifiedSince, dstIfUnmodifiedSince,
		dstIfMatchETag, dstIfNoneMatchETag,
		dstLeaseID, nil)
}

// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
func (b BlobURL) AbortCopyFromURL(ctx context.Context, copyID string, ac LeaseAccessConditions) (*BlobAbortCopyFromURLResponse, error) {
	return b.blobClient.AbortCopyFromURL(ctx, copyID, nil, ac.pointers(), nil)
}
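The lease methods above combine with LeaseAccessConditions: once a lease is held, its ID must accompany writes and deletes. A hedged sketch (the GUID and helper name are placeholders; any valid GUID may be proposed):

package example

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func deleteWithLease(ctx context.Context, b azblob.BlobURL) error {
	const proposedID = "f81d4fae-7dec-11d0-a765-00a0c91e6bf6" // any GUID works
	acq, err := b.AcquireLease(ctx, proposedID, 15, azblob.ModifiedAccessConditions{})
	if err != nil {
		return err
	}
	// Pass the lease ID as an access condition so the delete is permitted.
	_, err = b.Delete(ctx, azblob.DeleteSnapshotsOptionInclude,
		azblob.BlobAccessConditions{
			LeaseAccessConditions: azblob.LeaseAccessConditions{LeaseID: acq.LeaseID()},
		})
	return err
}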
134
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go
generated
vendored
Normal file
@@ -0,0 +1,134 @@
|
||||
package azblob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/url"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
const (
|
||||
// BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
|
||||
BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
|
||||
|
||||
// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
|
||||
BlockBlobMaxStageBlockBytes = 100 * 1024 * 1024 // 100MB
|
||||
|
||||
// BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob.
|
||||
BlockBlobMaxBlocks = 50000
|
||||
)
|
||||
|
||||
// BlockBlobURL defines a set of operations applicable to block blobs.
|
||||
type BlockBlobURL struct {
|
||||
BlobURL
|
||||
bbClient blockBlobClient
|
||||
}
|
||||
|
||||
// NewBlockBlobURL creates a BlockBlobURL object using the specified URL and request policy pipeline.
|
||||
func NewBlockBlobURL(url url.URL, p pipeline.Pipeline) BlockBlobURL {
|
||||
blobClient := newBlobClient(url, p)
|
||||
bbClient := newBlockBlobClient(url, p)
|
||||
return BlockBlobURL{BlobURL: BlobURL{blobClient: blobClient}, bbClient: bbClient}
|
||||
}
|
||||
|
||||
// WithPipeline creates a new BlockBlobURL object identical to the source but with the specific request policy pipeline.
|
||||
func (bb BlockBlobURL) WithPipeline(p pipeline.Pipeline) BlockBlobURL {
|
||||
return NewBlockBlobURL(bb.blobClient.URL(), p)
}

// WithSnapshot creates a new BlockBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL {
    p := NewBlobURLParts(bb.URL())
    p.Snapshot = snapshot
    return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
}

func (bb BlockBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
    return bb.blobClient.GetAccountInfo(ctx)
}

// Upload creates a new block blob or overwrites an existing block blob.
// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
// supported with Upload; the content of the existing blob is overwritten with the new content. To
// perform a partial update of a block blob, use StageBlock and CommitBlockList.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobUploadResponse, error) {
    ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
    count, err := validateSeekableStreamAt0AndGetCount(body)
    if err != nil {
        return nil, err
    }
    return bb.bbClient.Upload(ctx, body, count, nil, nil,
        &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
        &h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
        nil, nil, EncryptionAlgorithmNone, // CPK
        AccessTierNone, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
        nil)
}

// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte) (*BlockBlobStageBlockResponse, error) {
    count, err := validateSeekableStreamAt0AndGetCount(body)
    if err != nil {
        return nil, err
    }
    return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, nil, ac.pointers(),
        nil, nil, EncryptionAlgorithmNone, // CPK
        nil)
}

// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// If count is CountToEnd (0), then data is read from specified offset to the end.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, destinationAccessConditions LeaseAccessConditions, sourceAccessConditions ModifiedAccessConditions) (*BlockBlobStageBlockFromURLResponse, error) {
    sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
    return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, nil,
        nil, nil, EncryptionAlgorithmNone, // CPK
        destinationAccessConditions.pointers(), sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
}

// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
// In order to be written as part of a blob, a block must have been successfully written
// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
// by uploading only those blocks that have changed, then committing the new and existing
// blocks together. Any blocks not specified in the block list are permanently deleted.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders,
    metadata Metadata, ac BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) {
    ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
    return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
        &h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, nil, nil,
        metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
        nil, nil, EncryptionAlgorithmNone, // CPK
        AccessTierNone,
        ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
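
// A hypothetical usage sketch (illustration only, not part of the vendored
// API surface): the StageBlock/CommitBlockList flow described above, assuming
// ctx, a pipeline p, a blob URL u, and base64-encoded block IDs id1 and id2.
//
//    blob := NewBlockBlobURL(u, p)
//    _, _ = blob.StageBlock(ctx, id1, strings.NewReader("part one"), LeaseAccessConditions{}, nil)
//    _, _ = blob.StageBlock(ctx, id2, strings.NewReader("part two"), LeaseAccessConditions{}, nil)
//    // Committing [id1, id2] makes the blob visible; staged blocks left out of the list are discarded.
//    _, _ = blob.CommitBlockList(ctx, []string{id1, id2}, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})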

// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
func (bb BlockBlobURL) GetBlockList(ctx context.Context, listType BlockListType, ac LeaseAccessConditions) (*BlockList, error) {
    return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(), nil)
}

// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
func (bb BlockBlobURL) CopyFromURL(ctx context.Context, source url.URL, metadata Metadata,
    srcac ModifiedAccessConditions, dstac BlobAccessConditions, srcContentMD5 []byte) (*BlobCopyFromURLResponse, error) {

    srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
    dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
    dstLeaseID := dstac.LeaseAccessConditions.pointers()

    return bb.blobClient.CopyFromURL(ctx, source.String(), nil, metadata, AccessTierNone,
        srcIfModifiedSince, srcIfUnmodifiedSince,
        srcIfMatchETag, srcIfNoneMatchETag,
        dstIfModifiedSince, dstIfUnmodifiedSince,
        dstIfMatchETag, dstIfNoneMatchETag,
        dstLeaseID, nil, srcContentMD5)
}
299
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go
generated
vendored
Normal file
@@ -0,0 +1,299 @@
package azblob

import (
    "bytes"
    "context"
    "errors"
    "fmt"
    "net/url"

    "github.com/Azure/azure-pipeline-go/pipeline"
)

// A ContainerURL represents a URL to the Azure Storage container allowing you to manipulate its blobs.
type ContainerURL struct {
    client containerClient
}

// NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline.
func NewContainerURL(url url.URL, p pipeline.Pipeline) ContainerURL {
    client := newContainerClient(url, p)
    return ContainerURL{client: client}
}

// URL returns the URL endpoint used by the ContainerURL object.
func (c ContainerURL) URL() url.URL {
    return c.client.URL()
}

// String returns the URL as a string.
func (c ContainerURL) String() string {
    u := c.URL()
    return u.String()
}

func (c ContainerURL) GetAccountInfo(ctx context.Context) (*ContainerGetAccountInfoResponse, error) {
    return c.client.GetAccountInfo(ctx)
}

// WithPipeline creates a new ContainerURL object identical to the source but with the specified request policy pipeline.
func (c ContainerURL) WithPipeline(p pipeline.Pipeline) ContainerURL {
    return NewContainerURL(c.URL(), p)
}

// NewBlobURL creates a new BlobURL object by concatenating blobName to the end of
// ContainerURL's URL. The new BlobURL uses the same request policy pipeline as the ContainerURL.
// To change the pipeline, create the BlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewBlobURL instead of calling this object's
// NewBlobURL method.
func (c ContainerURL) NewBlobURL(blobName string) BlobURL {
    blobURL := appendToURLPath(c.URL(), blobName)
    return NewBlobURL(blobURL, c.client.Pipeline())
}

// NewAppendBlobURL creates a new AppendBlobURL object by concatenating blobName to the end of
// ContainerURL's URL. The new AppendBlobURL uses the same request policy pipeline as the ContainerURL.
// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewAppendBlobURL instead of calling this object's
// NewAppendBlobURL method.
func (c ContainerURL) NewAppendBlobURL(blobName string) AppendBlobURL {
    blobURL := appendToURLPath(c.URL(), blobName)
    return NewAppendBlobURL(blobURL, c.client.Pipeline())
}

// NewBlockBlobURL creates a new BlockBlobURL object by concatenating blobName to the end of
// ContainerURL's URL. The new BlockBlobURL uses the same request policy pipeline as the ContainerURL.
// To change the pipeline, create the BlockBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewBlockBlobURL instead of calling this object's
// NewBlockBlobURL method.
func (c ContainerURL) NewBlockBlobURL(blobName string) BlockBlobURL {
    blobURL := appendToURLPath(c.URL(), blobName)
    return NewBlockBlobURL(blobURL, c.client.Pipeline())
}

// NewPageBlobURL creates a new PageBlobURL object by concatenating blobName to the end of
// ContainerURL's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerURL.
// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewPageBlobURL instead of calling this object's
// NewPageBlobURL method.
func (c ContainerURL) NewPageBlobURL(blobName string) PageBlobURL {
    blobURL := appendToURLPath(c.URL(), blobName)
    return NewPageBlobURL(blobURL, c.client.Pipeline())
}

// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAccessType PublicAccessType) (*ContainerCreateResponse, error) {
    return c.client.Create(ctx, nil, metadata, publicAccessType, nil)
}

// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainerDeleteResponse, error) {
    if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
        return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
    }

    ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
    return c.client.Delete(ctx, nil, ac.LeaseAccessConditions.pointers(),
        ifModifiedSince, ifUnmodifiedSince, nil)
}

// GetProperties returns the container's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata.
func (c ContainerURL) GetProperties(ctx context.Context, ac LeaseAccessConditions) (*ContainerGetPropertiesResponse, error) {
    // NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties.
    // This allows us to not expose a separate GetMetadata method at all, simplifying the API.
    return c.client.GetProperties(ctx, nil, ac.pointers(), nil)
}

// SetMetadata sets the container's metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainerSetMetadataResponse, error) {
    if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
        return nil, errors.New("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service")
    }
    ifModifiedSince, _, _, _ := ac.ModifiedAccessConditions.pointers()
    return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil)
}

// GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl.
func (c ContainerURL) GetAccessPolicy(ctx context.Context, ac LeaseAccessConditions) (*SignedIdentifiers, error) {
    return c.client.GetAccessPolicy(ctx, nil, ac.pointers(), nil)
}

// The AccessPolicyPermission type simplifies creating the permissions string for a container's access policy.
// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field.
type AccessPolicyPermission struct {
    Read, Add, Create, Write, Delete, List bool
}

// String produces the access policy permission string for an Azure Storage container.
// Call this method to set AccessPolicy's Permission field.
func (p AccessPolicyPermission) String() string {
    var b bytes.Buffer
    if p.Read {
        b.WriteRune('r')
    }
    if p.Add {
        b.WriteRune('a')
    }
    if p.Create {
        b.WriteRune('c')
    }
    if p.Write {
        b.WriteRune('w')
    }
    if p.Delete {
        b.WriteRune('d')
    }
    if p.List {
        b.WriteRune('l')
    }
    return b.String()
}

// Parse initializes the AccessPolicyPermission's fields from a string.
func (p *AccessPolicyPermission) Parse(s string) error {
    *p = AccessPolicyPermission{} // Clear the flags
    for _, r := range s {
        switch r {
        case 'r':
            p.Read = true
        case 'a':
            p.Add = true
        case 'c':
            p.Create = true
        case 'w':
            p.Write = true
        case 'd':
            p.Delete = true
        case 'l':
            p.List = true
        default:
            return fmt.Errorf("invalid permission: '%v'", r)
        }
    }
    return nil
}
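
// A hypothetical round-trip sketch (illustration only, not vendored code):
// String and Parse are inverses over the permission flags shown above.
//
//    perm := AccessPolicyPermission{Read: true, Write: true, List: true}
//    s := perm.String() // "rwl" (flags are emitted in the fixed order racwdl)
//    var parsed AccessPolicyPermission
//    _ = parsed.Parse(s) // parsed now equals perm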

// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl.
func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAccessType, si []SignedIdentifier,
    ac ContainerAccessConditions) (*ContainerSetAccessPolicyResponse, error) {
    if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
        return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
    }
    ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
    return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(),
        accessType, ifModifiedSince, ifUnmodifiedSince, nil)
}

// AcquireLease acquires a lease on the container for delete operations. The lease duration must be between 15 and 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*ContainerAcquireLeaseResponse, error) {
    ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
    return c.client.AcquireLease(ctx, nil, &duration, &proposedID,
        ifModifiedSince, ifUnmodifiedSince, nil)
}

// RenewLease renews the container's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerRenewLeaseResponse, error) {
    ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
    return c.client.RenewLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
}

// ReleaseLease releases the container's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerReleaseLeaseResponse, error) {
    ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
    return c.client.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
}

// BreakLease breaks the container's previously-acquired lease (if it exists).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac ModifiedAccessConditions) (*ContainerBreakLeaseResponse, error) {
    ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
    return c.client.BreakLease(ctx, nil, leasePeriodPointer(period), ifModifiedSince, ifUnmodifiedSince, nil)
}

// ChangeLease changes the container's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*ContainerChangeLeaseResponse, error) {
    ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
    return c.client.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
}

// ListBlobsFlatSegment returns a single segment of blobs starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
// After getting a segment, process it, and then call ListBlobsFlatSegment again (passing the
// previously-returned Marker) to get the next segment.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatSegmentResponse, error) {
    prefix, include, maxResults := o.pointers()
    return c.client.ListBlobFlatSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
}
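
// A hypothetical paging sketch (illustration only): drain a container by
// looping until the Marker reports no more segments. It assumes ctx, a
// ContainerURL named container, and the Marker/response helpers from this
// package (Marker.NotDone, Segment.BlobItems, NextMarker).
//
//    for marker := (Marker{}); marker.NotDone(); {
//        segment, err := container.ListBlobsFlatSegment(ctx, marker, ListBlobsSegmentOptions{})
//        if err != nil {
//            break // handle the error in real code
//        }
//        for _, blob := range segment.Segment.BlobItems {
//            _ = blob.Name // process each blob
//        }
//        marker = segment.NextMarker
//    }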

// ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
// After getting a segment, process it, and then call ListBlobsHierarchySegment again (passing the
// previously-returned Marker) to get the next segment.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Marker, delimiter string, o ListBlobsSegmentOptions) (*ListBlobsHierarchySegmentResponse, error) {
    if o.Details.Snapshots {
        return nil, errors.New("snapshots are not supported in this listing operation")
    }
    prefix, include, maxResults := o.pointers()
    return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.Val, maxResults, include, nil, nil)
}

// ListBlobsSegmentOptions defines options available when calling ListBlobs.
type ListBlobsSegmentOptions struct {
    Details BlobListingDetails // No IncludeType header is produced if ""
    Prefix  string             // No Prefix header is produced if ""

    // MaxResults sets the maximum desired results you want the service to return. Note, the
    // service may return fewer results than requested.
    // MaxResults=0 means no 'MaxResults' header specified.
    MaxResults int32
}

func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlobsIncludeItemType, maxResults *int32) {
    if o.Prefix != "" {
        prefix = &o.Prefix
    }
    include = o.Details.slice()
    if o.MaxResults != 0 {
        maxResults = &o.MaxResults
    }
    return
}

// BlobListingDetails indicates what additional information the service should return with each blob.
type BlobListingDetails struct {
    Copy, Metadata, Snapshots, UncommittedBlobs, Deleted bool
}

// slice produces the Include query parameter's value.
func (d *BlobListingDetails) slice() []ListBlobsIncludeItemType {
    items := []ListBlobsIncludeItemType{}
    // NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
    if d.Copy {
        items = append(items, ListBlobsIncludeItemCopy)
    }
    if d.Deleted {
        items = append(items, ListBlobsIncludeItemDeleted)
    }
    if d.Metadata {
        items = append(items, ListBlobsIncludeItemMetadata)
    }
    if d.Snapshots {
        items = append(items, ListBlobsIncludeItemSnapshots)
    }
    if d.UncommittedBlobs {
        items = append(items, ListBlobsIncludeItemUncommittedblobs)
    }
    return items
}
233
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go
generated
vendored
Normal file
@@ -0,0 +1,233 @@
package azblob

import (
    "context"
    "fmt"
    "io"
    "net/url"
    "strconv"

    "github.com/Azure/azure-pipeline-go/pipeline"
)

const (
    // PageBlobPageBytes indicates the number of bytes in a page (512).
    PageBlobPageBytes = 512

    // PageBlobMaxUploadPagesBytes indicates the maximum number of bytes that can be sent in a call to UploadPages.
    PageBlobMaxUploadPagesBytes = 4 * 1024 * 1024 // 4MB
)

// PageBlobURL defines a set of operations applicable to page blobs.
type PageBlobURL struct {
    BlobURL
    pbClient pageBlobClient
}

// NewPageBlobURL creates a PageBlobURL object using the specified URL and request policy pipeline.
func NewPageBlobURL(url url.URL, p pipeline.Pipeline) PageBlobURL {
    blobClient := newBlobClient(url, p)
    pbClient := newPageBlobClient(url, p)
    return PageBlobURL{BlobURL: BlobURL{blobClient: blobClient}, pbClient: pbClient}
}

// WithPipeline creates a new PageBlobURL object identical to the source but with the specific request policy pipeline.
func (pb PageBlobURL) WithPipeline(p pipeline.Pipeline) PageBlobURL {
    return NewPageBlobURL(pb.blobClient.URL(), p)
}

// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL {
    p := NewBlobURLParts(pb.URL())
    p.Snapshot = snapshot
    return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
}

func (pb PageBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
    return pb.blobClient.GetAccountInfo(ctx)
}

// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobCreateResponse, error) {
    ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
    return pb.pbClient.Create(ctx, 0, size, nil, PremiumPageBlobAccessTierNone,
        &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
        metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
        nil, nil, EncryptionAlgorithmNone, // CPK
        ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &sequenceNumber, nil)
}

// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac PageBlobAccessConditions, transactionalMD5 []byte) (*PageBlobUploadPagesResponse, error) {
    count, err := validateSeekableStreamAt0AndGetCount(body)
    if err != nil {
        return nil, err
    }
    ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
    ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
    return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil, nil,
        PageRange{Start: offset, End: offset + count - 1}.pointers(),
        ac.LeaseAccessConditions.pointers(),
        nil, nil, EncryptionAlgorithmNone, // CPK
        ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
        ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
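
// A hypothetical usage sketch (illustration only): create a two-page blob and
// write one 512-byte page at offset 0, assuming ctx, a pipeline p, and a URL u.
//
//    pb := NewPageBlobURL(u, p)
//    _, _ = pb.Create(ctx, 2*PageBlobPageBytes, 0, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
//    page := bytes.NewReader(make([]byte, PageBlobPageBytes)) // offset and size must be 512-byte aligned
//    _, _ = pb.UploadPages(ctx, 0, page, PageBlobAccessConditions{}, nil)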

// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
// The sourceOffset specifies the start offset of source data to copy from.
// The destOffset specifies the start offset in the page blob that the data will be written to.
// The count must be a multiple of 512 bytes.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
func (pb PageBlobURL) UploadPagesFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64, count int64, transactionalMD5 []byte, destinationAccessConditions PageBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions) (*PageBlobUploadPagesFromURLResponse, error) {
    ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers()
    sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
    ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := destinationAccessConditions.SequenceNumberAccessConditions.pointers()
    return pb.pbClient.UploadPagesFromURL(ctx, sourceURL.String(), *PageRange{Start: sourceOffset, End: sourceOffset + count - 1}.pointers(), 0,
        *PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(), transactionalMD5, nil, nil,
        nil, nil, EncryptionAlgorithmNone, // CPK
        destinationAccessConditions.LeaseAccessConditions.pointers(),
        ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
        ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
}

// ClearPages frees the specified pages from the page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions) (*PageBlobClearPagesResponse, error) {
    ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
    ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
    return pb.pbClient.ClearPages(ctx, 0, nil,
        PageRange{Start: offset, End: offset + count - 1}.pointers(),
        ac.LeaseAccessConditions.pointers(),
        nil, nil, EncryptionAlgorithmNone, // CPK
        ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan,
        ifSequenceNumberEqual, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageList, error) {
    ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
    return pb.pbClient.GetPageRanges(ctx, nil, nil,
        httpRange{offset: offset, count: count}.pointers(),
        ac.LeaseAccessConditions.pointers(),
        ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) {
    ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
    return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot,
        httpRange{offset: offset, count: count}.pointers(),
        ac.LeaseAccessConditions.pointers(),
        ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
        nil)
}

// Resize resizes the page blob to the specified size (which must be a multiple of 512).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions) (*PageBlobResizeResponse, error) {
    ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
    return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(),
        nil, nil, EncryptionAlgorithmNone, // CPK
        ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// UpdateSequenceNumber sets the page blob's sequence number.
func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64,
    ac BlobAccessConditions) (*PageBlobUpdateSequenceNumberResponse, error) {
    sn := &sequenceNumber
    if action == SequenceNumberActionIncrement {
        sn = nil
    }
    ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
    return pb.pbClient.UpdateSequenceNumber(ctx, action, nil,
        ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
        sn, nil)
}

// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
// The snapshot is copied such that only the differential changes between the previously copied snapshot and this one are transferred to the destination.
// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL, snapshot string, ac BlobAccessConditions) (*PageBlobCopyIncrementalResponse, error) {
    ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
    qp := source.Query()
    qp.Set("snapshot", snapshot)
    source.RawQuery = qp.Encode()
    return pb.pbClient.CopyIncremental(ctx, source.String(), nil,
        ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

func (pr PageRange) pointers() *string {
    endOffset := strconv.FormatInt(int64(pr.End), 10)
    asString := fmt.Sprintf("bytes=%v-%s", pr.Start, endOffset)
    return &asString
}

type PageBlobAccessConditions struct {
    ModifiedAccessConditions
    LeaseAccessConditions
    SequenceNumberAccessConditions
}

// SequenceNumberAccessConditions identifies page blob-specific access conditions which you optionally set.
type SequenceNumberAccessConditions struct {
    // IfSequenceNumberLessThan ensures that the page blob operation succeeds
    // only if the blob's sequence number is less than a value.
    // IfSequenceNumberLessThan=0 means no 'IfSequenceNumberLessThan' header specified.
    // IfSequenceNumberLessThan>0 means 'IfSequenceNumberLessThan' header specified with its value
    // IfSequenceNumberLessThan==-1 means 'IfSequenceNumberLessThan' header specified with a value of 0
    IfSequenceNumberLessThan int64

    // IfSequenceNumberLessThanOrEqual ensures that the page blob operation succeeds
    // only if the blob's sequence number is less than or equal to a value.
    // IfSequenceNumberLessThanOrEqual=0 means no 'IfSequenceNumberLessThanOrEqual' header specified.
    // IfSequenceNumberLessThanOrEqual>0 means 'IfSequenceNumberLessThanOrEqual' header specified with its value
    // IfSequenceNumberLessThanOrEqual=-1 means 'IfSequenceNumberLessThanOrEqual' header specified with a value of 0
    IfSequenceNumberLessThanOrEqual int64

    // IfSequenceNumberEqual ensures that the page blob operation succeeds
    // only if the blob's sequence number is equal to a value.
    // IfSequenceNumberEqual=0 means no 'IfSequenceNumberEqual' header specified.
    // IfSequenceNumberEqual>0 means 'IfSequenceNumberEqual' header specified with its value
    // IfSequenceNumberEqual=-1 means 'IfSequenceNumberEqual' header specified with a value of 0
    IfSequenceNumberEqual int64
}

// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac SequenceNumberAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) {
    var zero int64 // Defaults to 0
    switch ac.IfSequenceNumberLessThan {
    case -1:
        snlt = &zero
    case 0:
        snlt = nil
    default:
        snlt = &ac.IfSequenceNumberLessThan
    }

    switch ac.IfSequenceNumberLessThanOrEqual {
    case -1:
        snltoe = &zero
    case 0:
        snltoe = nil
    default:
        snltoe = &ac.IfSequenceNumberLessThanOrEqual
    }
    switch ac.IfSequenceNumberEqual {
    case -1:
        sne = &zero
    case 0:
        sne = nil
    default:
        sne = &ac.IfSequenceNumberEqual
    }
    return
}
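
// A worked example of the sentinel encoding above (illustration only):
//
//    ac := SequenceNumberAccessConditions{IfSequenceNumberLessThan: -1, IfSequenceNumberEqual: 7}
//    snltoe, snlt, sne := ac.pointers()
//    // snltoe == nil (0 means "no header"), *snlt == 0 (-1 means "header with value 0"), *sne == 7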
159
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go
generated
vendored
Normal file
@@ -0,0 +1,159 @@
package azblob

import (
    "context"
    "net/url"
    "strings"
    "time"

    "github.com/Azure/azure-pipeline-go/pipeline"
)

const (
    // ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container.
    ContainerNameRoot = "$root"

    // ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container.
    ContainerNameLogs = "$logs"
)

// A ServiceURL represents a URL to the Azure Storage Blob service allowing you to manipulate blob containers.
type ServiceURL struct {
    client serviceClient
}

// NewServiceURL creates a ServiceURL object using the specified URL and request policy pipeline.
func NewServiceURL(primaryURL url.URL, p pipeline.Pipeline) ServiceURL {
    client := newServiceClient(primaryURL, p)
    return ServiceURL{client: client}
}

//GetUserDelegationCredential obtains a UserDelegationKey object using the base ServiceURL object.
//OAuth is required for this call, as well as any role that can delegate access to the storage account.
func (s ServiceURL) GetUserDelegationCredential(ctx context.Context, info KeyInfo, timeout *int32, requestID *string) (UserDelegationCredential, error) {
    sc := newServiceClient(s.client.url, s.client.p)
    udk, err := sc.GetUserDelegationKey(ctx, info, timeout, requestID)
    if err != nil {
        return UserDelegationCredential{}, err
    }
    return NewUserDelegationCredential(strings.Split(s.client.url.Host, ".")[0], *udk), nil
}

//TODO this was supposed to be generated
//NewKeyInfo creates a new KeyInfo struct with the correct time formatting & conversion
func NewKeyInfo(Start, Expiry time.Time) KeyInfo {
    return KeyInfo{
        Start:  Start.UTC().Format(SASTimeFormat),
        Expiry: Expiry.UTC().Format(SASTimeFormat),
    }
}

func (s ServiceURL) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) {
    return s.client.GetAccountInfo(ctx)
}

// URL returns the URL endpoint used by the ServiceURL object.
func (s ServiceURL) URL() url.URL {
    return s.client.URL()
}

// String returns the URL as a string.
func (s ServiceURL) String() string {
    u := s.URL()
    return u.String()
}

// WithPipeline creates a new ServiceURL object identical to the source but with the specified request policy pipeline.
func (s ServiceURL) WithPipeline(p pipeline.Pipeline) ServiceURL {
    return NewServiceURL(s.URL(), p)
}

// NewContainerURL creates a new ContainerURL object by concatenating containerName to the end of
// ServiceURL's URL. The new ContainerURL uses the same request policy pipeline as the ServiceURL.
// To change the pipeline, create the ContainerURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewContainerURL instead of calling this object's
// NewContainerURL method.
func (s ServiceURL) NewContainerURL(containerName string) ContainerURL {
    containerURL := appendToURLPath(s.URL(), containerName)
    return NewContainerURL(containerURL, s.client.Pipeline())
}

// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required)
func appendToURLPath(u url.URL, name string) url.URL {
    // e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f"
    // When you call url.Parse() this is what you'll get:
    //     Scheme: "https"
    //     Opaque: ""
    //     User: nil
    //     Host: "ms.com"
    //     Path: "/a/b/"    This should start with a / and it might or might not have a trailing slash
    //     RawPath: ""
    //     ForceQuery: false
    //     RawQuery: "k1=v1&k2=v2"
    //     Fragment: "f"
    if len(u.Path) == 0 || u.Path[len(u.Path)-1] != '/' {
        u.Path += "/" // Append "/" to end before appending name
    }
    u.Path += name
    return u
}
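
// A worked example (illustration only): appendToURLPath extends only the path
// component, leaving the query and fragment intact.
//
//    u, _ := url.Parse("https://ms.com/a/b?k1=v1#f")
//    v := appendToURLPath(*u, "c")
//    // v.String() == "https://ms.com/a/b/c?k1=v1#f"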

// ListContainersSegment returns a single segment of containers starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
// After getting a segment, process it, and then call ListContainersSegment again (passing the
// previously-returned Marker) to get the next segment. For more information, see
// https://docs.microsoft.com/rest/api/storageservices/list-containers2.
func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersSegmentResponse, error) {
    prefix, include, maxResults := o.pointers()
    return s.client.ListContainersSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
}

// ListContainersSegmentOptions defines options available when calling ListContainersSegment.
type ListContainersSegmentOptions struct {
    Detail     ListContainersDetail // No IncludeType header is produced if ""
    Prefix     string               // No Prefix header is produced if ""
    MaxResults int32                // 0 means unspecified
    // TODO: update swagger to generate this type?
}

func (o *ListContainersSegmentOptions) pointers() (prefix *string, include ListContainersIncludeType, maxResults *int32) {
    if o.Prefix != "" {
        prefix = &o.Prefix
    }
    if o.MaxResults != 0 {
        maxResults = &o.MaxResults
    }
    include = ListContainersIncludeType(o.Detail.string())
    return
}

// ListContainersDetail indicates what additional information the service should return with each container.
type ListContainersDetail struct {
    // Tells the service whether to return metadata for each container.
    Metadata bool
}

// string produces the Include query parameter's value.
func (d *ListContainersDetail) string() string {
    items := make([]string, 0, 1)
    // NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
    if d.Metadata {
        items = append(items, string(ListContainersIncludeMetadata))
    }
    if len(items) > 0 {
        return strings.Join(items, ",")
    }
    return string(ListContainersIncludeNone)
}

func (bsu ServiceURL) GetProperties(ctx context.Context) (*StorageServiceProperties, error) {
    return bsu.client.GetProperties(ctx, nil, nil)
}

func (bsu ServiceURL) SetProperties(ctx context.Context, properties StorageServiceProperties) (*ServiceSetPropertiesResponse, error) {
    return bsu.client.SetProperties(ctx, properties, nil, nil)
}

func (bsu ServiceURL) GetStatistics(ctx context.Context) (*StorageServiceStats, error) {
    return bsu.client.GetStatistics(ctx, nil, nil)
}
38
vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
package azblob

import (
    "crypto/hmac"
    "crypto/sha256"
    "encoding/base64"
)

// NewUserDelegationCredential creates a new UserDelegationCredential using a Storage account's name and a user delegation key from it
func NewUserDelegationCredential(accountName string, key UserDelegationKey) UserDelegationCredential {
    return UserDelegationCredential{
        accountName: accountName,
        accountKey:  key,
    }
}

type UserDelegationCredential struct {
    accountName string
    accountKey  UserDelegationKey
}

// AccountName returns the Storage account's name
func (f UserDelegationCredential) AccountName() string {
    return f.accountName
}

// ComputeHMACSHA256 generates a base64-encoded HMAC-SHA256 hash signature of the message using the user delegation key.
func (f UserDelegationCredential) ComputeHMACSHA256(message string) (base64String string) {
    bytes, _ := base64.StdEncoding.DecodeString(f.accountKey.Value)
    h := hmac.New(sha256.New, bytes)
    h.Write([]byte(message))
    return base64.StdEncoding.EncodeToString(h.Sum(nil))
}

// Private method to return important parameters for NewSASQueryParameters
func (f UserDelegationCredential) getUDKParams() *UserDelegationKey {
    return &f.accountKey
}
3
vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go
generated
vendored
Normal file
@@ -0,0 +1,3 @@
package azblob

const serviceLibVersion = "0.10"
55
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
package azblob

import (
    "context"

    "github.com/Azure/azure-pipeline-go/pipeline"
)

// Credential represent any credential type; it is used to create a credential policy Factory.
type Credential interface {
    pipeline.Factory
    credentialMarker()
}

type credentialFunc pipeline.FactoryFunc

func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
    return f(next, po)
}

// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
func (credentialFunc) credentialMarker() {}

//////////////////////////////

// NewAnonymousCredential creates an anonymous credential for use with HTTP(S) requests that read public resources
// or for use with Shared Access Signatures (SAS).
func NewAnonymousCredential() Credential {
    return anonymousCredentialFactory
}

var anonymousCredentialFactory Credential = &anonymousCredentialPolicyFactory{} // Singleton

// anonymousCredentialPolicyFactory is the credential's policy factory.
type anonymousCredentialPolicyFactory struct {
}

// New creates a credential policy object.
func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
    return &anonymousCredentialPolicy{next: next}
}

// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
func (*anonymousCredentialPolicyFactory) credentialMarker() {}

// anonymousCredentialPolicy is the credential's policy object.
type anonymousCredentialPolicy struct {
    next pipeline.Policy
}

// Do implements the credential's policy interface.
func (p anonymousCredentialPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
    // For anonymous credentials, this is effectively a no-op
    return p.next.Do(ctx, request)
}
205
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go
generated
vendored
Normal file
@@ -0,0 +1,205 @@
package azblob

import (
    "bytes"
    "context"
    "crypto/hmac"
    "crypto/sha256"
    "encoding/base64"
    "errors"
    "net/http"
    "net/url"
    "sort"
    "strings"
    "time"

    "github.com/Azure/azure-pipeline-go/pipeline"
)

// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
// storage account's name and either its primary or secondary key.
func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
    bytes, err := base64.StdEncoding.DecodeString(accountKey)
    if err != nil {
        return &SharedKeyCredential{}, err
    }
    return &SharedKeyCredential{accountName: accountName, accountKey: bytes}, nil
}
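
// A hypothetical construction sketch (illustration only): wire the credential
// into a request pipeline. It assumes accountName, a base64 accountKey, and
// this package's NewPipeline/PipelineOptions helpers.
//
//    credential, err := NewSharedKeyCredential(accountName, accountKey)
//    if err != nil {
//        // the account key was not valid base64
//    }
//    p := NewPipeline(credential, PipelineOptions{})
//    u, _ := url.Parse("https://" + accountName + ".blob.core.windows.net")
//    service := NewServiceURL(*u, p)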

// SharedKeyCredential contains an account's name and its primary or secondary key.
// It is immutable, making it shareable and goroutine-safe.
type SharedKeyCredential struct {
    // Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only
    accountName string
    accountKey  []byte
}

// AccountName returns the Storage account's name.
func (f SharedKeyCredential) AccountName() string {
    return f.accountName
}

func (f SharedKeyCredential) getAccountKey() []byte {
    return f.accountKey
}

// noop function to satisfy StorageAccountCredential interface
func (f SharedKeyCredential) getUDKParams() *UserDelegationKey {
    return nil
}

// New creates a credential policy object.
func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
    return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
        // Add a x-ms-date header if it doesn't already exist
        if d := request.Header.Get(headerXmsDate); d == "" {
            request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)}
        }
        stringToSign, err := f.buildStringToSign(request)
        if err != nil {
            return nil, err
        }
        signature := f.ComputeHMACSHA256(stringToSign)
        authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "")
        request.Header[headerAuthorization] = []string{authHeader}

        response, err := next.Do(ctx, request)
        if err != nil && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusForbidden {
            // Service failed to authenticate request, log it
            po.Log(pipeline.LogError, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n")
        }
        return response, err
    })
}

// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
func (*SharedKeyCredential) credentialMarker() {}

// Constants ensuring that header names are correctly spelled and consistently cased.
const (
    headerAuthorization      = "Authorization"
    headerCacheControl       = "Cache-Control"
    headerContentEncoding    = "Content-Encoding"
    headerContentDisposition = "Content-Disposition"
    headerContentLanguage    = "Content-Language"
    headerContentLength      = "Content-Length"
    headerContentMD5         = "Content-MD5"
    headerContentType        = "Content-Type"
    headerDate               = "Date"
    headerIfMatch            = "If-Match"
    headerIfModifiedSince    = "If-Modified-Since"
    headerIfNoneMatch        = "If-None-Match"
    headerIfUnmodifiedSince  = "If-Unmodified-Since"
    headerRange              = "Range"
    headerUserAgent          = "User-Agent"
    headerXmsDate            = "x-ms-date"
    headerXmsVersion         = "x-ms-version"
)

// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS.
func (f SharedKeyCredential) ComputeHMACSHA256(message string) (base64String string) {
    h := hmac.New(sha256.New, f.accountKey)
    h.Write([]byte(message))
    return base64.StdEncoding.EncodeToString(h.Sum(nil))
}

func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) (string, error) {
    // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
    headers := request.Header
    contentLength := headers.Get(headerContentLength)
    if contentLength == "0" {
        contentLength = ""
    }

    canonicalizedResource, err := f.buildCanonicalizedResource(request.URL)
    if err != nil {
        return "", err
    }

    stringToSign := strings.Join([]string{
        request.Method,
        headers.Get(headerContentEncoding),
        headers.Get(headerContentLanguage),
        contentLength,
        headers.Get(headerContentMD5),
        headers.Get(headerContentType),
        "", // Empty date because x-ms-date is expected (as per web page above)
        headers.Get(headerIfModifiedSince),
        headers.Get(headerIfMatch),
        headers.Get(headerIfNoneMatch),
        headers.Get(headerIfUnmodifiedSince),
        headers.Get(headerRange),
        buildCanonicalizedHeader(headers),
        canonicalizedResource,
    }, "\n")
    return stringToSign, nil
}

func buildCanonicalizedHeader(headers http.Header) string {
    cm := map[string][]string{}
    for k, v := range headers {
        headerName := strings.TrimSpace(strings.ToLower(k))
        if strings.HasPrefix(headerName, "x-ms-") {
            cm[headerName] = v // NOTE: the value must not have any whitespace around it.
        }
    }
    if len(cm) == 0 {
        return ""
    }

    keys := make([]string, 0, len(cm))
    for key := range cm {
        keys = append(keys, key)
    }
    sort.Strings(keys)
    ch := bytes.NewBufferString("")
    for i, key := range keys {
        if i > 0 {
            ch.WriteRune('\n')
        }
        ch.WriteString(key)
        ch.WriteRune(':')
        ch.WriteString(strings.Join(cm[key], ","))
    }
    return string(ch.Bytes())
}
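
// A worked example (illustration only): only x-ms-* headers survive, keys are
// lower-cased and sorted, and multi-valued headers are comma-joined.
//
//    h := http.Header{}
//    h.Set("x-ms-date", "Mon, 02 Jan 2006 15:04:05 GMT")
//    h.Set("X-Ms-Version", "2019-12-12")
//    h.Set("Content-Type", "text/plain") // ignored: no x-ms- prefix
//    _ = buildCanonicalizedHeader(h)
//    // "x-ms-date:Mon, 02 Jan 2006 15:04:05 GMT\nx-ms-version:2019-12-12"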

func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) {
    // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
    cr := bytes.NewBufferString("/")
    cr.WriteString(f.accountName)

    if len(u.Path) > 0 {
        // Any portion of the CanonicalizedResource string that is derived from
        // the resource's URI should be encoded exactly as it is in the URI.
        // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
        cr.WriteString(u.EscapedPath())
    } else {
        // a slash is required to indicate the root path
        cr.WriteString("/")
    }

    // params is a map[string][]string; param name is key; params values is []string
    params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values
    if err != nil {
        return "", errors.New("parsing query parameters must succeed, otherwise there might be serious problems in the SDK/generated code")
    }

    if len(params) > 0 { // There is at least 1 query parameter
        paramNames := []string{} // We use this to sort the parameter key names
        for paramName := range params {
            paramNames = append(paramNames, paramName) // paramNames must be lowercase
        }
        sort.Strings(paramNames)

        for _, paramName := range paramNames {
            paramValues := params[paramName]
            sort.Strings(paramValues)

            // Join the sorted key values separated by ','
            // Then prepend "keyName:"; then add this string to the buffer
            cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ","))
        }
    }
    return string(cr.Bytes()), nil
}
137
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go
generated
vendored
Normal file
@@ -0,0 +1,137 @@
package azblob

import (
    "context"
    "errors"
    "sync/atomic"

    "runtime"
    "sync"
    "time"

    "github.com/Azure/azure-pipeline-go/pipeline"
)

// TokenRefresher represents a callback method that you write; this method is called periodically
// so you can refresh the token credential's value.
type TokenRefresher func(credential TokenCredential) time.Duration

// TokenCredential represents a token credential (which is also a pipeline.Factory).
type TokenCredential interface {
    Credential
    Token() string
    SetToken(newToken string)
}

// NewTokenCredential creates a token credential for use with role-based access control (RBAC) access to Azure Storage
// resources. You initialize the TokenCredential with an initial token value. If you pass a non-nil value for
// tokenRefresher, then the function you pass will be called immediately so it can refresh and change the
// TokenCredential's token value by calling SetToken. Your tokenRefresher function must return a time.Duration
// indicating how long the TokenCredential object should wait before calling your tokenRefresher function again.
// If your tokenRefresher callback fails to refresh the token, you can return a duration of 0 to stop your
// TokenCredential object from ever invoking tokenRefresher again. Also, one way to deal with failing to refresh a
// token is to cancel a context.Context object used by requests that have the TokenCredential object in their pipeline.
func NewTokenCredential(initialToken string, tokenRefresher TokenRefresher) TokenCredential {
    tc := &tokenCredential{}
    tc.SetToken(initialToken) // We don't set it above to guarantee atomicity
    if tokenRefresher == nil {
        return tc // If no callback specified, return the simple tokenCredential
    }

    tcwr := &tokenCredentialWithRefresh{token: tc}
    tcwr.token.startRefresh(tokenRefresher)
    runtime.SetFinalizer(tcwr, func(deadTC *tokenCredentialWithRefresh) {
        deadTC.token.stopRefresh()
        deadTC.token = nil // Sanity (not really required)
    })
    return tcwr
}
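
// A hypothetical refresher sketch (illustration only): fetchToken stands in
// for your own OAuth call; returning 0 stops further refreshes, as described
// in the doc comment above.
//
//    tc := NewTokenCredential(initialToken, func(credential TokenCredential) time.Duration {
//        newToken, expiresIn, err := fetchToken() // hypothetical helper
//        if err != nil {
//            return 0 // give up; callers should cancel in-flight contexts
//        }
//        credential.SetToken(newToken)
//        return expiresIn - 2*time.Minute // refresh a little before expiry
//    })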

// tokenCredentialWithRefresh is a wrapper over a token credential.
// When this wrapper object gets GC'd, it stops the tokenCredential's timer
// which allows the tokenCredential object to also be GC'd.
type tokenCredentialWithRefresh struct {
    token *tokenCredential
}

// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
func (*tokenCredentialWithRefresh) credentialMarker() {}

// Token returns the current token value
func (f *tokenCredentialWithRefresh) Token() string { return f.token.Token() }

// SetToken changes the current token value
func (f *tokenCredentialWithRefresh) SetToken(token string) { f.token.SetToken(token) }

// New satisfies pipeline.Factory's New method creating a pipeline policy object.
func (f *tokenCredentialWithRefresh) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
    return f.token.New(next, po)
}

///////////////////////////////////////////////////////////////////////////////
|
||||
// tokenCredential is a pipeline.Factory is the credential's policy factory.
|
||||
type tokenCredential struct {
|
||||
token atomic.Value
|
||||
|
||||
// The members below are only used if the user specified a tokenRefresher callback function.
|
||||
timer *time.Timer
|
||||
tokenRefresher TokenRefresher
|
||||
lock sync.Mutex
|
||||
stopped bool
|
||||
}
|
||||
|
||||
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
|
||||
func (*tokenCredential) credentialMarker() {}
|
||||
|
||||
// Token returns the current token value
|
||||
func (f *tokenCredential) Token() string { return f.token.Load().(string) }
|
||||
|
||||
// SetToken changes the current token value
|
||||
func (f *tokenCredential) SetToken(token string) { f.token.Store(token) }
|
||||
|
||||
// startRefresh calls refresh which immediately calls tokenRefresher
|
||||
// and then starts a timer to call tokenRefresher in the future.
|
||||
func (f *tokenCredential) startRefresh(tokenRefresher TokenRefresher) {
|
||||
f.tokenRefresher = tokenRefresher
|
||||
f.stopped = false // In case user calls StartRefresh, StopRefresh, & then StartRefresh again
|
||||
f.refresh()
|
||||
}
|
||||
|
||||
// refresh calls the user's tokenRefresher so they can refresh the token (by
|
||||
// calling SetToken) and then starts another time (based on the returned duration)
|
||||
// in order to refresh the token again in the future.
|
||||
func (f *tokenCredential) refresh() {
|
||||
d := f.tokenRefresher(f) // Invoke the user's refresh callback outside of the lock
|
||||
if d > 0 { // If duration is 0 or negative, refresher wants to not be called again
|
||||
f.lock.Lock()
|
||||
if !f.stopped {
|
||||
f.timer = time.AfterFunc(d, f.refresh)
|
||||
}
|
||||
f.lock.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// stopRefresh stops any pending timer and sets stopped field to true to prevent
|
||||
// any new timer from starting.
|
||||
// NOTE: Stopping the timer allows the GC to destroy the tokenCredential object.
|
||||
func (f *tokenCredential) stopRefresh() {
|
||||
f.lock.Lock()
|
||||
f.stopped = true
|
||||
if f.timer != nil {
|
||||
f.timer.Stop()
|
||||
}
|
||||
f.lock.Unlock()
|
||||
}
|
||||
|
||||
// New satisfies pipeline.Factory's New method creating a pipeline policy object.
|
||||
func (f *tokenCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
|
||||
return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
||||
if request.URL.Scheme != "https" {
|
||||
// HTTPS must be used, otherwise the tokens are at the risk of being exposed
|
||||
return nil, errors.New("token credentials require a URL using the https protocol scheme")
|
||||
}
|
||||
request.Header[headerAuthorization] = []string{"Bearer " + f.Token()}
|
||||
return next.Do(ctx, request)
|
||||
})
|
||||
}
|
||||
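The refresher contract in NewTokenCredential above is subtle: the callback must both install the new token via SetToken and return the delay until the next refresh, where a zero duration means "never call me again". A minimal usage sketch, assuming a hypothetical fetchTokenFromAAD helper standing in for real token acquisition:

```go
package main

import (
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// fetchTokenFromAAD is a hypothetical helper standing in for real token acquisition.
func fetchTokenFromAAD() (token string, expiresIn time.Duration, err error) {
	return "eyJ0eXAi...", time.Hour, nil
}

func main() {
	tc := azblob.NewTokenCredential("initial-token", func(credential azblob.TokenCredential) time.Duration {
		token, expiresIn, err := fetchTokenFromAAD()
		if err != nil {
			return 0 // a zero duration stops all future refresh attempts
		}
		credential.SetToken(token)
		return expiresIn - time.Minute // refresh slightly before the token expires
	})
	_ = azblob.NewPipeline(tc, azblob.PipelineOptions{})
}
```

Returning `expiresIn - time.Minute` refreshes a little before expiry so in-flight requests never carry a stale token.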
28 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_unix.go generated vendored Normal file
@@ -0,0 +1,28 @@
// +build linux darwin freebsd openbsd netbsd dragonfly solaris

package azblob

import (
	"os"

	"golang.org/x/sys/unix"
)

type mmf []byte

func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
	prot, flags := unix.PROT_READ, unix.MAP_SHARED // Assume read-only
	if writable {
		prot, flags = unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED
	}
	addr, err := unix.Mmap(int(file.Fd()), offset, length, prot, flags)
	return mmf(addr), err
}

func (m *mmf) unmap() {
	err := unix.Munmap(*m)
	*m = nil
	if err != nil {
		panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption")
	}
}
38 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_windows.go generated vendored Normal file
@@ -0,0 +1,38 @@
package azblob

import (
	"os"
	"reflect"
	"syscall"
	"unsafe"
)

type mmf []byte

func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
	prot, access := uint32(syscall.PAGE_READONLY), uint32(syscall.FILE_MAP_READ) // Assume read-only
	if writable {
		prot, access = uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE)
	}
	hMMF, errno := syscall.CreateFileMapping(syscall.Handle(file.Fd()), nil, prot, uint32(int64(length)>>32), uint32(int64(length)&0xffffffff), nil)
	if hMMF == 0 {
		return nil, os.NewSyscallError("CreateFileMapping", errno)
	}
	defer syscall.CloseHandle(hMMF)
	addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length))
	m := mmf{}
	h := (*reflect.SliceHeader)(unsafe.Pointer(&m))
	h.Data = addr
	h.Len = length
	h.Cap = h.Len
	return m, nil
}

func (m *mmf) unmap() {
	addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0])))
	*m = mmf{}
	err := syscall.UnmapViewOfFile(addr)
	if err != nil {
		panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption")
	}
}
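One detail worth calling out in the Windows implementation above: CreateFileMapping and MapViewOfFile take 64-bit lengths and offsets as separate high/low 32-bit halves, which is why the code shifts by 32 and masks with 0xffffffff. A tiny self-contained illustration of that split and its round trip:

```go
package main

import "fmt"

func main() {
	offset := int64(0x123456789) // any 64-bit value
	hi := uint32(offset >> 32)        // high 32 bits, passed as one argument
	lo := uint32(offset & 0xffffffff) // low 32 bits, passed as the other
	recombined := int64(hi)<<32 | int64(lo)
	fmt.Printf("hi=%#x lo=%#x recombined=%#x\n", hi, lo, recombined)
}
```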
46 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go generated vendored Normal file
@@ -0,0 +1,46 @@
package azblob

import (
	"github.com/Azure/azure-pipeline-go/pipeline"
)

// PipelineOptions is used to configure a request policy pipeline's retry policy and logging.
type PipelineOptions struct {
	// Log configures the pipeline's logging infrastructure indicating what information is logged and where.
	Log pipeline.LogOptions

	// Retry configures the built-in retry policy behavior.
	Retry RetryOptions

	// RequestLog configures the built-in request logging policy.
	RequestLog RequestLogOptions

	// Telemetry configures the built-in telemetry policy behavior.
	Telemetry TelemetryOptions

	// HTTPSender configures the sender of HTTP requests
	HTTPSender pipeline.Factory
}

// NewPipeline creates a Pipeline using the specified credentials and options.
func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline {
	// Closest to API goes first; closest to the wire goes last
	f := []pipeline.Factory{
		NewTelemetryPolicyFactory(o.Telemetry),
		NewUniqueRequestIDPolicyFactory(),
		NewRetryPolicyFactory(o.Retry),
	}

	if _, ok := c.(*anonymousCredentialPolicyFactory); !ok {
		// For AnonymousCredential, we optimize out the policy factory since it doesn't do anything
		// NOTE: The credential's policy factory must appear close to the wire so it can sign any
		// changes made by other factories (like UniqueRequestIDPolicyFactory)
		f = append(f, c)
	}
	f = append(f,
		NewRequestLogPolicyFactory(o.RequestLog),
		pipeline.MethodFactoryMarker()) // indicates at what stage in the pipeline the method factory is invoked

	return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: o.HTTPSender, Log: o.Log})
}
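NewPipeline composes the policy factories in a fixed order: telemetry, unique request ID, retry, then (for non-anonymous credentials) the signing credential, and finally request logging closest to the wire. A minimal construction sketch; the account name and base64 key are placeholders, and NewSharedKeyCredential is part of azblob but not shown in this diff:

```go
package main

import (
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// Placeholder account name and (valid base64) key.
	credential, err := azblob.NewSharedKeyCredential("myaccount", "bXlrZXk=")
	if err != nil {
		panic(err)
	}
	p := azblob.NewPipeline(credential, azblob.PipelineOptions{
		Retry: azblob.RetryOptions{
			Policy:     azblob.RetryPolicyExponential,
			MaxTries:   4,
			TryTimeout: time.Minute,
		},
		Telemetry: azblob.TelemetryOptions{Value: "rclone/1.51"}, // hypothetical value
	})
	_ = p // hand the pipeline to the service/container/blob URL constructors
}
```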
182 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go generated vendored Normal file
@@ -0,0 +1,182 @@
package azblob

import (
	"bytes"
	"context"
	"fmt"
	"net/http"
	"net/url"
	"runtime"
	"strings"
	"time"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

// RequestLogOptions configures the request log policy's behavior.
type RequestLogOptions struct {
	// LogWarningIfTryOverThreshold logs a warning if a tried operation takes longer than the specified
	// duration (-1=no logging; 0=default threshold).
	LogWarningIfTryOverThreshold time.Duration
}

func (o RequestLogOptions) defaults() RequestLogOptions {
	if o.LogWarningIfTryOverThreshold == 0 {
		// It would be good to relate this to https://azure.microsoft.com/en-us/support/legal/sla/storage/v1_2/
		// But this monitors the time to get the HTTP response; NOT the time to download the response body.
		o.LogWarningIfTryOverThreshold = 3 * time.Second // Default to 3 seconds
	}
	return o
}

// NewRequestLogPolicyFactory creates a RequestLogPolicyFactory object configured using the specified options.
func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
	o = o.defaults() // Force defaults to be calculated
	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
		// These variables are per-policy; shared by multiple calls to Do
		var try int32
		operationStart := time.Now() // If this is the 1st try, record the operation start time
		return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
			try++ // The first try is #1 (not #0)

			// Log the outgoing request as informational
			if po.ShouldLog(pipeline.LogInfo) {
				b := &bytes.Buffer{}
				fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", try)
				pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), nil, nil)
				po.Log(pipeline.LogInfo, b.String())
			}

			// Set the time for this particular retry operation and then Do the operation.
			tryStart := time.Now()
			response, err = next.Do(ctx, request) // Make the request
			tryEnd := time.Now()
			tryDuration := tryEnd.Sub(tryStart)
			opDuration := tryEnd.Sub(operationStart)

			logLevel, forceLog := pipeline.LogInfo, false // Default logging information

			// If the response took too long, we'll upgrade to warning.
			if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
				// Log a warning if the try duration exceeded the specified threshold
				logLevel, forceLog = pipeline.LogWarning, true
			}

			if err == nil { // We got a response from the service
				sc := response.Response().StatusCode
				if ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) {
					logLevel, forceLog = pipeline.LogError, true // Promote to Error any 4xx (except those listed above) or any 5xx
				} else {
					// For other status codes, we leave the level as is.
				}
			} else { // This error did not get an HTTP response from the service; upgrade the severity to Error
				logLevel, forceLog = pipeline.LogError, true
			}

			if shouldLog := po.ShouldLog(logLevel); forceLog || shouldLog {
				// We're going to log this; build the string to log
				b := &bytes.Buffer{}
				slow := ""
				if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
					slow = fmt.Sprintf("[SLOW >%v]", o.LogWarningIfTryOverThreshold)
				}
				fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", try, tryDuration, slow, opDuration)
				if err != nil { // This HTTP request did not get a response from the service
					fmt.Fprint(b, "REQUEST ERROR\n")
				} else {
					if logLevel == pipeline.LogError {
						fmt.Fprint(b, "RESPONSE STATUS CODE ERROR\n")
					} else {
						fmt.Fprint(b, "RESPONSE SUCCESSFULLY RECEIVED\n")
					}
				}

				pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), response.Response(), err)
				if logLevel <= pipeline.LogError {
					b.Write(stack()) // For errors (or lower levels), we append the stack trace (an expensive operation)
				}
				msg := b.String()

				if forceLog {
					pipeline.ForceLog(logLevel, msg)
				}
				if shouldLog {
					po.Log(logLevel, msg)
				}
			}
			return response, err
		}
	})
}

// RedactSigQueryParam redacts the 'sig' query parameter in a URL's raw query to protect the secret.
func RedactSigQueryParam(rawQuery string) (bool, string) {
	rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig=
	sigFound := strings.Contains(rawQuery, "?sig=")
	if !sigFound {
		sigFound = strings.Contains(rawQuery, "&sig=")
		if !sigFound {
			return sigFound, rawQuery // [?|&]sig= not found; return same rawQuery passed in (no memory allocation)
		}
	}
	// [?|&]sig= found, redact its value
	values, _ := url.ParseQuery(rawQuery)
	for name := range values {
		if strings.EqualFold(name, "sig") {
			values[name] = []string{"REDACTED"}
		}
	}
	return sigFound, values.Encode()
}

func prepareRequestForLogging(request pipeline.Request) *http.Request {
	req := request
	if sigFound, rawQuery := RedactSigQueryParam(req.URL.RawQuery); sigFound {
		// Make a copy so we don't destroy the query parameters we actually need to send in the request
		req = request.Copy()
		req.Request.URL.RawQuery = rawQuery
	}

	return prepareRequestForServiceLogging(req)
}

func stack() []byte {
	buf := make([]byte, 1024)
	for {
		n := runtime.Stack(buf, false)
		if n < len(buf) {
			return buf[:n]
		}
		buf = make([]byte, 2*len(buf))
	}
}

///////////////////////////////////////////////////////////////////////////////////////
// Redact phase useful for blob and file service only. For other services,
// this method can directly return request.Request.
///////////////////////////////////////////////////////////////////////////////////////
func prepareRequestForServiceLogging(request pipeline.Request) *http.Request {
	req := request
	if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsCopySourceHeader); exist {
		req = request.Copy()
		url, err := url.Parse(req.Header.Get(key))
		if err == nil {
			if sigFound, rawQuery := RedactSigQueryParam(url.RawQuery); sigFound {
				url.RawQuery = rawQuery
				req.Header.Set(xMsCopySourceHeader, url.String())
			}
		}
	}
	return req.Request
}

const xMsCopySourceHeader = "x-ms-copy-source"

func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, string) {
	for keyInHeader := range header {
		if strings.EqualFold(keyInHeader, key) {
			return true, keyInHeader
		}
	}
	return false, ""
}
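RedactSigQueryParam is exported, so callers outside this package can scrub SAS signatures before writing any URL to a log. A short sketch; note that, as a side effect of the implementation above, the returned query string is lowercased and re-encoded:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	found, redacted := azblob.RedactSigQueryParam("se=2020-06-01&sig=SECRETSIGNATURE&sp=r")
	fmt.Println(found)    // true
	fmt.Println(redacted) // se=2020-06-01&sig=REDACTED&sp=r
}
```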
414 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go generated vendored Normal file
@@ -0,0 +1,414 @@
package azblob

import (
	"context"
	"errors"
	"io"
	"io/ioutil"
	"math/rand"
	"net"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
type RetryPolicy int32

const (
	// RetryPolicyExponential tells the pipeline to use an exponential back-off retry policy
	RetryPolicyExponential RetryPolicy = 0

	// RetryPolicyFixed tells the pipeline to use a fixed back-off retry policy
	RetryPolicyFixed RetryPolicy = 1
)

// RetryOptions configures the retry policy's behavior.
type RetryOptions struct {
	// Policy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
	// A value of zero means that you accept our default policy.
	Policy RetryPolicy

	// MaxTries specifies the maximum number of attempts an operation will be tried before producing an error (0=default).
	// A value of zero means that you accept our default policy. A value of 1 means 1 try and no retries.
	MaxTries int32

	// TryTimeout indicates the maximum time allowed for any single try of an HTTP request.
	// A value of zero means that you accept our default timeout. NOTE: When transferring large amounts
	// of data, the default TryTimeout will probably not be sufficient. You should override this value
	// based on the bandwidth available to the host machine and proximity to the Storage service. A good
	// starting point may be something like (60 seconds per MB of anticipated-payload-size).
	TryTimeout time.Duration

	// RetryDelay specifies the amount of delay to use before retrying an operation (0=default).
	// When RetryPolicy is specified as RetryPolicyExponential, the delay increases exponentially
	// with each retry up to a maximum specified by MaxRetryDelay.
	// If you specify 0, then you must also specify 0 for MaxRetryDelay.
	// If you specify RetryDelay, then you must also specify MaxRetryDelay, and MaxRetryDelay should be
	// equal to or greater than RetryDelay.
	RetryDelay time.Duration

	// MaxRetryDelay specifies the maximum delay allowed before retrying an operation (0=default).
	// If you specify 0, then you must also specify 0 for RetryDelay.
	MaxRetryDelay time.Duration

	// RetryReadsFromSecondaryHost specifies whether the retry policy should retry a read operation against another host.
	// If RetryReadsFromSecondaryHost is "" (the default) then operations are not retried against another host.
	// NOTE: Before setting this field, make sure you understand the issues around reading stale & potentially-inconsistent
	// data at this webpage: https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs
	RetryReadsFromSecondaryHost string // Comment this out for non-Blob SDKs
}

func (o RetryOptions) retryReadsFromSecondaryHost() string {
	return o.RetryReadsFromSecondaryHost // This is for the Blob SDK only
	//return "" // This is for non-blob SDKs
}

func (o RetryOptions) defaults() RetryOptions {
	// We assume the following:
	// 1. o.Policy should either be RetryPolicyExponential or RetryPolicyFixed
	// 2. o.MaxTries >= 0
	// 3. o.TryTimeout, o.RetryDelay, and o.MaxRetryDelay >= 0
	// 4. o.RetryDelay <= o.MaxRetryDelay
	// 5. Both o.RetryDelay and o.MaxRetryDelay must be 0 or neither can be 0

	IfDefault := func(current *time.Duration, desired time.Duration) {
		if *current == time.Duration(0) {
			*current = desired
		}
	}

	// Set defaults if unspecified
	if o.MaxTries == 0 {
		o.MaxTries = 4
	}
	switch o.Policy {
	case RetryPolicyExponential:
		IfDefault(&o.TryTimeout, 1*time.Minute)
		IfDefault(&o.RetryDelay, 4*time.Second)
		IfDefault(&o.MaxRetryDelay, 120*time.Second)

	case RetryPolicyFixed:
		IfDefault(&o.TryTimeout, 1*time.Minute)
		IfDefault(&o.RetryDelay, 30*time.Second)
		IfDefault(&o.MaxRetryDelay, 120*time.Second)
	}
	return o
}

func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never 0
	pow := func(number int64, exponent int32) int64 { // pow is nested helper function
		var result int64 = 1
		for n := int32(0); n < exponent; n++ {
			result *= number
		}
		return result
	}

	delay := time.Duration(0)
	switch o.Policy {
	case RetryPolicyExponential:
		delay = time.Duration(pow(2, try-1)-1) * o.RetryDelay

	case RetryPolicyFixed:
		if try > 1 { // Any try after the 1st uses the fixed delay
			delay = o.RetryDelay
		}
	}

	// Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
	// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
	delay = time.Duration(float32(delay) * (rand.Float32()/2 + 0.8)) // NOTE: We want math/rand; not crypto/rand
	if delay > o.MaxRetryDelay {
		delay = o.MaxRetryDelay
	}
	return delay
}

// NewRetryPolicyFactory creates a RetryPolicyFactory object configured using the specified options.
func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
	o = o.defaults() // Force defaults to be calculated
	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
		return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
			// Before each try, we'll select either the primary or secondary URL.
			primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC

			// We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use
			considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.retryReadsFromSecondaryHost() != ""

			// Exponential retry algorithm: ((2 ^ (attempt - 1)) - 1) * delay * random(0.8, 1.3)
			// When to retry: connection failure or temporary/timeout. NOTE: StorageError considers HTTP 500/503 as temporary & is therefore retryable
			// If using a secondary:
			//    Even tries go against primary; odd tries go against the secondary
			//    For a primary, wait ((2 ^ (primaryTries - 1)) - 1) * delay * random(0.8, 1.3)
			//    If secondary gets a 404, don't fail, retry but future retries are only against the primary
			//    When retrying against a secondary, ignore the retry count and wait (1 second * random(0.8, 1.3))
			for try := int32(1); try <= o.MaxTries; try++ {
				logf("\n=====> Try=%d\n", try)

				// Determine which endpoint to try. It's primary if there is no secondary or if it is an odd # attempt.
				tryingPrimary := !considerSecondary || (try%2 == 1)
				// Select the correct host and delay
				if tryingPrimary {
					primaryTry++
					delay := o.calcDelay(primaryTry)
					logf("Primary try=%d, Delay=%v\n", primaryTry, delay)
					time.Sleep(delay) // The 1st try returns 0 delay
				} else {
					// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
					delay := time.Duration(float32(time.Second) * (rand.Float32()/2 + 0.8))
					logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay)
					time.Sleep(delay) // Delay with some jitter before trying secondary
				}

				// Clone the original request to ensure that each try starts with the original (unmutated) request.
				requestCopy := request.Copy()

				// For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
				// the stream may not be at offset 0 when we first get it and we want the same behavior for the
				// 1st try as for additional tries.
				err = requestCopy.RewindBody()
				if err != nil {
					return nil, errors.New("we must be able to seek on the Body Stream, otherwise retries would cause data corruption")
				}

				if !tryingPrimary {
					requestCopy.URL.Host = o.retryReadsFromSecondaryHost()
					requestCopy.Host = o.retryReadsFromSecondaryHost()
				}

				// Set the server-side timeout query parameter "timeout=[seconds]"
				timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try
				if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two
					t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline
					logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t)
					if t < timeout {
						timeout = t
					}
					if timeout < 0 {
						timeout = 0 // If timeout ever goes negative, set it to zero; this can happen while debugging
					}
					logf("TryTimeout adjusted to=%d sec\n", timeout)
				}
				q := requestCopy.Request.URL.Query()
				q.Set("timeout", strconv.Itoa(int(timeout+1))) // Add 1 to "round up"
				requestCopy.Request.URL.RawQuery = q.Encode()
				logf("Url=%s\n", requestCopy.Request.URL.String())

				// Set the time for this particular retry operation and then Do the operation.
				tryCtx, tryCancel := context.WithTimeout(ctx, time.Second*time.Duration(timeout))
				//requestCopy.Body = &deadlineExceededReadCloser{r: requestCopy.Request.Body}
				response, err = next.Do(tryCtx, requestCopy) // Make the request
				/*err = improveDeadlineExceeded(err)
				if err == nil {
					response.Response().Body = &deadlineExceededReadCloser{r: response.Response().Body}
				}*/
				logf("Err=%v, response=%v\n", err, response)

				action := "" // This MUST get changed within the switch code below
				switch {
				case ctx.Err() != nil:
					action = "NoRetry: Op timeout"
				case !tryingPrimary && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusNotFound:
					// If attempt was against the secondary & it returned a StatusNotFound (404), then
					// the resource was not found. This may be due to replication delay. So, in this
					// case, we'll never try the secondary again for this operation.
					considerSecondary = false
					action = "Retry: Secondary URL returned 404"
				case err != nil:
					// NOTE: Protocol Responder returns non-nil if the REST API returns an invalid status code for the invoked operation.
					// Use ServiceCode to verify if the error is related to the storage service side;
					// ServiceCode is set only when an error related to the storage service happened.
					if stErr, ok := err.(StorageError); ok {
						if stErr.Temporary() {
							action = "Retry: StorageError with error service code and Temporary()"
						} else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // TODO: This is a temporary workaround; remove it after the protocol layer fixes the issue that net.Error is wrapped as storageError
							action = "Retry: StorageError with success status code"
						} else {
							action = "NoRetry: StorageError not Temporary() and without retriable status code"
						}
					} else if netErr, ok := err.(net.Error); ok {
						// Use the non-retriable net.Error list, rather than a retriable list,
						// as there are errors without a Temporary() implementation that still
						// need to be retried, like 'connection reset by peer' and 'transport connection broken'.
						// So the SDK retries in most cases, unless the error should definitely not be retried.
						if !isNotRetriable(netErr) {
							action = "Retry: net.Error and not in the non-retriable list"
						} else {
							action = "NoRetry: net.Error and in the non-retriable list"
						}
					} else if err == io.ErrUnexpectedEOF {
						action = "Retry: unexpected EOF"
					} else {
						action = "NoRetry: unrecognized error"
					}
				default:
					action = "NoRetry: successful HTTP request" // no error
				}

				logf("Action=%s\n", action)
				// fmt.Println(action + "\n") // This is where we could log the retry operation; action is why we're retrying
				if action[0] != 'R' { // Retry only if action starts with 'R'
					if err != nil {
						tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
					} else {
						// We wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper.
						// So, when the user closes the Body, our per-try context gets closed too.
						// Another option is to have the last Policy do this wrapping for a per-retry context (not for the user's context)
						if response == nil || response.Response() == nil {
							// We do panic in the case response or response.Response() is nil,
							// as for the client, the response should not be nil if the request is sent and the operation is executed successfully.
							// Another option is to execute the cancel function when response or response.Response() is nil,
							// as in this case, the current per-try has nothing to do in future.
							return nil, errors.New("invalid state, response should not be nil when the operation is executed successfully")
						}
						response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body}
					}
					break // Don't retry
				}
				if response != nil && response.Response() != nil && response.Response().Body != nil {
					// If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection
					body := response.Response().Body
					io.Copy(ioutil.Discard, body)
					body.Close()
				}
				// If retrying, cancel the current per-try timeout context
				tryCancel()
			}
			return response, err // Not retryable or too many retries; return the last response/error
		}
	})
}

// contextCancelReadCloser helps to invoke the context's cancelFunc properly when the ReadCloser is closed.
type contextCancelReadCloser struct {
	cf   context.CancelFunc
	body io.ReadCloser
}

func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) {
	return rc.body.Read(p)
}

func (rc *contextCancelReadCloser) Close() error {
	err := rc.body.Close()
	if rc.cf != nil {
		rc.cf()
	}
	return err
}

// isNotRetriable checks if the provided net.Error isn't retriable.
func isNotRetriable(errToParse net.Error) bool {
	// No error, so this is NOT retriable.
	if errToParse == nil {
		return true
	}

	// The error is either temporary or a timeout so it IS retriable (not not retriable).
	if errToParse.Temporary() || errToParse.Timeout() {
		return false
	}

	genericErr := error(errToParse)

	// From here all the errors are neither Temporary() nor Timeout().
	switch err := errToParse.(type) {
	case *net.OpError:
		// The net.Error is also a net.OpError but the inner error is nil, so this is not retriable.
		if err.Err == nil {
			return true
		}
		genericErr = err.Err
	}

	switch genericErr.(type) {
	case *net.AddrError, net.UnknownNetworkError, *net.DNSError, net.InvalidAddrError, *net.ParseError, *net.DNSConfigError:
		// If the error is one of the ones listed, then it is NOT retriable.
		return true
	}

	// If it's an invalid header field name/value error thrown by the http module, then it is NOT retriable.
	// This could happen when a metadata key or value is invalid. (RoundTrip in transport.go)
	if strings.Contains(genericErr.Error(), "invalid header field") {
		return true
	}

	// Assume the error is retriable.
	return false
}

var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent}

func isSuccessStatusCode(resp *http.Response) bool {
	if resp == nil {
		return false
	}
	for _, i := range successStatusCodes {
		if i == resp.StatusCode {
			return true
		}
	}
	return false
}

// According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away
var logf = func(format string, a ...interface{}) {}

// Use this version to see the retry method's code path (import "fmt")
//var logf = fmt.Printf

/*
type deadlineExceededReadCloser struct {
	r io.ReadCloser
}

func (r *deadlineExceededReadCloser) Read(p []byte) (int, error) {
	n, err := 0, io.EOF
	if r.r != nil {
		n, err = r.r.Read(p)
	}
	return n, improveDeadlineExceeded(err)
}
func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) {
	// For an HTTP request, the ReadCloser MUST also implement seek
	// For an HTTP response, Seek MUST not be called (or this will panic)
	o, err := r.r.(io.Seeker).Seek(offset, whence)
	return o, improveDeadlineExceeded(err)
}
func (r *deadlineExceededReadCloser) Close() error {
	if c, ok := r.r.(io.Closer); ok {
		c.Close()
	}
	return nil
}

// timeoutError is the internal struct that implements our richer timeout error.
type deadlineExceeded struct {
	responseError
}

var _ net.Error = (*deadlineExceeded)(nil) // Ensure deadlineExceeded implements the net.Error interface at compile time

// improveDeadlineExceeded creates a timeoutError object that implements the error interface IF cause is a context.DeadlineExceeded error.
func improveDeadlineExceeded(cause error) error {
	// If cause is not DeadlineExceeded, return the same error passed in.
	if cause != context.DeadlineExceeded {
		return cause
	}
	// Else, convert DeadlineExceeded to our timeoutError which gives a richer string message
	return &deadlineExceeded{
		responseError: responseError{
			ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3),
		},
	}
}

// Error implements the error interface's Error method to return a string representation of the error.
func (e *deadlineExceeded) Error() string {
	return e.ErrorNode.Error("context deadline exceeded; when creating a pipeline, consider increasing RetryOptions' TryTimeout field")
}
*/
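The exponential back-off above is easy to sanity-check in isolation: for try n the base delay is (2^(n-1) - 1) * RetryDelay, jittered by a factor in [0.8, 1.3) and capped at MaxRetryDelay. The standalone sketch below mirrors the calcDelay logic for the default exponential options (RetryDelay=4s, MaxRetryDelay=120s), purely to show the resulting schedule:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// calcDelay mirrors the vendored exponential-policy delay calculation,
// reproduced here only to demonstrate the schedule it produces.
func calcDelay(try int32, retryDelay, maxRetryDelay time.Duration) time.Duration {
	pow := func(number int64, exponent int32) int64 {
		var result int64 = 1
		for n := int32(0); n < exponent; n++ {
			result *= number
		}
		return result
	}
	delay := time.Duration(pow(2, try-1)-1) * retryDelay
	delay = time.Duration(float32(delay) * (rand.Float32()/2 + 0.8)) // jitter in [0.8, 1.3)
	if delay > maxRetryDelay {
		delay = maxRetryDelay
	}
	return delay
}

func main() {
	for try := int32(1); try <= 4; try++ {
		// Before jitter: try 1 -> 0s, try 2 -> 4s, try 3 -> 12s, try 4 -> 28s
		fmt.Printf("try %d: %v\n", try, calcDelay(try, 4*time.Second, 120*time.Second))
	}
}
```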
51 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go generated vendored Normal file
@@ -0,0 +1,51 @@
package azblob

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"runtime"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

// TelemetryOptions configures the telemetry policy's behavior.
type TelemetryOptions struct {
	// Value is a string prepended to each request's User-Agent and sent to the service.
	// The service records the user-agent in logs for diagnostics and tracking of client requests.
	Value string
}

// NewTelemetryPolicyFactory creates a factory that can create telemetry policy objects
// which add telemetry information to outgoing HTTP requests.
func NewTelemetryPolicyFactory(o TelemetryOptions) pipeline.Factory {
	b := &bytes.Buffer{}
	b.WriteString(o.Value)
	if b.Len() > 0 {
		b.WriteRune(' ')
	}
	fmt.Fprintf(b, "Azure-Storage/%s %s", serviceLibVersion, platformInfo)
	telemetryValue := b.String()

	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
		return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
			request.Header.Set("User-Agent", telemetryValue)
			return next.Do(ctx, request)
		}
	})
}

// NOTE: the ONLY function that should write to this variable is this func
var platformInfo = func() string {
	// Examples of the format:
	// Azure-Storage/version (runtime; os type and version)
	// Azure-Storage/1.4.0 (NODE-VERSION v4.5.0; Windows_NT 10.0.14393)
	operatingSystem := runtime.GOOS // Default OS string
	switch operatingSystem {
	case "windows":
		operatingSystem = os.Getenv("OS") // Get more specific OS information
	case "linux": // accept default OS info
	case "freebsd": // accept default OS info
	}
	return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem)
}()
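The net effect of the telemetry policy is a User-Agent prefix. With a hypothetical TelemetryOptions{Value: "rclone/1.51"}, the header is shaped roughly like the line below; the exact Azure-Storage version and the parenthesized platform segment depend on serviceLibVersion and platformInfo at build time:

```
User-Agent: rclone/1.51 Azure-Storage/0.8.0 (go1.14; linux)
```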
24 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go generated vendored Normal file
@@ -0,0 +1,24 @@
package azblob

import (
	"context"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

// NewUniqueRequestIDPolicyFactory creates a UniqueRequestIDPolicyFactory object
// that sets the request's x-ms-client-request-id header if it doesn't already exist.
func NewUniqueRequestIDPolicyFactory() pipeline.Factory {
	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
		// This is Policy's Do method:
		return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
			id := request.Header.Get(xMsClientRequestID)
			if id == "" { // Add a unique request ID if the caller didn't specify one already
				request.Header.Set(xMsClientRequestID, newUUID().String())
			}
			return next.Do(ctx, request)
		}
	})
}

const xMsClientRequestID = "x-ms-client-request-id"
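Because the policy above only fills in x-ms-client-request-id when it is absent, a caller can propagate its own correlation ID by setting the header before the pipeline runs. A hedged sketch at the plain net/http level:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "https://myaccount.blob.core.windows.net/c/b", nil)
	if err != nil {
		panic(err)
	}
	// If this header is already set, the unique-request-ID policy leaves it alone;
	// otherwise the policy fills in a fresh UUID.
	req.Header.Set("x-ms-client-request-id", "my-correlation-id-123")
	fmt.Println(req.Header.Get("x-ms-client-request-id"))
}
```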
184 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go generated vendored Normal file
@@ -0,0 +1,184 @@
package azblob

import (
	"context"
	"io"
	"net"
	"net/http"
	"strings"
	"sync"
)

const CountToEnd = 0

// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation.
type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error)

// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters
// that should be used to make an HTTP GET request.
type HTTPGetterInfo struct {
	// Offset specifies the start offset that should be used when
	// creating the HTTP GET request's Range header
	Offset int64

	// Count specifies the count of bytes that should be used to calculate
	// the end offset when creating the HTTP GET request's Range header
	Count int64

	// ETag specifies the resource's etag that should be used when creating
	// the HTTP GET request's If-Match header
	ETag ETag
}

// FailedReadNotifier is a function type that represents the notification function called when a read fails
type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool)

// RetryReaderOptions contains properties which can help to decide when to do retry.
type RetryReaderOptions struct {
	// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
	// while reading from a RetryReader. A value of zero means that no additional HTTP
	// GET requests will be made.
	MaxRetryRequests   int
	doInjectError      bool
	doInjectErrorRound int
	injectedError      error

	// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
	NotifyFailedRead FailedReadNotifier

	// TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
	// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
	// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the
	// read is too slow, the caller may want to force a retry in the hope that the retry will be quicker). If
	// TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead
	// treated as a fatal (non-retryable) error.
	// Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
	// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
	// which will be retried.
	TreatEarlyCloseAsError bool
}

// retryReader implements io.ReadCloser.
// retryReader tries to read from the response, and if a retriable network error
// is returned while reading, it retries (according to the retry reader options) by
// executing the user-defined getter to obtain a new response, and continues the overall
// reading process by reading from the new response.
type retryReader struct {
	ctx             context.Context
	info            HTTPGetterInfo
	countWasBounded bool
	o               RetryReaderOptions
	getter          HTTPGetter

	// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is the response
	responseMu *sync.Mutex
	response   *http.Response
}

// NewRetryReader creates a retry reader.
func NewRetryReader(ctx context.Context, initialResponse *http.Response,
	info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
	return &retryReader{
		ctx:             ctx,
		getter:          getter,
		info:            info,
		countWasBounded: info.Count != CountToEnd,
		response:        initialResponse,
		responseMu:      &sync.Mutex{},
		o:               o}
}

func (s *retryReader) setResponse(r *http.Response) {
	s.responseMu.Lock()
	defer s.responseMu.Unlock()
	s.response = r
}

func (s *retryReader) Read(p []byte) (n int, err error) {
	for try := 0; ; try++ {
		//fmt.Println(try)       // Comment out for debugging.
		if s.countWasBounded && s.info.Count == CountToEnd {
			// User specified an original count and the remaining bytes are 0, return 0, EOF
			return 0, io.EOF
		}

		s.responseMu.Lock()
		resp := s.response
		s.responseMu.Unlock()
		if resp == nil { // We don't have a response stream to read from, try to get one.
			newResponse, err := s.getter(s.ctx, s.info)
			if err != nil {
				return 0, err
			}
			// Successful GET; this is the network stream we'll read from.
			s.setResponse(newResponse)
			resp = newResponse
		}
		n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running)

		// Injection mechanism for testing.
		if s.o.doInjectError && try == s.o.doInjectErrorRound {
			if s.o.injectedError != nil {
				err = s.o.injectedError
			} else {
				err = &net.DNSError{IsTemporary: true}
			}
		}

		// We successfully read data or hit EOF.
		if err == nil || err == io.EOF {
			s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future
			if s.info.Count != CountToEnd {
				s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
			}
			return n, err // Return the result to the caller
		}
		s.Close()          // Error, close stream
		s.setResponse(nil) // Our stream is no longer good

		// Check the retry count and error code, and decide whether to retry.
		retriesExhausted := try >= s.o.MaxRetryRequests
		_, isNetError := err.(net.Error)
		isUnexpectedEOF := err == io.ErrUnexpectedEOF
		willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted

		// Notify, for logging purposes, of any failures
		if s.o.NotifyFailedRead != nil {
			failureCount := try + 1 // because try is zero-based
			s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry)
		}

		if willRetry {
			continue
			// Loop around and try to get and read from new stream.
		}
		return n, err // Not retryable, or retries exhausted, so just return
	}
}

// By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry.
// Is this safe, to close early from another goroutine? Early close ultimately ends up calling
// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors"
// which is exactly the behaviour we want.
// NOTE: if a caller has forced an early Close from a separate goroutine (separate from the Read)
// then there are two different types of error that may happen - either the one we check for here,
// or a net.Error (due to closure of the connection). Which one happens depends on timing. We only need this routine
// to check for one, since the other is a net.Error, which our main Read retry loop is already handling.
func (s *retryReader) wasRetryableEarlyClose(err error) bool {
	if s.o.TreatEarlyCloseAsError {
		return false // user wants all early closes to be errors, and so not retryable
	}
	// unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text
	return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage)
}

const ReadOnClosedBodyMessage = "read on closed response body"

func (s *retryReader) Close() error {
	s.responseMu.Lock()
	defer s.responseMu.Unlock()
	if s.response != nil && s.response.Body != nil {
		return s.response.Body.Close()
	}
	return nil
}
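NewRetryReader is normally wired up by the blob download helpers, but the shape of the getter is worth seeing on its own: on each retry it is called with the updated Offset/Count (and ETag) so the re-issued GET resumes where the failed stream stopped. A sketch under stated assumptions: doRangeGet is a hypothetical helper and example.invalid is a placeholder URL.

```go
package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net/http"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// doRangeGet is a hypothetical helper: a real implementation would also bound
// the range using info.Count and set If-Match from info.ETag.
func doRangeGet(ctx context.Context, info azblob.HTTPGetterInfo) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodGet, "https://example.invalid/container/blob", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-", info.Offset))
	return http.DefaultClient.Do(req.WithContext(ctx))
}

func main() {
	ctx := context.Background()
	info := azblob.HTTPGetterInfo{Offset: 0, Count: azblob.CountToEnd}
	// Passing a nil initial response makes the retry reader call the getter on the first Read.
	rr := azblob.NewRetryReader(ctx, nil, info,
		azblob.RetryReaderOptions{MaxRetryRequests: 3}, doRangeGet)
	defer rr.Close()
	_, _ = ioutil.ReadAll(rr) // retries transparently on retriable network errors
}
```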
219 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go generated vendored Normal file
@@ -0,0 +1,219 @@
package azblob

import (
	"bytes"
	"errors"
	"fmt"
	"strings"
	"time"
)

// AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
type AccountSASSignatureValues struct {
	Version       string      `param:"sv"`  // If not specified, this defaults to SASVersion
	Protocol      SASProtocol `param:"spr"` // See the SASProtocol* constants
	StartTime     time.Time   `param:"st"`  // Not specified if IsZero
	ExpiryTime    time.Time   `param:"se"`  // Not specified if IsZero
	Permissions   string      `param:"sp"`  // Create by initializing an AccountSASPermissions and then call String()
	IPRange       IPRange     `param:"sip"`
	Services      string      `param:"ss"`  // Create by initializing AccountSASServices and then call String()
	ResourceTypes string      `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String()
}

// NewSASQueryParameters uses an account's shared key credential to sign these signature values to produce
// the proper SAS query parameters.
func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
	// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
	if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
		return SASQueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
	}
	if v.Version == "" {
		v.Version = SASVersion
	}
	perms := &AccountSASPermissions{}
	if err := perms.Parse(v.Permissions); err != nil {
		return SASQueryParameters{}, err
	}
	v.Permissions = perms.String()

	startTime, expiryTime, _ := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, time.Time{})

	stringToSign := strings.Join([]string{
		sharedKeyCredential.AccountName(),
		v.Permissions,
		v.Services,
		v.ResourceTypes,
		startTime,
		expiryTime,
		v.IPRange.String(),
		string(v.Protocol),
		v.Version,
		""}, // That's right, the account SAS requires a terminating extra newline
		"\n")

	signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
	p := SASQueryParameters{
		// Common SAS parameters
		version:     v.Version,
		protocol:    v.Protocol,
		startTime:   v.StartTime,
		expiryTime:  v.ExpiryTime,
		permissions: v.Permissions,
		ipRange:     v.IPRange,

		// Account-specific SAS parameters
		services:      v.Services,
		resourceTypes: v.ResourceTypes,

		// Calculated SAS signature
		signature: signature,
	}

	return p, nil
}

// The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
type AccountSASPermissions struct {
	Read, Write, Delete, List, Add, Create, Update, Process bool
}

// String produces the SAS permissions string for an Azure Storage account.
// Call this method to set AccountSASSignatureValues's Permissions field.
func (p AccountSASPermissions) String() string {
	var buffer bytes.Buffer
	if p.Read {
		buffer.WriteRune('r')
	}
	if p.Write {
		buffer.WriteRune('w')
	}
	if p.Delete {
		buffer.WriteRune('d')
	}
	if p.List {
		buffer.WriteRune('l')
	}
	if p.Add {
		buffer.WriteRune('a')
	}
	if p.Create {
		buffer.WriteRune('c')
	}
	if p.Update {
		buffer.WriteRune('u')
	}
	if p.Process {
		buffer.WriteRune('p')
	}
	return buffer.String()
}

// Parse initializes the AccountSASPermissions's fields from a string.
func (p *AccountSASPermissions) Parse(s string) error {
	*p = AccountSASPermissions{} // Clear out the flags
	for _, r := range s {
		switch r {
		case 'r':
			p.Read = true
		case 'w':
			p.Write = true
		case 'd':
			p.Delete = true
		case 'l':
			p.List = true
		case 'a':
			p.Add = true
		case 'c':
			p.Create = true
		case 'u':
			p.Update = true
		case 'p':
			p.Process = true
		default:
			return fmt.Errorf("Invalid permission character: '%v'", r)
		}
	}
	return nil
}

// The AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field.
type AccountSASServices struct {
	Blob, Queue, File bool
}

// String produces the SAS services string for an Azure Storage account.
// Call this method to set AccountSASSignatureValues's Services field.
func (s AccountSASServices) String() string {
	var buffer bytes.Buffer
	if s.Blob {
		buffer.WriteRune('b')
	}
	if s.Queue {
		buffer.WriteRune('q')
	}
	if s.File {
		buffer.WriteRune('f')
	}
	return buffer.String()
}

// Parse initializes the AccountSASServices' fields from a string.
func (a *AccountSASServices) Parse(s string) error {
	*a = AccountSASServices{} // Clear out the flags
	for _, r := range s {
		switch r {
		case 'b':
			a.Blob = true
		case 'q':
			a.Queue = true
		case 'f':
			a.File = true
		default:
			return fmt.Errorf("Invalid service character: '%v'", r)
		}
	}
	return nil
}

// The AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field.
type AccountSASResourceTypes struct {
	Service, Container, Object bool
}

// String produces the SAS resource types string for an Azure Storage account.
// Call this method to set AccountSASSignatureValues's ResourceTypes field.
func (rt AccountSASResourceTypes) String() string {
	var buffer bytes.Buffer
	if rt.Service {
		buffer.WriteRune('s')
	}
	if rt.Container {
		buffer.WriteRune('c')
	}
	if rt.Object {
		buffer.WriteRune('o')
	}
	return buffer.String()
}

// Parse initializes the AccountSASResourceType's fields from a string.
func (rt *AccountSASResourceTypes) Parse(s string) error {
	*rt = AccountSASResourceTypes{} // Clear out the flags
	for _, r := range s {
		switch r {
		case 's':
			rt.Service = true
		case 'c':
			rt.Container = true
		case 'o':
			rt.Object = true
		default:
			return fmt.Errorf("Invalid resource type: '%v'", r)
		}
	}
	return nil
}
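Putting the account SAS pieces together: build the permissions, services, and resource-type strings with their helper types, then sign with a shared key credential. A sketch with placeholder credentials; NewSharedKeyCredential, SASProtocolHTTPS, and SASQueryParameters.Encode are part of azblob but not shown in this diff:

```go
package main

import (
	"fmt"
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	credential, err := azblob.NewSharedKeyCredential("myaccount", "bXlrZXk=") // placeholders
	if err != nil {
		panic(err)
	}
	sasQueryParams, err := azblob.AccountSASSignatureValues{
		Protocol:      azblob.SASProtocolHTTPS,
		ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
		Permissions:   azblob.AccountSASPermissions{Read: true, List: true}.String(),
		Services:      azblob.AccountSASServices{Blob: true}.String(),
		ResourceTypes: azblob.AccountSASResourceTypes{Container: true, Object: true}.String(),
	}.NewSASQueryParameters(credential)
	if err != nil {
		panic(err)
	}
	fmt.Println(sasQueryParams.Encode()) // append this query string to the account URL
}
```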
Some files were not shown because too many files have changed in this diff.