mirror of
https://github.com/rclone/rclone.git
synced 2025-12-27 21:53:27 +00:00
Compare commits
30 Commits
v1.42
...
sandeepkru
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e24fe27153 | ||
|
|
2cb79cb43d | ||
|
|
4c2fbf9b36 | ||
|
|
ed4f1b2936 | ||
|
|
144c1a04d4 | ||
|
|
25ec7f5c00 | ||
|
|
b15603d5ea | ||
|
|
71c974bf9a | ||
|
|
03c5b8232e | ||
|
|
72392a2d72 | ||
|
|
b062ae9d13 | ||
|
|
8c0335a176 | ||
|
|
794e55de27 | ||
|
|
038ed1aaf0 | ||
|
|
97beff5370 | ||
|
|
b9b9bce0db | ||
|
|
947e10eb2b | ||
|
|
6b42421374 | ||
|
|
fa051ff970 | ||
|
|
69164b3dda | ||
|
|
935533e57f | ||
|
|
1550f70865 | ||
|
|
1a65c3a740 | ||
|
|
a29a1de43d | ||
|
|
e7ae5e8ee0 | ||
|
|
56e1e82005 | ||
|
|
8442498693 | ||
|
|
08021c4636 | ||
|
|
3f0789e2db | ||
|
|
7110349547 |
@@ -47,4 +47,4 @@ deploy:
|
||||
on:
|
||||
all_branches: true
|
||||
go: "1.10.1"
|
||||
condition: $TRAVIS_OS_NAME == linux && $TRAVIS_PULL_REQUEST == false
|
||||
condition: $TRAVIS_PULL_REQUEST == false
|
||||
|
||||
107
Gopkg.lock
generated
107
Gopkg.lock
generated
@@ -14,28 +14,20 @@
|
||||
[[projects]]
|
||||
name = "cloud.google.com/go"
|
||||
packages = ["compute/metadata"]
|
||||
revision = "29f476ffa9c4cd4fd14336b6043090ac1ad76733"
|
||||
version = "v0.21.0"
|
||||
revision = "0fd7230b2a7505833d5f69b75cbd6c9582401479"
|
||||
version = "v0.23.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Azure/azure-sdk-for-go"
|
||||
packages = [
|
||||
"storage",
|
||||
"version"
|
||||
]
|
||||
revision = "4650843026a7fdec254a8d9cf893693a254edd0b"
|
||||
version = "v16.2.1"
|
||||
name = "github.com/Azure/azure-pipeline-go"
|
||||
packages = ["pipeline"]
|
||||
revision = "7571e8eb0876932ab505918ff7ed5107773e5ee2"
|
||||
version = "0.1.7"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Azure/go-autorest"
|
||||
packages = [
|
||||
"autorest",
|
||||
"autorest/adal",
|
||||
"autorest/azure",
|
||||
"autorest/date"
|
||||
]
|
||||
revision = "eaa7994b2278094c904d31993d26f56324db3052"
|
||||
version = "v10.8.1"
|
||||
name = "github.com/Azure/azure-storage-blob-go"
|
||||
packages = ["2017-07-29/azblob"]
|
||||
revision = "66ba96e49ebbdc3cd26970c6c675c906d304b5e2"
|
||||
version = "0.1.4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -74,6 +66,7 @@
|
||||
"aws/credentials/ec2rolecreds",
|
||||
"aws/credentials/endpointcreds",
|
||||
"aws/credentials/stscreds",
|
||||
"aws/csm",
|
||||
"aws/defaults",
|
||||
"aws/ec2metadata",
|
||||
"aws/endpoints",
|
||||
@@ -84,6 +77,8 @@
|
||||
"internal/sdkrand",
|
||||
"internal/shareddefaults",
|
||||
"private/protocol",
|
||||
"private/protocol/eventstream",
|
||||
"private/protocol/eventstream/eventstreamapi",
|
||||
"private/protocol/query",
|
||||
"private/protocol/query/queryutil",
|
||||
"private/protocol/rest",
|
||||
@@ -94,8 +89,8 @@
|
||||
"service/s3/s3manager",
|
||||
"service/sts"
|
||||
]
|
||||
revision = "4f5d298bd2dcb34b06d944594f458d1f77ac4d66"
|
||||
version = "v1.13.42"
|
||||
revision = "bfc1a07cf158c30c41a3eefba8aae043d0bb5bff"
|
||||
version = "v1.14.8"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/billziss-gh/cgofuse"
|
||||
@@ -121,12 +116,6 @@
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/dgrijalva/jwt-go"
|
||||
packages = ["."]
|
||||
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
|
||||
version = "v3.2.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/djherbis/times"
|
||||
packages = ["."]
|
||||
@@ -154,8 +143,8 @@
|
||||
[[projects]]
|
||||
name = "github.com/go-ini/ini"
|
||||
packages = ["."]
|
||||
revision = "6529cf7c58879c08d927016dde4477f18a0634cb"
|
||||
version = "v1.36.0"
|
||||
revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
|
||||
version = "v1.37.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/golang/protobuf"
|
||||
@@ -193,16 +182,10 @@
|
||||
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kr/fs"
|
||||
packages = ["."]
|
||||
revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/marstr/guid"
|
||||
packages = ["."]
|
||||
revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
|
||||
version = "v1.1.0"
|
||||
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/mattn/go-runewidth"
|
||||
@@ -217,16 +200,16 @@
|
||||
revision = "887eb06ab6a255fbf5744b5812788e884078620a"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/ncw/swift"
|
||||
packages = ["."]
|
||||
revision = "b2a7479cf26fa841ff90dd932d0221cb5c50782d"
|
||||
version = "v1.0.39"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/nsf/termbox-go"
|
||||
packages = ["."]
|
||||
revision = "5a49b82160547cc98fca189a677a1c14eff796f8"
|
||||
revision = "5c94acc5e6eb520f1bcd183974e01171cc4c23b3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -249,8 +232,8 @@
|
||||
"log",
|
||||
"reopen"
|
||||
]
|
||||
revision = "b98065a377794d577e2a0e32869378b9ce4b8952"
|
||||
version = "v0.1.1"
|
||||
revision = "807ee759d82c84982a89fb3dc875ef884942f1e5"
|
||||
version = "v0.2.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pkg/errors"
|
||||
@@ -261,8 +244,8 @@
|
||||
[[projects]]
|
||||
name = "github.com/pkg/sftp"
|
||||
packages = ["."]
|
||||
revision = "5bf2a174b604c6b5549dd9740d924ff2f02e3ad7"
|
||||
version = "1.6.0"
|
||||
revision = "57673e38ea946592a59c26592b7e6fbda646975b"
|
||||
version = "1.8.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pmezard/go-difflib"
|
||||
@@ -282,17 +265,11 @@
|
||||
revision = "55d61fa8aa702f59229e6cff85793c22e580eaf5"
|
||||
version = "v1.5.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/satori/go.uuid"
|
||||
packages = ["."]
|
||||
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/sevlyar/go-daemon"
|
||||
packages = ["."]
|
||||
revision = "45a2ba1b7c6710a044163fa109bf08d060bc3afa"
|
||||
revision = "f9261e73885de99b1647d68bedadf2b9a99ad11f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -306,8 +283,8 @@
|
||||
".",
|
||||
"doc"
|
||||
]
|
||||
revision = "a1f051bc3eba734da4772d60e2d677f47cf93ef4"
|
||||
version = "v0.0.2"
|
||||
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
|
||||
version = "v0.0.3"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/spf13/pflag"
|
||||
@@ -321,14 +298,14 @@
|
||||
"assert",
|
||||
"require"
|
||||
]
|
||||
revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
|
||||
version = "v1.2.1"
|
||||
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
|
||||
version = "v1.2.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/t3rm1n4l/go-mega"
|
||||
packages = ["."]
|
||||
revision = "3ba49835f4db01d6329782cbdc7a0a8bb3a26c5f"
|
||||
revision = "57978a63bd3f91fa7e188b751a7e7e6dd4e33813"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -351,8 +328,8 @@
|
||||
"service",
|
||||
"utils"
|
||||
]
|
||||
revision = "9e88dc1b83728e1462fd74bb61b0f5e28ac95bb6"
|
||||
version = "v2.2.12"
|
||||
revision = "4f9ac88c5fec7350e960aabd0de1f1ede0ad2895"
|
||||
version = "v2.2.14"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -364,6 +341,7 @@
|
||||
"ed25519",
|
||||
"ed25519/internal/edwards25519",
|
||||
"internal/chacha20",
|
||||
"internal/subtle",
|
||||
"nacl/secretbox",
|
||||
"pbkdf2",
|
||||
"poly1305",
|
||||
@@ -373,7 +351,7 @@
|
||||
"ssh/agent",
|
||||
"ssh/terminal"
|
||||
]
|
||||
revision = "4ec37c66abab2c7e02ae775328b2ff001c3f025a"
|
||||
revision = "027cca12c2d63e3d62b670d901e8a2c95854feec"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -387,13 +365,12 @@
|
||||
"http2",
|
||||
"http2/hpack",
|
||||
"idna",
|
||||
"lex/httplex",
|
||||
"publicsuffix",
|
||||
"webdav",
|
||||
"webdav/internal/xml",
|
||||
"websocket"
|
||||
]
|
||||
revision = "640f4622ab692b87c2f3a94265e6f579fe38263d"
|
||||
revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -405,7 +382,7 @@
|
||||
"jws",
|
||||
"jwt"
|
||||
]
|
||||
revision = "cdc340f7c179dbbfa4afd43b7614e8fcadde4269"
|
||||
revision = "1e0a3fa8ba9a5c9eb35c271780101fdaf1b205d7"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -414,7 +391,7 @@
|
||||
"unix",
|
||||
"windows"
|
||||
]
|
||||
revision = "6f686a352de66814cdd080d970febae7767857a3"
|
||||
revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7"
|
||||
|
||||
[[projects]]
|
||||
name = "golang.org/x/text"
|
||||
@@ -453,7 +430,7 @@
|
||||
"googleapi/internal/uritemplates",
|
||||
"storage/v1"
|
||||
]
|
||||
revision = "bb395b674c9930450ea7243b3e3c8f43150f4c11"
|
||||
revision = "2eea9ba0a3d94f6ab46508083e299a00bbbc65f6"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/appengine"
|
||||
@@ -470,8 +447,8 @@
|
||||
"log",
|
||||
"urlfetch"
|
||||
]
|
||||
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
|
||||
version = "v1.0.0"
|
||||
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/yaml.v2"
|
||||
@@ -482,6 +459,6 @@
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "e250c0e18b90fecd81621d7ffcc1580931e668bac9048de910fdf6df8e4a140c"
|
||||
inputs-digest = "898be2e0549915b0f877529a45db62ce6b9904e7ecf8e3fed48d768d429c32ce"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
||||
17
Gopkg.toml
17
Gopkg.toml
@@ -1,20 +1,15 @@
|
||||
# github.com/yunify/qingstor-sdk-go depends on an old version of
|
||||
# github.com/pengsrc/go-shared - pin the version here
|
||||
#
|
||||
# When the version here moves on, we can unpin
|
||||
# https://github.com/yunify/qingstor-sdk-go/blob/master/glide.yaml
|
||||
[[override]]
|
||||
version = "=v0.1.1"
|
||||
name = "github.com/pengsrc/go-shared"
|
||||
|
||||
# pin this to master to pull in the macOS changes
|
||||
# can likely remove for 1.42
|
||||
# can likely remove for 1.43
|
||||
[[override]]
|
||||
branch = "master"
|
||||
name = "github.com/sevlyar/go-daemon"
|
||||
|
||||
# pin this to master to pull in the fix for linux/mips
|
||||
# can likely remove for 1.42
|
||||
# can likely remove for 1.43
|
||||
[[override]]
|
||||
branch = "master"
|
||||
name = "github.com/coreos/bbolt"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/Azure/azure-storage-blob-go"
|
||||
version = "0.1.4"
|
||||
|
||||
38
Makefile
38
Makefile
@@ -1,12 +1,22 @@
|
||||
SHELL = bash
|
||||
TAG := $(shell echo $$(git describe --abbrev=8 --tags)-$${APPVEYOR_REPO_BRANCH:-$${TRAVIS_BRANCH:-$$(git rev-parse --abbrev-ref HEAD)}} | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/; s/-\(HEAD\|master\)$$//')
|
||||
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
|
||||
TAG_BRANCH := -$(BRANCH)
|
||||
BRANCH_PATH := branch/
|
||||
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
|
||||
TAG_BRANCH :=
|
||||
BRANCH_PATH :=
|
||||
endif
|
||||
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
|
||||
LAST_TAG := $(shell git describe --tags --abbrev=0)
|
||||
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
|
||||
GO_VERSION := $(shell go version)
|
||||
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
|
||||
# Run full tests if go >= go1.9
|
||||
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 9)')
|
||||
BETA_URL := https://beta.rclone.org/$(TAG)/
|
||||
BETA_PATH := $(BRANCH_PATH)$(TAG)
|
||||
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
|
||||
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
|
||||
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
|
||||
# Pass in GOTAGS=xyz on the make command line to set build tags
|
||||
ifdef GOTAGS
|
||||
BUILDTAGS=-tags "$(GOTAGS)"
|
||||
@@ -21,6 +31,7 @@ rclone:
|
||||
|
||||
vars:
|
||||
@echo SHELL="'$(SHELL)'"
|
||||
@echo BRANCH="'$(BRANCH)'"
|
||||
@echo TAG="'$(TAG)'"
|
||||
@echo LAST_TAG="'$(LAST_TAG)'"
|
||||
@echo NEW_TAG="'$(NEW_TAG)'"
|
||||
@@ -160,25 +171,32 @@ else
|
||||
endif
|
||||
|
||||
appveyor_upload:
|
||||
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
|
||||
ifeq ($(APPVEYOR_REPO_BRANCH),master)
|
||||
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ memstore:beta-rclone-org
|
||||
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
||||
ifndef BRANCH_PATH
|
||||
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
|
||||
endif
|
||||
@echo Beta release ready at $(BETA_URL)
|
||||
|
||||
BUILD_FLAGS := -exclude "^(windows|darwin)/"
|
||||
ifeq ($(TRAVIS_OS_NAME),osx)
|
||||
BUILD_FLAGS := -include "^darwin/" -cgo
|
||||
endif
|
||||
|
||||
travis_beta:
|
||||
ifeq ($(TRAVIS_OS_NAME),linux)
|
||||
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
|
||||
endif
|
||||
git log $(LAST_TAG).. > /tmp/git-log.txt
|
||||
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt -exclude "^windows/" -parallel 8 $(BUILDTAGS) $(TAG)β
|
||||
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
|
||||
ifeq ($(TRAVIS_BRANCH),master)
|
||||
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ memstore:beta-rclone-org
|
||||
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)β
|
||||
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
||||
ifndef BRANCH_PATH
|
||||
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
|
||||
endif
|
||||
@echo Beta release ready at $(BETA_URL)
|
||||
|
||||
# Fetch the windows builds from appveyor
|
||||
fetch_windows:
|
||||
rclone -v copy --include 'rclone-v*-windows-*.zip' memstore:beta-rclone-org/$(TAG) build/
|
||||
rclone -v copy --include 'rclone-v*-windows-*.zip' $(BETA_UPLOAD) build/
|
||||
-#cp -av build/rclone-v*-windows-386.zip build/rclone-current-windows-386.zip
|
||||
-#cp -av build/rclone-v*-windows-amd64.zip build/rclone-current-windows-amd64.zip
|
||||
md5sum build/rclone-*-windows-*.zip | sort
|
||||
|
||||
@@ -31,6 +31,7 @@ Making a release
|
||||
* # announce with forum post, twitter post, G+ post
|
||||
|
||||
Early in the next release cycle update the vendored dependencies
|
||||
* Review any pinned packages in Gopkg.toml and remove if possible
|
||||
* make update
|
||||
* git status
|
||||
* git add new files
|
||||
|
||||
@@ -1,15 +1,19 @@
|
||||
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
||||
|
||||
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
|
||||
|
||||
package azureblob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
@@ -17,13 +21,11 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/Azure/azure-storage-blob-go/2017-07-29/azblob"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
@@ -59,10 +61,13 @@ func init() {
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "account",
|
||||
Help: "Storage Account Name",
|
||||
Help: "Storage Account Name (leave blank to use connection string or SAS URL)",
|
||||
}, {
|
||||
Name: "key",
|
||||
Help: "Storage Account Key",
|
||||
Help: "Storage Account Key (leave blank to use connection string or SAS URL)",
|
||||
}, {
|
||||
Name: "sas_url",
|
||||
Help: "SAS URL for container level access only\n(leave blank if using account/key or connection string)",
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service - leave blank normally.",
|
||||
@@ -75,14 +80,13 @@ func init() {
|
||||
|
||||
// Fs represents a remote azure server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
features *fs.Features // optional features
|
||||
account string // account name
|
||||
key []byte // auth key
|
||||
endpoint string // name of the starting api endpoint
|
||||
bc *storage.BlobStorageClient
|
||||
cc *storage.Container
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
features *fs.Features // optional features
|
||||
account string // account name
|
||||
endpoint string // name of the starting api endpoint
|
||||
svcURL *azblob.ServiceURL // reference to serviceURL
|
||||
cntURL *azblob.ContainerURL // reference to containerURL
|
||||
container string // the container we are working on
|
||||
containerOKMu sync.Mutex // mutex to protect container OK
|
||||
containerOK bool // true if we have created the container
|
||||
@@ -93,13 +97,14 @@ type Fs struct {
|
||||
|
||||
// Object describes a azure object
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
modTime time.Time // The modified time of the object if known
|
||||
md5 string // MD5 hash if known
|
||||
size int64 // Size of the object
|
||||
mimeType string // Content-Type of the object
|
||||
meta map[string]string // blob metadata
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
modTime time.Time // The modified time of the object if known
|
||||
md5 string // MD5 hash if known
|
||||
size int64 // Size of the object
|
||||
mimeType string // Content-Type of the object
|
||||
accessTier azblob.AccessTierType // Blob Access Tier
|
||||
meta map[string]string // blob metadata
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -159,8 +164,8 @@ var retryErrorCodes = []int{
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
func (f *Fs) shouldRetry(err error) (bool, error) {
|
||||
// FIXME interpret special errors - more to do here
|
||||
if storageErr, ok := err.(storage.AzureStorageServiceError); ok {
|
||||
statusCode := storageErr.StatusCode
|
||||
if storageErr, ok := err.(azblob.StorageError); ok {
|
||||
statusCode := storageErr.Response().StatusCode
|
||||
for _, e := range retryErrorCodes {
|
||||
if statusCode == e {
|
||||
return true, err
|
||||
@@ -183,36 +188,55 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
return nil, err
|
||||
}
|
||||
account := config.FileGet(name, "account")
|
||||
if account == "" {
|
||||
return nil, errors.New("account not found")
|
||||
}
|
||||
key := config.FileGet(name, "key")
|
||||
if key == "" {
|
||||
return nil, errors.New("key not found")
|
||||
}
|
||||
keyBytes, err := base64.StdEncoding.DecodeString(key)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("malformed storage account key: %v", err)
|
||||
}
|
||||
sasURL := config.FileGet(name, "sas_url")
|
||||
endpoint := config.FileGet(name, "endpoint", "blob.core.windows.net")
|
||||
|
||||
endpoint := config.FileGet(name, "endpoint", storage.DefaultBaseURL)
|
||||
|
||||
client, err := storage.NewClient(account, key, endpoint, apiVersion, true)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make azure storage client")
|
||||
var (
|
||||
u *url.URL
|
||||
serviceURL azblob.ServiceURL
|
||||
containerURL azblob.ContainerURL
|
||||
)
|
||||
switch {
|
||||
case account != "" && key != "":
|
||||
credential := azblob.NewSharedKeyCredential(account, key)
|
||||
u, err = url.Parse(fmt.Sprintf("https://%s.%s", account, endpoint))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
|
||||
}
|
||||
pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
containerURL = serviceURL.NewContainerURL(container)
|
||||
case sasURL != "":
|
||||
u, err = url.Parse(sasURL)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse SAS URL")
|
||||
}
|
||||
// use anonymous credentials in case of sas url
|
||||
pipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
|
||||
// Check if we have container level SAS or account level sas
|
||||
parts := azblob.NewBlobURLParts(*u)
|
||||
if parts.ContainerName != "" {
|
||||
if parts.ContainerName != container {
|
||||
return nil, errors.New("Container name in SAS URL and container provided in command do not match")
|
||||
}
|
||||
containerURL = azblob.NewContainerURL(*u, pipeline)
|
||||
} else {
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
containerURL = serviceURL.NewContainerURL(container)
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("Need account+key or connectionString or sasURL")
|
||||
}
|
||||
client.HTTPClient = fshttp.NewClient(fs.Config)
|
||||
bc := client.GetBlobService()
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
container: container,
|
||||
root: directory,
|
||||
account: account,
|
||||
key: keyBytes,
|
||||
endpoint: endpoint,
|
||||
bc: &bc,
|
||||
cc: bc.GetContainerReference(container),
|
||||
svcURL: &serviceURL,
|
||||
cntURL: &containerURL,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
||||
}
|
||||
@@ -250,22 +274,17 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
// Return an Object from a path
|
||||
//
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) newObjectWithInfo(remote string, info *storage.Blob) (fs.Object, error) {
|
||||
func (f *Fs) newObjectWithInfo(remote string, info *azblob.Blob) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
if info != nil {
|
||||
err := o.decodeMetaData(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
err := o.readMetaData() // reads info and headers, returning an error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err := o.readMetaData() // reads info and headers, returning an error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
||||
@@ -276,13 +295,13 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
}
|
||||
|
||||
// getBlobReference creates an empty blob reference with no metadata
|
||||
func (f *Fs) getBlobReference(remote string) *storage.Blob {
|
||||
return f.cc.GetBlobReference(f.root + remote)
|
||||
func (f *Fs) getBlobReference(remote string) azblob.BlobURL {
|
||||
return f.cntURL.NewBlobURL(f.root + remote)
|
||||
}
|
||||
|
||||
// getBlobWithModTime adds the modTime passed in to o.meta and creates
|
||||
// a Blob from it.
|
||||
func (o *Object) getBlobWithModTime(modTime time.Time) *storage.Blob {
|
||||
func (o *Object) getBlobWithModTime(modTime time.Time) *azblob.BlobURL {
|
||||
// Make sure o.meta is not nil
|
||||
if o.meta == nil {
|
||||
o.meta = make(map[string]string, 1)
|
||||
@@ -292,12 +311,18 @@ func (o *Object) getBlobWithModTime(modTime time.Time) *storage.Blob {
|
||||
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
|
||||
|
||||
blob := o.getBlobReference()
|
||||
blob.Metadata = o.meta
|
||||
return blob
|
||||
ctx := context.Background()
|
||||
_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{})
|
||||
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &blob
|
||||
}
|
||||
|
||||
// listFn is called from list to handle an object
|
||||
type listFn func(remote string, object *storage.Blob, isDirectory bool) error
|
||||
type listFn func(remote string, object *azblob.Blob, isDirectory bool) error
|
||||
|
||||
// list lists the objects into the function supplied from
|
||||
// the container and root supplied
|
||||
@@ -318,32 +343,38 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
if !recurse {
|
||||
delimiter = "/"
|
||||
}
|
||||
params := storage.ListBlobsParameters{
|
||||
MaxResults: maxResults,
|
||||
Prefix: root,
|
||||
Delimiter: delimiter,
|
||||
Include: &storage.IncludeBlobDataset{
|
||||
Snapshots: false,
|
||||
Metadata: true,
|
||||
UncommittedBlobs: false,
|
||||
|
||||
options := azblob.ListBlobsSegmentOptions{
|
||||
Details: azblob.BlobListingDetails{
|
||||
Copy: false,
|
||||
Metadata: true,
|
||||
Snapshots: false,
|
||||
UncommittedBlobs: false,
|
||||
Deleted: false,
|
||||
},
|
||||
Prefix: root,
|
||||
MaxResults: int32(maxResults),
|
||||
}
|
||||
for {
|
||||
var response storage.BlobListResponse
|
||||
ctx := context.Background()
|
||||
for marker := (azblob.Marker{}); marker.NotDone(); {
|
||||
var response *azblob.ListBlobsHierarchyResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
response, err = f.cc.ListBlobs(params)
|
||||
response, err = f.cntURL.ListBlobsHierarchySegment(ctx, marker, delimiter, options)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
if storageErr, ok := err.(storage.AzureStorageServiceError); ok && storageErr.StatusCode == http.StatusNotFound {
|
||||
if storageErr, ok := err.(azblob.StorageError); ok && storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
for i := range response.Blobs {
|
||||
file := &response.Blobs[i]
|
||||
// Advance marker to next
|
||||
marker = response.NextMarker
|
||||
|
||||
for i := range response.Blobs.Blob {
|
||||
file := &response.Blobs.Blob[i]
|
||||
// Finish if file name no longer has prefix
|
||||
// if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
|
||||
// return nil
|
||||
@@ -365,8 +396,8 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
}
|
||||
}
|
||||
// Send the subdirectories
|
||||
for _, remote := range response.BlobPrefixes {
|
||||
remote := strings.TrimRight(remote, "/")
|
||||
for _, remote := range response.Blobs.BlobPrefix {
|
||||
remote := strings.TrimRight(remote.Name, "/")
|
||||
if !strings.HasPrefix(remote, f.root) {
|
||||
fs.Debugf(f, "Odd directory name received %q", remote)
|
||||
continue
|
||||
@@ -378,17 +409,12 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// end if no NextFileName
|
||||
if response.NextMarker == "" {
|
||||
break
|
||||
}
|
||||
params.Marker = response.NextMarker
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert a list item into a DirEntry
|
||||
func (f *Fs) itemToDirEntry(remote string, object *storage.Blob, isDirectory bool) (fs.DirEntry, error) {
|
||||
func (f *Fs) itemToDirEntry(remote string, object *azblob.Blob, isDirectory bool) (fs.DirEntry, error) {
|
||||
if isDirectory {
|
||||
d := fs.NewDir(remote, time.Time{})
|
||||
return d, nil
|
||||
@@ -412,7 +438,7 @@ func (f *Fs) markContainerOK() {
|
||||
|
||||
// listDir lists a single directory
|
||||
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
|
||||
err = f.list(dir, false, listChunkSize, func(remote string, object *storage.Blob, isDirectory bool) error {
|
||||
err = f.list(dir, false, listChunkSize, func(remote string, object *azblob.Blob, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -435,13 +461,8 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
|
||||
if dir != "" {
|
||||
return nil, fs.ErrorListBucketRequired
|
||||
}
|
||||
err = f.listContainersToFn(func(container *storage.Container) error {
|
||||
t, err := time.Parse(time.RFC1123, container.Properties.LastModified)
|
||||
if err != nil {
|
||||
fs.Debugf(f, "Failed to parse LastModified %q: %v", container.Properties.LastModified, err)
|
||||
t = time.Time{}
|
||||
}
|
||||
d := fs.NewDir(container.Name, t)
|
||||
err = f.listContainersToFn(func(container *azblob.Container) error {
|
||||
d := fs.NewDir(container.Name, container.Properties.LastModified)
|
||||
entries = append(entries, d)
|
||||
return nil
|
||||
})
|
||||
@@ -488,7 +509,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||
return fs.ErrorListBucketRequired
|
||||
}
|
||||
list := walk.NewListRHelper(callback)
|
||||
err = f.list(dir, true, listChunkSize, func(remote string, object *storage.Blob, isDirectory bool) error {
|
||||
err = f.list(dir, true, listChunkSize, func(remote string, object *azblob.Blob, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -504,27 +525,34 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||
}
|
||||
|
||||
// listContainerFn is called from listContainersToFn to handle a container
|
||||
type listContainerFn func(*storage.Container) error
|
||||
type listContainerFn func(*azblob.Container) error
|
||||
|
||||
// listContainersToFn lists the containers to the function supplied
|
||||
func (f *Fs) listContainersToFn(fn listContainerFn) error {
|
||||
// FIXME page the containers if necessary?
|
||||
params := storage.ListContainersParameters{}
|
||||
var response *storage.ContainerListResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
response, err = f.bc.ListContainers(params)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
params := azblob.ListContainersSegmentOptions{
|
||||
MaxResults: int32(listChunkSize),
|
||||
}
|
||||
for i := range response.Containers {
|
||||
err = fn(&response.Containers[i])
|
||||
ctx := context.Background()
|
||||
for marker := (azblob.Marker{}); marker.NotDone(); {
|
||||
var response *azblob.ListContainersResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
response, err = f.svcURL.ListContainersSegment(ctx, marker, params)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := range response.Containers {
|
||||
err = fn(&response.Containers[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
marker = response.NextMarker
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -549,23 +577,20 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
if f.containerOK {
|
||||
return nil
|
||||
}
|
||||
options := storage.CreateContainerOptions{
|
||||
Access: storage.ContainerAccessTypePrivate,
|
||||
}
|
||||
|
||||
// now try to create the container
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
err := f.cc.Create(&options)
|
||||
ctx := context.Background()
|
||||
_, err := f.cntURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
|
||||
if err != nil {
|
||||
if storageErr, ok := err.(storage.AzureStorageServiceError); ok {
|
||||
switch storageErr.StatusCode {
|
||||
case http.StatusConflict:
|
||||
switch storageErr.Code {
|
||||
case "ContainerAlreadyExists":
|
||||
f.containerOK = true
|
||||
return false, nil
|
||||
case "ContainerBeingDeleted":
|
||||
f.containerDeleted = true
|
||||
return true, err
|
||||
}
|
||||
if storageErr, ok := err.(azblob.StorageError); ok {
|
||||
switch storageErr.ServiceCode() {
|
||||
case azblob.ServiceCodeContainerAlreadyExists:
|
||||
f.containerOK = true
|
||||
return false, nil
|
||||
case azblob.ServiceCodeContainerBeingDeleted:
|
||||
f.containerDeleted = true
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -581,7 +606,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
// isEmpty checks to see if a given directory is empty and returns an error if not
|
||||
func (f *Fs) isEmpty(dir string) (err error) {
|
||||
empty := true
|
||||
err = f.list("", true, 1, func(remote string, object *storage.Blob, isDirectory bool) error {
|
||||
err = f.list("", true, 1, func(remote string, object *azblob.Blob, isDirectory bool) error {
|
||||
empty = false
|
||||
return nil
|
||||
})
|
||||
@@ -599,16 +624,16 @@ func (f *Fs) isEmpty(dir string) (err error) {
|
||||
func (f *Fs) deleteContainer() error {
|
||||
f.containerOKMu.Lock()
|
||||
defer f.containerOKMu.Unlock()
|
||||
options := storage.DeleteContainerOptions{}
|
||||
options := azblob.ContainerAccessConditions{}
|
||||
ctx := context.Background()
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
exists, err := f.cc.Exists()
|
||||
_, err := f.cntURL.Delete(ctx, options)
|
||||
if err != nil {
|
||||
if storageErr, ok := err.(azblob.StorageError); ok && storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound {
|
||||
return false, fs.ErrorDirNotFound
|
||||
}
|
||||
return f.shouldRetry(err)
|
||||
}
|
||||
if !exists {
|
||||
return false, fs.ErrorDirNotFound
|
||||
}
|
||||
err = f.cc.Delete(&options)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
@@ -671,17 +696,36 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
dstBlob := f.getBlobReference(remote)
|
||||
srcBlob := srcObj.getBlobReference()
|
||||
options := storage.CopyOptions{}
|
||||
sourceBlobURL := srcBlob.GetURL()
|
||||
dstBlobURL := f.getBlobReference(remote)
|
||||
srcBlobURL := srcObj.getBlobReference()
|
||||
|
||||
source, err := url.Parse(srcBlobURL.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
options := azblob.BlobAccessConditions{}
|
||||
ctx := context.Background()
|
||||
var startCopy *azblob.BlobsStartCopyFromURLResponse
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
err = dstBlob.Copy(sourceBlobURL, &options)
|
||||
startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, options, options)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
copyStatus := startCopy.CopyStatus()
|
||||
for copyStatus == azblob.CopyStatusPending {
|
||||
time.Sleep(1 * time.Second)
|
||||
getMetadata, err := dstBlobURL.GetProperties(ctx, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copyStatus = getMetadata.CopyStatus()
|
||||
}
|
||||
|
||||
return f.NewObject(remote)
|
||||
}
|
||||
|
||||
@@ -726,7 +770,7 @@ func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// decodeMetaData sets the metadata from the data passed in
|
||||
// decodeMetaDataFromProperties sets the metadata from the data passed in
|
||||
//
|
||||
// Sets
|
||||
// o.id
|
||||
@@ -734,14 +778,17 @@ func (o *Object) Size() int64 {
|
||||
// o.size
|
||||
// o.md5
|
||||
// o.meta
|
||||
func (o *Object) decodeMetaData(info *storage.Blob) (err error) {
|
||||
o.md5 = info.Properties.ContentMD5
|
||||
o.mimeType = info.Properties.ContentType
|
||||
o.size = info.Properties.ContentLength
|
||||
o.modTime = time.Time(info.Properties.LastModified)
|
||||
if len(info.Metadata) > 0 {
|
||||
o.meta = info.Metadata
|
||||
if modTime, ok := info.Metadata[modTimeKey]; ok {
|
||||
func (o *Object) decodeMetaDataFromProperties(info *azblob.BlobsGetPropertiesResponse) (err error) {
|
||||
// FIXME - Client library returns MD5 as base64 decoded string, object struct should be changed
|
||||
// to maintain md5 as simple byte array instead of as string
|
||||
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
|
||||
o.mimeType = info.ContentType()
|
||||
o.size = info.ContentLength()
|
||||
o.modTime = time.Time(info.LastModified())
|
||||
metadata := info.NewMetadata()
|
||||
if len(metadata) > 0 {
|
||||
o.meta = metadata
|
||||
if modTime, ok := metadata[modTimeKey]; ok {
|
||||
when, err := time.Parse(timeFormatIn, modTime)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Couldn't parse %v = %q: %v", modTimeKey, modTime, err)
|
||||
@@ -755,7 +802,7 @@ func (o *Object) decodeMetaData(info *storage.Blob) (err error) {
|
||||
}
|
||||
|
||||
// getBlobReference creates an empty blob reference with no metadata
|
||||
func (o *Object) getBlobReference() *storage.Blob {
|
||||
func (o *Object) getBlobReference() azblob.BlobURL {
|
||||
return o.fs.getBlobReference(o.remote)
|
||||
}
|
||||
|
||||
@@ -778,19 +825,22 @@ func (o *Object) readMetaData() (err error) {
|
||||
blob := o.getBlobReference()
|
||||
|
||||
// Read metadata (this includes metadata)
|
||||
getPropertiesOptions := storage.GetBlobPropertiesOptions{}
|
||||
options := azblob.BlobAccessConditions{}
|
||||
ctx := context.Background()
|
||||
var blobProperties *azblob.BlobsGetPropertiesResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err = blob.GetProperties(&getPropertiesOptions)
|
||||
blobProperties, err = blob.GetProperties(ctx, options)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
if storageErr, ok := err.(storage.AzureStorageServiceError); ok && storageErr.StatusCode == http.StatusNotFound {
|
||||
// On directories - GetProperties does not work and current SDK does not populate service code correctly hence check regular http response as well
|
||||
if storageErr, ok := err.(azblob.StorageError); ok && storageErr.ServiceCode() == azblob.ServiceCodeBlobNotFound || storageErr.Response().StatusCode == http.StatusNotFound {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return o.decodeMetaData(blob)
|
||||
return o.decodeMetaDataFromProperties(blobProperties)
|
||||
}
|
||||
|
||||
// timeString returns modTime as the number of milliseconds
|
||||
@@ -827,16 +877,14 @@ func (o *Object) ModTime() (result time.Time) {
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(modTime time.Time) error {
|
||||
blob := o.getBlobWithModTime(modTime)
|
||||
options := storage.SetBlobMetadataOptions{}
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
err := blob.SetMetadata(&options)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
// Make sure o.meta is not nil
|
||||
if o.meta == nil {
|
||||
o.meta = make(map[string]string, 1)
|
||||
}
|
||||
// Set modTimeKey in it
|
||||
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
|
||||
o.modTime = modTime
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -847,10 +895,9 @@ func (o *Object) Storable() bool {
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
getBlobOptions := storage.GetBlobOptions{}
|
||||
getBlobRangeOptions := storage.GetBlobRangeOptions{
|
||||
GetBlobOptions: &getBlobOptions,
|
||||
}
|
||||
// Offset and Count for range download
|
||||
var offset int64
|
||||
var count int64
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
@@ -862,14 +909,10 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
start = o.size - end
|
||||
end = 0
|
||||
}
|
||||
getBlobRangeOptions.Range = &storage.BlobRange{
|
||||
Start: uint64(start),
|
||||
End: uint64(end),
|
||||
}
|
||||
offset = start
|
||||
count = end - start
|
||||
case *fs.SeekOption:
|
||||
getBlobRangeOptions.Range = &storage.BlobRange{
|
||||
Start: uint64(x.Offset),
|
||||
}
|
||||
offset = x.Offset
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
@@ -877,17 +920,17 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
}
|
||||
}
|
||||
blob := o.getBlobReference()
|
||||
ctx := context.Background()
|
||||
ac := azblob.BlobAccessConditions{}
|
||||
var dowloadResponse *azblob.DownloadResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
if getBlobRangeOptions.Range == nil {
|
||||
in, err = blob.Get(&getBlobOptions)
|
||||
} else {
|
||||
in, err = blob.GetRange(&getBlobRangeOptions)
|
||||
}
|
||||
dowloadResponse, err = blob.Download(ctx, offset, count, ac, false)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to open for download")
|
||||
}
|
||||
in = dowloadResponse.Body(azblob.RetryReaderOptions{})
|
||||
return in, nil
|
||||
}
|
||||
|
||||
@@ -915,7 +958,7 @@ func init() {
|
||||
// uploadMultipart uploads a file using multipart upload
|
||||
//
|
||||
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
|
||||
func (o *Object) uploadMultipart(in io.Reader, size int64, blob *storage.Blob, putBlobOptions *storage.PutBlobOptions) (err error) {
|
||||
func (o *Object) uploadMultipart(in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
|
||||
// Calculate correct chunkSize
|
||||
chunkSize := int64(chunkSize)
|
||||
var totalParts int64
|
||||
@@ -937,34 +980,41 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, blob *storage.Blob, p
|
||||
}
|
||||
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, fs.SizeSuffix(chunkSize))
|
||||
|
||||
// Create an empty blob
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err := blob.CreateBlockBlob(putBlobOptions)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
// https://godoc.org/github.com/Azure/azure-storage-blob-go/2017-07-29/azblob#example-BlockBlobURL
|
||||
// Utilities are cloned from above example
|
||||
// These helper functions convert a binary block ID to a base-64 string and vice versa
|
||||
// NOTE: The blockID must be <= 64 bytes and ALL blockIDs for the block must be the same length
|
||||
blockIDBinaryToBase64 := func(blockID []byte) string { return base64.StdEncoding.EncodeToString(blockID) }
|
||||
// These helper functions convert an int block ID to a base-64 string and vice versa
|
||||
blockIDIntToBase64 := func(blockID uint64) string {
|
||||
binaryBlockID := (&[8]byte{})[:] // All block IDs are 8 bytes long
|
||||
binary.LittleEndian.PutUint64(binaryBlockID, blockID)
|
||||
return blockIDBinaryToBase64(binaryBlockID)
|
||||
}
|
||||
|
||||
// block ID variables
|
||||
var (
|
||||
rawID uint64
|
||||
bytesID = make([]byte, 8)
|
||||
blockID = "" // id in base64 encoded form
|
||||
blocks = make([]storage.Block, 0, totalParts)
|
||||
blocks = make([]string, totalParts)
|
||||
)
|
||||
|
||||
// increment the blockID
|
||||
nextID := func() {
|
||||
rawID++
|
||||
binary.LittleEndian.PutUint64(bytesID, rawID)
|
||||
blockID = base64.StdEncoding.EncodeToString(bytesID)
|
||||
blocks = append(blocks, storage.Block{
|
||||
ID: blockID,
|
||||
Status: storage.BlockStatusLatest,
|
||||
})
|
||||
blockID = blockIDIntToBase64(rawID)
|
||||
blocks = append(blocks, blockID)
|
||||
}
|
||||
|
||||
// Get BlockBlobURL, we will use default pipeline here
|
||||
blockBlobURL := blob.ToBlockBlobURL()
|
||||
ctx := context.Background()
|
||||
ac := azblob.LeaseAccessConditions{} // Use default lease access conditions
|
||||
|
||||
// FIXME - Accounting
|
||||
// unwrap the accounting from the input, we use wrap to put it
|
||||
// back on after the buffering
|
||||
in, wrap := accounting.UnWrap(in)
|
||||
// in, wrap := accounting.UnWrap(in)
|
||||
|
||||
// Upload the chunks
|
||||
remaining := size
|
||||
@@ -1004,13 +1054,8 @@ outer:
|
||||
defer o.fs.uploadToken.Put()
|
||||
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
|
||||
|
||||
// Upload the block, with MD5 for check
|
||||
md5sum := md5.Sum(buf)
|
||||
putBlockOptions := storage.PutBlockOptions{
|
||||
ContentMD5: base64.StdEncoding.EncodeToString(md5sum[:]),
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err = blob.PutBlockWithLength(blockID, uint64(len(buf)), wrap(bytes.NewBuffer(buf)), &putBlockOptions)
|
||||
_, err = blockBlobURL.StageBlock(ctx, blockID, bytes.NewReader(buf), ac)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
|
||||
@@ -1040,9 +1085,8 @@ outer:
|
||||
}
|
||||
|
||||
// Finalise the upload session
|
||||
putBlockListOptions := storage.PutBlockListOptions{}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err := blob.PutBlockList(blocks, &putBlockListOptions)
|
||||
_, err := blockBlobURL.CommitBlockList(ctx, blocks, *httpHeaders, o.meta, azblob.BlobAccessConditions{})
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1060,29 +1104,45 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
return err
|
||||
}
|
||||
size := src.Size()
|
||||
blob := o.getBlobWithModTime(src.ModTime())
|
||||
blob.Properties.ContentType = fs.MimeType(o)
|
||||
if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
|
||||
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
|
||||
if err == nil {
|
||||
blob.Properties.ContentMD5 = base64.StdEncoding.EncodeToString(sourceMD5bytes)
|
||||
} else {
|
||||
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
|
||||
// Update Mod time
|
||||
err = o.SetModTime(src.ModTime())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blob := o.getBlobReference()
|
||||
httpHeaders := azblob.BlobHTTPHeaders{}
|
||||
httpHeaders.ContentType = fs.MimeType(o)
|
||||
// Multipart upload doesn't support MD5 checksums at put block calls, hence calculate
|
||||
// MD5 only for PutBlob requests
|
||||
if size < int64(uploadCutoff) {
|
||||
if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
|
||||
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
|
||||
if err == nil {
|
||||
httpHeaders.ContentMD5 = sourceMD5bytes
|
||||
} else {
|
||||
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
putBlobOptions := storage.PutBlobOptions{}
|
||||
|
||||
putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
|
||||
BufferSize: int(chunkSize),
|
||||
MaxBuffers: 4,
|
||||
Metadata: o.meta,
|
||||
BlobHTTPHeaders: httpHeaders,
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
// Don't retry, return a retry error instead
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
if size >= int64(uploadCutoff) {
|
||||
// If a large file upload in chunks
|
||||
err = o.uploadMultipart(in, size, blob, &putBlobOptions)
|
||||
err = o.uploadMultipart(in, size, &blob, &httpHeaders)
|
||||
} else {
|
||||
// Write a small blob in one transaction
|
||||
if size == 0 {
|
||||
in = nil
|
||||
}
|
||||
err = blob.CreateBlockBlobFromReader(in, &putBlobOptions)
|
||||
blockBlobURL := blob.ToBlockBlobURL()
|
||||
_, err = azblob.UploadStreamToBlockBlob(ctx, in, blockBlobURL, putBlobOptions)
|
||||
}
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
@@ -1096,9 +1156,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
// Remove an object
|
||||
func (o *Object) Remove() error {
|
||||
blob := o.getBlobReference()
|
||||
options := storage.DeleteBlobOptions{}
|
||||
snapShotOptions := azblob.DeleteSnapshotsOptionNone
|
||||
ac := azblob.BlobAccessConditions{}
|
||||
ctx := context.Background()
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
err := blob.Delete(&options)
|
||||
_, err := blob.Delete(ctx, snapShotOptions, ac)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
// Test AzureBlob filesystem interface
|
||||
|
||||
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
|
||||
|
||||
package azureblob_test
|
||||
|
||||
import (
|
||||
|
||||
6
backend/azureblob/azureblob_unsupported.go
Normal file
6
backend/azureblob/azureblob_unsupported.go
Normal file
@@ -0,0 +1,6 @@
|
||||
// Build for azureblob for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build freebsd netbsd openbsd plan9 solaris !go1.8
|
||||
|
||||
package azureblob
|
||||
@@ -54,17 +54,18 @@ const (
|
||||
// Globals
|
||||
var (
|
||||
// Flags
|
||||
driveAuthOwnerOnly = flags.BoolP("drive-auth-owner-only", "", false, "Only consider files owned by the authenticated user.")
|
||||
driveUseTrash = flags.BoolP("drive-use-trash", "", true, "Send files to the trash instead of deleting permanently.")
|
||||
driveSkipGdocs = flags.BoolP("drive-skip-gdocs", "", false, "Skip google documents in all listings.")
|
||||
driveSharedWithMe = flags.BoolP("drive-shared-with-me", "", false, "Only show files that are shared with me")
|
||||
driveTrashedOnly = flags.BoolP("drive-trashed-only", "", false, "Only show files that are in the trash")
|
||||
driveExtensions = flags.StringP("drive-formats", "", defaultExtensions, "Comma separated list of preferred formats for downloading Google docs.")
|
||||
driveUseCreatedDate = flags.BoolP("drive-use-created-date", "", false, "Use created date instead of modified date.")
|
||||
driveListChunk = flags.Int64P("drive-list-chunk", "", 1000, "Size of listing chunk 100-1000. 0 to disable.")
|
||||
driveImpersonate = flags.StringP("drive-impersonate", "", "", "Impersonate this user when using a service account.")
|
||||
driveAlternateExport = flags.BoolP("drive-alternate-export", "", false, "Use alternate export URLs for google documents export.")
|
||||
driveAcknowledgeAbuse = flags.BoolP("drive-acknowledge-abuse", "", false, "Set to allow files which return cannotDownloadAbusiveFile to be downloaded.")
|
||||
driveAuthOwnerOnly = flags.BoolP("drive-auth-owner-only", "", false, "Only consider files owned by the authenticated user.")
|
||||
driveUseTrash = flags.BoolP("drive-use-trash", "", true, "Send files to the trash instead of deleting permanently.")
|
||||
driveSkipGdocs = flags.BoolP("drive-skip-gdocs", "", false, "Skip google documents in all listings.")
|
||||
driveSharedWithMe = flags.BoolP("drive-shared-with-me", "", false, "Only show files that are shared with me")
|
||||
driveTrashedOnly = flags.BoolP("drive-trashed-only", "", false, "Only show files that are in the trash")
|
||||
driveExtensions = flags.StringP("drive-formats", "", defaultExtensions, "Comma separated list of preferred formats for downloading Google docs.")
|
||||
driveUseCreatedDate = flags.BoolP("drive-use-created-date", "", false, "Use created date instead of modified date.")
|
||||
driveListChunk = flags.Int64P("drive-list-chunk", "", 1000, "Size of listing chunk 100-1000. 0 to disable.")
|
||||
driveImpersonate = flags.StringP("drive-impersonate", "", "", "Impersonate this user when using a service account.")
|
||||
driveAlternateExport = flags.BoolP("drive-alternate-export", "", false, "Use alternate export URLs for google documents export.")
|
||||
driveAcknowledgeAbuse = flags.BoolP("drive-acknowledge-abuse", "", false, "Set to allow files which return cannotDownloadAbusiveFile to be downloaded.")
|
||||
driveKeepRevisionForever = flags.BoolP("drive-keep-revision-forever", "", false, "Keep new head revision forever.")
|
||||
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
|
||||
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
|
||||
chunkSize = fs.SizeSuffix(8 * 1024 * 1024)
|
||||
@@ -857,7 +858,7 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt
|
||||
// Make the API request to upload metadata and file data.
|
||||
// Don't retry, return a retry error instead
|
||||
err = f.pacer.CallNoRetry(func() (bool, error) {
|
||||
info, err = f.svc.Files.Create(createInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
|
||||
info, err = f.svc.Files.Create(createInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(*driveKeepRevisionForever).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1014,7 +1015,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
|
||||
var info *drive.File
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
info, err = o.fs.svc.Files.Copy(srcObj.id, createInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
|
||||
info, err = o.fs.svc.Files.Copy(srcObj.id, createInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(*driveKeepRevisionForever).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1665,7 +1666,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
if size == 0 || size < int64(driveUploadCutoff) {
|
||||
// Don't retry, return a retry error instead
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
info, err = o.fs.svc.Files.Update(o.id, updateInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).Do()
|
||||
info, err = o.fs.svc.Files.Update(o.id, updateInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).KeepRevisionForever(*driveKeepRevisionForever).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -58,6 +58,9 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string,
|
||||
if f.isTeamDrive {
|
||||
params.Set("supportsTeamDrives", "true")
|
||||
}
|
||||
if *driveKeepRevisionForever {
|
||||
params.Set("keepRevisionForever", "true")
|
||||
}
|
||||
urls := "https://www.googleapis.com/upload/drive/v3/files"
|
||||
method := "POST"
|
||||
if fileID != "" {
|
||||
|
||||
@@ -480,6 +480,8 @@ func (f *Fs) mkdir(abspath string) error {
|
||||
switch errX.Code {
|
||||
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
|
||||
err = nil
|
||||
case 521: // dir already exists: error number according to RFC 959: issue #2363
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
|
||||
@@ -38,6 +38,7 @@ import (
|
||||
const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
eventWaitTime = 500 * time.Millisecond
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
useTrash = true // FIXME make configurable - rclone global
|
||||
)
|
||||
@@ -570,6 +571,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
|
||||
}
|
||||
}
|
||||
|
||||
waitEvent := f.srv.WaitEventsStart()
|
||||
|
||||
err = f.deleteNode(dirNode)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "delete directory node failed")
|
||||
@@ -579,7 +582,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
|
||||
if dirNode == rootNode {
|
||||
f.clearRoot()
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond) // FIXME give the callback a chance
|
||||
|
||||
f.srv.WaitEvents(waitEvent, eventWaitTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -653,6 +657,8 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
|
||||
}
|
||||
}
|
||||
|
||||
waitEvent := f.srv.WaitEventsStart()
|
||||
|
||||
// rename the object if required
|
||||
if srcLeaf != dstLeaf {
|
||||
//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
|
||||
@@ -665,7 +671,8 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(100 * time.Millisecond) // FIXME give events a chance...
|
||||
f.srv.WaitEvents(waitEvent, eventWaitTime)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -76,7 +76,7 @@ func init() {
|
||||
Optional: true,
|
||||
}, {
|
||||
Name: "use_insecure_cipher",
|
||||
Help: "Enable the user of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.",
|
||||
Help: "Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.",
|
||||
Optional: true,
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
|
||||
@@ -87,6 +87,10 @@ func init() {
|
||||
Help: "Password.",
|
||||
Optional: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "bearer_token",
|
||||
Help: "Bearer token instead of user/pass (eg a Macaroon)",
|
||||
Optional: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -182,6 +186,7 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Prop, err error) {
|
||||
ExtraHeaders: map[string]string{
|
||||
"Depth": "1",
|
||||
},
|
||||
NoRedirect: true,
|
||||
}
|
||||
var result api.Multistatus
|
||||
var resp *http.Response
|
||||
@@ -191,7 +196,13 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Prop, err error) {
|
||||
})
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// does not exist
|
||||
if apiErr.StatusCode == http.StatusNotFound {
|
||||
switch apiErr.StatusCode {
|
||||
case http.StatusNotFound:
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
case http.StatusMovedPermanently, http.StatusFound, http.StatusSeeOther:
|
||||
// Some sort of redirect - go doesn't deal with these properly (it resets
|
||||
// the method to GET). However we can assume that if it was redirected the
|
||||
// object was not found.
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
@@ -258,10 +269,12 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
if !strings.HasSuffix(endpoint, "/") {
|
||||
endpoint += "/"
|
||||
}
|
||||
rootIsDir := strings.HasSuffix(root, "/")
|
||||
root = strings.Trim(root, "/")
|
||||
|
||||
user := config.FileGet(name, "user")
|
||||
pass := config.FileGet(name, "pass")
|
||||
bearerToken := config.FileGet(name, "bearer_token")
|
||||
if pass != "" {
|
||||
var err error
|
||||
pass, err = obscure.Reveal(pass)
|
||||
@@ -282,7 +295,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
root: root,
|
||||
endpoint: u,
|
||||
endpointURL: u.String(),
|
||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()).SetUserPass(user, pass),
|
||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
user: user,
|
||||
pass: pass,
|
||||
@@ -291,13 +304,18 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
if user != "" || pass != "" {
|
||||
f.srv.SetUserPass(user, pass)
|
||||
} else if bearerToken != "" {
|
||||
f.srv.SetHeader("Authorization", "BEARER "+bearerToken)
|
||||
}
|
||||
f.srv.SetErrorHandler(errorHandler)
|
||||
err = f.setQuirks(vendor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if root != "" {
|
||||
if root != "" && !rootIsDir {
|
||||
// Check to see if the root actually an existing file
|
||||
remote := path.Base(root)
|
||||
f.root = path.Dir(root)
|
||||
@@ -542,6 +560,11 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
|
||||
// mkParentDir makes the parent of the native path dirPath if
|
||||
// necessary and any directories above that
|
||||
func (f *Fs) mkParentDir(dirPath string) error {
|
||||
// defer log.Trace(dirPath, "")("")
|
||||
// chop off trailing / if it exists
|
||||
if strings.HasSuffix(dirPath, "/") {
|
||||
dirPath = dirPath[:len(dirPath)-1]
|
||||
}
|
||||
parent := path.Dir(dirPath)
|
||||
if parent == "." {
|
||||
parent = ""
|
||||
@@ -551,10 +574,15 @@ func (f *Fs) mkParentDir(dirPath string) error {
|
||||
|
||||
// mkdir makes the directory and parents using native paths
|
||||
func (f *Fs) mkdir(dirPath string) error {
|
||||
// defer log.Trace(dirPath, "")("")
|
||||
// We assume the root is already ceated
|
||||
if dirPath == "" {
|
||||
return nil
|
||||
}
|
||||
// Collections must end with /
|
||||
if !strings.HasSuffix(dirPath, "/") {
|
||||
dirPath += "/"
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "MKCOL",
|
||||
Path: dirPath,
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/template"
|
||||
@@ -66,28 +67,33 @@ var archFlags = map[string][]string{
|
||||
}
|
||||
|
||||
// runEnv - run a shell command with env
|
||||
func runEnv(args, env []string) {
|
||||
func runEnv(args, env []string) error {
|
||||
if *debug {
|
||||
args = append([]string{"echo"}, args...)
|
||||
}
|
||||
cmd := exec.Command(args[0], args[1:]...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if env != nil {
|
||||
cmd.Env = append(os.Environ(), env...)
|
||||
}
|
||||
if *debug {
|
||||
log.Printf("args = %v, env = %v\n", args, cmd.Env)
|
||||
}
|
||||
err := cmd.Run()
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to run %v: %v", args, err)
|
||||
log.Print("----------------------------")
|
||||
log.Printf("Failed to run %v: %v", args, err)
|
||||
log.Printf("Command output was:\n%s", out)
|
||||
log.Print("----------------------------")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// run a shell command
|
||||
func run(args ...string) {
|
||||
runEnv(args, nil)
|
||||
err := runEnv(args, nil)
|
||||
if err != nil {
|
||||
log.Fatalf("Exiting after error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// chdir or die
|
||||
@@ -160,8 +166,8 @@ func buildDebAndRpm(dir, version, goarch string) []string {
|
||||
return artifacts
|
||||
}
|
||||
|
||||
// build the binary in dir
|
||||
func compileArch(version, goos, goarch, dir string) {
|
||||
// build the binary in dir returning success or failure
|
||||
func compileArch(version, goos, goarch, dir string) bool {
|
||||
log.Printf("Compiling %s/%s", goos, goarch)
|
||||
output := filepath.Join(dir, "rclone")
|
||||
if goos == "windows" {
|
||||
@@ -191,7 +197,11 @@ func compileArch(version, goos, goarch, dir string) {
|
||||
if flags, ok := archFlags[goarch]; ok {
|
||||
env = append(env, flags...)
|
||||
}
|
||||
runEnv(args, env)
|
||||
err = runEnv(args, env)
|
||||
if err != nil {
|
||||
log.Printf("Error compiling %s/%s: %v", goos, goarch, err)
|
||||
return false
|
||||
}
|
||||
if !*compileOnly {
|
||||
artifacts := []string{buildZip(dir)}
|
||||
// build a .deb and .rpm if appropriate
|
||||
@@ -207,6 +217,7 @@ func compileArch(version, goos, goarch, dir string) {
|
||||
run("rm", "-rf", dir)
|
||||
}
|
||||
log.Printf("Done compiling %s/%s", goos, goarch)
|
||||
return true
|
||||
}
|
||||
|
||||
func compile(version string) {
|
||||
@@ -231,6 +242,8 @@ func compile(version string) {
|
||||
log.Fatalf("Bad -exclude regexp: %v", err)
|
||||
}
|
||||
compiled := 0
|
||||
var failuresMu sync.Mutex
|
||||
var failures []string
|
||||
for _, osarch := range osarches {
|
||||
if excludeRe.MatchString(osarch) || !includeRe.MatchString(osarch) {
|
||||
continue
|
||||
@@ -246,13 +259,22 @@ func compile(version string) {
|
||||
}
|
||||
dir := filepath.Join("rclone-" + version + "-" + userGoos + "-" + goarch)
|
||||
run <- func() {
|
||||
compileArch(version, goos, goarch, dir)
|
||||
if !compileArch(version, goos, goarch, dir) {
|
||||
failuresMu.Lock()
|
||||
failures = append(failures, goos+"/"+goarch)
|
||||
failuresMu.Unlock()
|
||||
}
|
||||
}
|
||||
compiled++
|
||||
}
|
||||
close(run)
|
||||
wg.Wait()
|
||||
log.Printf("Compiled %d arches in %v", compiled, time.Since(start))
|
||||
if len(failures) > 0 {
|
||||
sort.Strings(failures)
|
||||
log.Printf("%d compile failures:\n %s\n", len(failures), strings.Join(failures, "\n "))
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
||||
@@ -245,7 +245,7 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
|
||||
// If file exists then srcFileName != "", however if the file
|
||||
// doesn't exist then we assume it is a directory...
|
||||
if srcFileName != "" {
|
||||
dstRemote, dstFileName = fspath.RemoteSplit(dstRemote)
|
||||
dstRemote, dstFileName = fspath.Split(dstRemote)
|
||||
if dstRemote == "" {
|
||||
dstRemote = "."
|
||||
}
|
||||
@@ -268,7 +268,7 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
|
||||
|
||||
// NewFsDstFile creates a new dst fs with a destination file name from the arguments
|
||||
func NewFsDstFile(args []string) (fdst fs.Fs, dstFileName string) {
|
||||
dstRemote, dstFileName := fspath.RemoteSplit(args[0])
|
||||
dstRemote, dstFileName := fspath.Split(args[0])
|
||||
if dstRemote == "" {
|
||||
dstRemote = "."
|
||||
}
|
||||
|
||||
@@ -14,9 +14,9 @@ func init() {
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "deletefile remote:path",
|
||||
Short: `Remove a single file path from remote.`,
|
||||
Short: `Remove a single file from remote.`,
|
||||
Long: `
|
||||
Remove a single file path from remote. Unlike ` + "`" + `delete` + "`" + ` it cannot be used to
|
||||
Remove a single file from remote. Unlike ` + "`" + `delete` + "`" + ` it cannot be used to
|
||||
remove a directory and it doesn't obey include/exclude filters - if the specified file exists,
|
||||
it will always be removed.
|
||||
`,
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/ncw/rclone/vfs/vfsflags"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/net/context" // switch to "context" when we stop supporting go1.8
|
||||
|
||||
"golang.org/x/net/webdav"
|
||||
)
|
||||
|
||||
|
||||
@@ -169,3 +169,5 @@ Contributors
|
||||
* Kasper Byrdal Nielsen <byrdal76@gmail.com>
|
||||
* Benjamin Joseph Dag <bjdag1234@users.noreply.github.com>
|
||||
* themylogin <themylogin@gmail.com>
|
||||
* Onno Zweers <onno.zweers@surfsara.nl>
|
||||
* Jasper Lievisse Adriaanse <jasper@humppa.nl>
|
||||
|
||||
@@ -117,6 +117,34 @@ MD5 hashes are stored with blobs. However blobs that were uploaded in
|
||||
chunks only have an MD5 if the source remote was capable of MD5
|
||||
hashes, eg the local disk.
|
||||
|
||||
### Authenticating with Azure Blob Storage
|
||||
|
||||
Rclone has 3 ways of authenticating with Azure Blob Storage:
|
||||
|
||||
#### Account and Key
|
||||
|
||||
This is the most straightforward and least flexible way. Just fill in the `account` and `key` lines and leave the rest blank.
|
||||
|
||||
#### SAS URL
|
||||
|
||||
This can be an account level SAS URL or container level SAS URL
|
||||
|
||||
To use it leave `account`, `key` blank and fill in `sas_url`.
|
||||
|
||||
Account level SAS URL or container level SAS URL can be obtained from Azure portal or Azure Storage Explorer.
|
||||
To get a container level SAS URL right click on a container in the Azure Blob explorer in the Azure portal.
|
||||
|
||||
If you use a container level SAS URL, rclone operations are permitted only on a particular container, eg
|
||||
|
||||
rclone ls azureblob:container
|
||||
|
||||
However these will not work
|
||||
|
||||
rclone lsd azureblob:
|
||||
rclone ls azureblob:othercontainer
|
||||
|
||||
This would be useful for temporarily allowing third parties access to a single container or putting credentials into an untrusted environment.
|
||||
|
||||
### Multipart uploads ###
|
||||
|
||||
Rclone supports multipart uploads with Azure Blob storage. Files
|
||||
|
||||
@@ -279,19 +279,40 @@ For example, to limit bandwidth usage to 10 MBytes/s use `--bwlimit 10M`
|
||||
|
||||
It is also possible to specify a "timetable" of limits, which will cause
|
||||
certain limits to be applied at certain times. To specify a timetable, format your
|
||||
entries as "HH:MM,BANDWIDTH HH:MM,BANDWIDTH...".
|
||||
entries as "WEEKDAY-HH:MM,BANDWIDTH WEEKDAY-HH:MM,BANDWIDTH..." where:
|
||||
WEEKDAY is an optional element.
|
||||
It can be written as the whole word or using only the first 3 characters.
|
||||
HH:MM is an hour from 00:00 to 23:59.
|
||||
|
||||
An example of a typical timetable to avoid link saturation during daytime
|
||||
working hours could be:
|
||||
|
||||
`--bwlimit "08:00,512 12:00,10M 13:00,512 18:00,30M 23:00,off"`
|
||||
|
||||
In this example, the transfer bandwidth will be set to 512kBytes/sec at 8am.
|
||||
In this example, the transfer bandwidth will be set every day to 512kBytes/sec at 8am.
|
||||
At noon, it will raise to 10Mbytes/s, and drop back to 512kBytes/sec at 1pm.
|
||||
At 6pm, the bandwidth limit will be set to 30MBytes/s, and at 11pm it will be
|
||||
completely disabled (full speed). Anything between 11pm and 8am will remain
|
||||
unlimited.
|
||||
|
||||
An example of a timetable with WEEKDAY could be:
|
||||
|
||||
`--bwlimit "Mon-00:00,512 Fri-23:59,10M Sat-10:00,1M Sun-20:00,off"`
|
||||
|
||||
It means that the transfer bandwidth will be set to 512kBytes/sec on Monday.
|
||||
It will raise to 10Mbytes/s before the end of Friday.
|
||||
At 10:00 on Saturday it will be set to 1Mbyte/s.
|
||||
From 20:00 on Sunday it will be unlimited.
|
||||
|
||||
Timeslots without a weekday are extended to the whole week.
|
||||
So this one example:
|
||||
|
||||
`--bwlimit "Mon-00:00,512 12:00,1M Sun-20:00,off"`
|
||||
|
||||
Is equal to this:
|
||||
|
||||
`--bwlimit "Mon-00:00,512 Mon-12:00,1M Tue-12:00,1M Wed-12:00,1M Thu-12:00,1M Fri-12:00,1M Sat-12:00,1M Sun-12:00,1M Sun-20:00,off"`
|
||||
|
||||
Bandwidth limits only apply to the data transfer. They don't apply to the
|
||||
bandwidth of the directory listings etc.
|
||||
|
||||
|
||||
@@ -437,6 +437,10 @@ See rclone issue [#2243](https://github.com/ncw/rclone/issues/2243) for backgrou
|
||||
|
||||
When using a service account, this instructs rclone to impersonate the user passed in.
|
||||
|
||||
#### --drive-keep-revision-forever ####
|
||||
|
||||
Keeps new head revision of the file forever.
|
||||
|
||||
#### --drive-list-chunk int ####
|
||||
|
||||
Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
|
||||
@@ -30,53 +30,17 @@ n/s/q> n
|
||||
name> remote
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Amazon Drive
|
||||
\ "amazon cloud drive"
|
||||
2 / Amazon S3 (also Dreamhost, Ceph, Minio)
|
||||
\ "s3"
|
||||
3 / Backblaze B2
|
||||
\ "b2"
|
||||
4 / Box
|
||||
\ "box"
|
||||
5 / Dropbox
|
||||
\ "dropbox"
|
||||
6 / Encrypt/Decrypt a remote
|
||||
\ "crypt"
|
||||
7 / FTP Connection
|
||||
\ "ftp"
|
||||
8 / Google Cloud Storage (this is not Google Drive)
|
||||
\ "google cloud storage"
|
||||
9 / Google Drive
|
||||
\ "drive"
|
||||
10 / Hubic
|
||||
\ "hubic"
|
||||
11 / Local Disk
|
||||
\ "local"
|
||||
12 / Microsoft Azure Blob Storage
|
||||
\ "azureblob"
|
||||
13 / Microsoft OneDrive
|
||||
\ "onedrive"
|
||||
14 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
|
||||
\ "swift"
|
||||
15 / Pcloud
|
||||
\ "pcloud"
|
||||
16 / QingCloud Object Storage
|
||||
\ "qingstor"
|
||||
17 / SSH/SFTP Connection
|
||||
\ "sftp"
|
||||
18 / WebDAV
|
||||
[snip]
|
||||
22 / Webdav
|
||||
\ "webdav"
|
||||
19 / Yandex Disk
|
||||
\ "yandex"
|
||||
20 / http Connection
|
||||
\ "http"
|
||||
[snip]
|
||||
Storage> webdav
|
||||
URL of http host to connect to
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Connect to example.com
|
||||
\ "https://example.com"
|
||||
url> https://example.com/remote.php/webdav/
|
||||
Name of the WebDAV site/service/software you are using
|
||||
Name of the Webdav site/service/software you are using
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Nextcloud
|
||||
\ "nextcloud"
|
||||
@@ -98,13 +62,17 @@ Enter the password:
|
||||
password:
|
||||
Confirm the password:
|
||||
password:
|
||||
Bearer token instead of user/pass (eg a Macaroon)
|
||||
bearer_token>
|
||||
Remote config
|
||||
--------------------
|
||||
[remote]
|
||||
type = webdav
|
||||
url = https://example.com/remote.php/webdav/
|
||||
vendor = nextcloud
|
||||
user = user
|
||||
pass = *** ENCRYPTED ***
|
||||
bearer_token =
|
||||
--------------------
|
||||
y) Yes this is OK
|
||||
e) Edit this remote
|
||||
@@ -133,6 +101,10 @@ Owncloud or Nextcloud rclone will support modified times.
|
||||
|
||||
Hashes are not supported.
|
||||
|
||||
## Provider notes ##
|
||||
|
||||
See below for notes on specific providers.
|
||||
|
||||
### Owncloud ###
|
||||
|
||||
Click on the settings cog in the bottom right of the page and this
|
||||
@@ -149,7 +121,7 @@ Owncloud does. This [may be
|
||||
fixed](https://github.com/nextcloud/nextcloud-snap/issues/365) in the
|
||||
future.
|
||||
|
||||
## Put.io ##
|
||||
### Put.io ###
|
||||
|
||||
put.io can be accessed in a read only way using webdav.
|
||||
|
||||
@@ -174,9 +146,9 @@ mount.
|
||||
|
||||
For more help see [the put.io webdav docs](http://help.put.io/apps-and-integrations/ftp-and-webdav).
|
||||
|
||||
## Sharepoint ##
|
||||
### Sharepoint ###
|
||||
|
||||
Can be used with Sharepoint provided by OneDrive for Business
|
||||
Rclone can be used with Sharepoint provided by OneDrive for Business
|
||||
or Office365 Education Accounts.
|
||||
This feature is only needed for a few of these Accounts,
|
||||
mostly Office365 Education ones. These accounts are sometimes not
|
||||
@@ -213,4 +185,27 @@ url = https://[YOUR-DOMAIN]-my.sharepoint.com/personal/[YOUR-EMAIL]/Documents
|
||||
vendor = other
|
||||
user = YourEmailAddress
|
||||
pass = encryptedpassword
|
||||
```
|
||||
```
|
||||
|
||||
### dCache ###
|
||||
|
||||
dCache is a storage system with WebDAV doors that support, beside basic and x509,
|
||||
authentication with [Macaroons](https://www.dcache.org/manuals/workshop-2017-05-29-Umea/000-Final/anupam_macaroons_v02.pdf) (bearer tokens).
|
||||
|
||||
Configure as normal using the `other` type. Don't enter a username or
|
||||
password, instead enter your Macaroon as the `bearer_token`.
|
||||
|
||||
The config will end up looking something like this.
|
||||
|
||||
```
|
||||
[dcache]
|
||||
type = webdav
|
||||
url = https://dcache...
|
||||
vendor = other
|
||||
user =
|
||||
pass =
|
||||
bearer_token = your-macaroon
|
||||
```
|
||||
|
||||
There is a [script](https://github.com/onnozweers/dcache-scripts/blob/master/get-share-link) that
|
||||
obtains a Macaroon from a dCache WebDAV endpoint, and creates an rclone config file.
|
||||
|
||||
@@ -11,8 +11,9 @@ import (
|
||||
|
||||
// BwTimeSlot represents a bandwidth configuration at a point in time.
|
||||
type BwTimeSlot struct {
|
||||
HHMM int
|
||||
Bandwidth SizeSuffix
|
||||
DayOfTheWeek int
|
||||
HHMM int
|
||||
Bandwidth SizeSuffix
|
||||
}
|
||||
|
||||
// BwTimetable contains all configured time slots.
|
||||
@@ -22,15 +23,64 @@ type BwTimetable []BwTimeSlot
|
||||
func (x BwTimetable) String() string {
|
||||
ret := []string{}
|
||||
for _, ts := range x {
|
||||
ret = append(ret, fmt.Sprintf("%04.4d,%s", ts.HHMM, ts.Bandwidth.String()))
|
||||
ret = append(ret, fmt.Sprintf("%s-%04.4d,%s", time.Weekday(ts.DayOfTheWeek), ts.HHMM, ts.Bandwidth.String()))
|
||||
}
|
||||
return strings.Join(ret, " ")
|
||||
}
|
||||
|
||||
// Basic hour format checking
|
||||
func validateHour(HHMM string) error {
|
||||
if len(HHMM) != 5 {
|
||||
return errors.Errorf("invalid time specification (hh:mm): %q", HHMM)
|
||||
}
|
||||
hh, err := strconv.Atoi(HHMM[0:2])
|
||||
if err != nil {
|
||||
return errors.Errorf("invalid hour in time specification %q: %v", HHMM, err)
|
||||
}
|
||||
if hh < 0 || hh > 23 {
|
||||
return errors.Errorf("invalid hour (must be between 00 and 23): %q", hh)
|
||||
}
|
||||
mm, err := strconv.Atoi(HHMM[3:])
|
||||
if err != nil {
|
||||
return errors.Errorf("invalid minute in time specification: %q: %v", HHMM, err)
|
||||
}
|
||||
if mm < 0 || mm > 59 {
|
||||
return errors.Errorf("invalid minute (must be between 00 and 59): %q", hh)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Basic weekday format checking
|
||||
func parseWeekday(dayOfWeek string) (int, error) {
|
||||
dayOfWeek = strings.ToLower(dayOfWeek)
|
||||
if dayOfWeek == "sun" || dayOfWeek == "sunday" {
|
||||
return 0, nil
|
||||
}
|
||||
if dayOfWeek == "mon" || dayOfWeek == "monday" {
|
||||
return 1, nil
|
||||
}
|
||||
if dayOfWeek == "tue" || dayOfWeek == "tuesday" {
|
||||
return 2, nil
|
||||
}
|
||||
if dayOfWeek == "wed" || dayOfWeek == "wednesday" {
|
||||
return 3, nil
|
||||
}
|
||||
if dayOfWeek == "thu" || dayOfWeek == "thursday" {
|
||||
return 4, nil
|
||||
}
|
||||
if dayOfWeek == "fri" || dayOfWeek == "friday" {
|
||||
return 5, nil
|
||||
}
|
||||
if dayOfWeek == "sat" || dayOfWeek == "saturday" {
|
||||
return 6, nil
|
||||
}
|
||||
return 0, errors.Errorf("invalid weekday: %q", dayOfWeek)
|
||||
}
|
||||
|
||||
// Set the bandwidth timetable.
|
||||
func (x *BwTimetable) Set(s string) error {
|
||||
// The timetable is formatted as:
|
||||
// "hh:mm,bandwidth hh:mm,banwidth..." ex: "10:00,10G 11:30,1G 18:00,off"
|
||||
// "dayOfWeek-hh:mm,bandwidth dayOfWeek-hh:mm,banwidth..." ex: "Mon-10:00,10G Mon-11:30,1G Tue-18:00,off"
|
||||
// If only a single bandwidth identifier is provided, we assume constant bandwidth.
|
||||
|
||||
if len(s) == 0 {
|
||||
@@ -42,6 +92,7 @@ func (x *BwTimetable) Set(s string) error {
|
||||
if err := ts.Bandwidth.Set(s); err != nil {
|
||||
return err
|
||||
}
|
||||
ts.DayOfTheWeek = 0
|
||||
ts.HHMM = 0
|
||||
*x = BwTimetable{ts}
|
||||
return nil
|
||||
@@ -50,69 +101,100 @@ func (x *BwTimetable) Set(s string) error {
|
||||
for _, tok := range strings.Split(s, " ") {
|
||||
tv := strings.Split(tok, ",")
|
||||
|
||||
// Format must be HH:MM,BW
|
||||
// Format must be dayOfWeek-HH:MM,BW
|
||||
if len(tv) != 2 {
|
||||
return errors.Errorf("invalid time/bandwidth specification: %q", tok)
|
||||
}
|
||||
|
||||
// Basic timespec sanity checking
|
||||
HHMM := tv[0]
|
||||
if len(HHMM) != 5 {
|
||||
return errors.Errorf("invalid time specification (hh:mm): %q", HHMM)
|
||||
}
|
||||
hh, err := strconv.Atoi(HHMM[0:2])
|
||||
if err != nil {
|
||||
return errors.Errorf("invalid hour in time specification %q: %v", HHMM, err)
|
||||
}
|
||||
if hh < 0 || hh > 23 {
|
||||
return errors.Errorf("invalid hour (must be between 00 and 23): %q", hh)
|
||||
}
|
||||
mm, err := strconv.Atoi(HHMM[3:])
|
||||
if err != nil {
|
||||
return errors.Errorf("invalid minute in time specification: %q: %v", HHMM, err)
|
||||
}
|
||||
if mm < 0 || mm > 59 {
|
||||
return errors.Errorf("invalid minute (must be between 00 and 59): %q", hh)
|
||||
}
|
||||
weekday := 0
|
||||
HHMM := ""
|
||||
if !strings.Contains(tv[0], "-") {
|
||||
HHMM = tv[0]
|
||||
if err := validateHour(HHMM); err != nil {
|
||||
return err
|
||||
}
|
||||
for i := 0; i < 7; i++ {
|
||||
hh, _ := strconv.Atoi(HHMM[0:2])
|
||||
mm, _ := strconv.Atoi(HHMM[3:])
|
||||
ts := BwTimeSlot{
|
||||
DayOfTheWeek: i,
|
||||
HHMM: (hh * 100) + mm,
|
||||
}
|
||||
if err := ts.Bandwidth.Set(tv[1]); err != nil {
|
||||
return err
|
||||
}
|
||||
*x = append(*x, ts)
|
||||
}
|
||||
} else {
|
||||
timespec := strings.Split(tv[0], "-")
|
||||
if len(timespec) != 2 {
|
||||
return errors.Errorf("invalid time specification: %q", tv[0])
|
||||
}
|
||||
var err error
|
||||
weekday, err = parseWeekday(timespec[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
HHMM = timespec[1]
|
||||
if err := validateHour(HHMM); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ts := BwTimeSlot{
|
||||
HHMM: (hh * 100) + mm,
|
||||
hh, _ := strconv.Atoi(HHMM[0:2])
|
||||
mm, _ := strconv.Atoi(HHMM[3:])
|
||||
ts := BwTimeSlot{
|
||||
DayOfTheWeek: weekday,
|
||||
HHMM: (hh * 100) + mm,
|
||||
}
|
||||
// Bandwidth limit for this time slot.
|
||||
if err := ts.Bandwidth.Set(tv[1]); err != nil {
|
||||
return err
|
||||
}
|
||||
*x = append(*x, ts)
|
||||
}
|
||||
// Bandwidth limit for this time slot.
|
||||
if err := ts.Bandwidth.Set(tv[1]); err != nil {
|
||||
return err
|
||||
}
|
||||
*x = append(*x, ts)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Difference in minutes between lateDayOfWeekHHMM and earlyDayOfWeekHHMM
|
||||
func timeDiff(lateDayOfWeekHHMM int, earlyDayOfWeekHHMM int) int {
|
||||
|
||||
lateTimeMinutes := (lateDayOfWeekHHMM / 10000) * 24 * 60
|
||||
lateTimeMinutes += ((lateDayOfWeekHHMM / 100) % 100) * 60
|
||||
lateTimeMinutes += lateDayOfWeekHHMM % 100
|
||||
|
||||
earlyTimeMinutes := (earlyDayOfWeekHHMM / 10000) * 24 * 60
|
||||
earlyTimeMinutes += ((earlyDayOfWeekHHMM / 100) % 100) * 60
|
||||
earlyTimeMinutes += earlyDayOfWeekHHMM % 100
|
||||
|
||||
return lateTimeMinutes - earlyTimeMinutes
|
||||
}
|
||||
|
||||
// LimitAt returns a BwTimeSlot for the time requested.
|
||||
func (x BwTimetable) LimitAt(tt time.Time) BwTimeSlot {
|
||||
// If the timetable is empty, we return an unlimited BwTimeSlot starting at midnight.
|
||||
// If the timetable is empty, we return an unlimited BwTimeSlot starting at Sunday midnight.
|
||||
if len(x) == 0 {
|
||||
return BwTimeSlot{HHMM: 0, Bandwidth: -1}
|
||||
return BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: -1}
|
||||
}
|
||||
|
||||
HHMM := tt.Hour()*100 + tt.Minute()
|
||||
dayOfWeekHHMM := int(tt.Weekday())*10000 + tt.Hour()*100 + tt.Minute()
|
||||
|
||||
// By default, we return the last element in the timetable. This
|
||||
// satisfies two conditions: 1) If there's only one element it
|
||||
// will always be selected, and 2) The last element of the table
|
||||
// will "wrap around" until overriden by an earlier time slot.
|
||||
// will "wrap around" until overridden by an earlier time slot.
|
||||
// there's only one time slot in the timetable.
|
||||
ret := x[len(x)-1]
|
||||
|
||||
mindif := 0
|
||||
first := true
|
||||
|
||||
// Look for most recent time slot.
|
||||
for _, ts := range x {
|
||||
// Ignore the past
|
||||
if HHMM < ts.HHMM {
|
||||
if dayOfWeekHHMM < (ts.DayOfTheWeek*10000)+ts.HHMM {
|
||||
continue
|
||||
}
|
||||
dif := ((HHMM / 100 * 60) + (HHMM % 100)) - ((ts.HHMM / 100 * 60) + (ts.HHMM % 100))
|
||||
dif := timeDiff(dayOfWeekHHMM, (ts.DayOfTheWeek*10000)+ts.HHMM)
|
||||
if first {
|
||||
mindif = dif
|
||||
first = false
|
||||
|
||||
@@ -19,25 +19,104 @@ func TestBwTimetableSet(t *testing.T) {
|
||||
err bool
|
||||
}{
|
||||
{"", BwTimetable{}, true},
|
||||
{"0", BwTimetable{BwTimeSlot{HHMM: 0, Bandwidth: 0}}, false},
|
||||
{"666", BwTimetable{BwTimeSlot{HHMM: 0, Bandwidth: 666 * 1024}}, false},
|
||||
{"10:20,666", BwTimetable{BwTimeSlot{HHMM: 1020, Bandwidth: 666 * 1024}}, false},
|
||||
{
|
||||
"11:00,333 13:40,666 23:50,10M 23:59,off",
|
||||
BwTimetable{
|
||||
BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{HHMM: 1340, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{HHMM: 2350, Bandwidth: 10 * 1024 * 1024},
|
||||
BwTimeSlot{HHMM: 2359, Bandwidth: -1},
|
||||
},
|
||||
false,
|
||||
},
|
||||
{"bad,bad", BwTimetable{}, true},
|
||||
{"bad bad", BwTimetable{}, true},
|
||||
{"bad", BwTimetable{}, true},
|
||||
{"1000X", BwTimetable{}, true},
|
||||
{"2401,666", BwTimetable{}, true},
|
||||
{"1061,666", BwTimetable{}, true},
|
||||
{"bad-10:20,666", BwTimetable{}, true},
|
||||
{"Mon-bad,666", BwTimetable{}, true},
|
||||
{"Mon-10:20,bad", BwTimetable{}, true},
|
||||
{
|
||||
"0",
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: 0},
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"666",
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: 666 * 1024},
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"10:20,666",
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1020, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1020, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1020, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1020, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1020, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1020, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1020, Bandwidth: 666 * 1024},
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"11:00,333 13:40,666 23:50,10M 23:59,off",
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1340, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1340, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1340, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1340, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1340, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1340, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2350, Bandwidth: 10 * 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 2350, Bandwidth: 10 * 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 2350, Bandwidth: 10 * 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: 10 * 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: 10 * 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 2350, Bandwidth: 10 * 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2350, Bandwidth: 10 * 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2359, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 2359, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 2359, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2359, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2359, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 2359, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2359, Bandwidth: -1},
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"Mon-11:00,333 Tue-13:40,666 Fri-00:00,10M Sat-10:00,off Sun-23:00,666",
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 0000, Bandwidth: 10 * 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1000, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: 666 * 1024},
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"Mon-11:00,333 Tue-13:40,666 Fri-00:00,10M 00:01,off Sun-23:00,666",
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 0000, Bandwidth: 10 * 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: 666 * 1024},
|
||||
},
|
||||
false,
|
||||
},
|
||||
} {
|
||||
tt := BwTimetable{}
|
||||
err := tt.Set(test.in)
|
||||
@@ -59,52 +138,189 @@ func TestBwTimetableLimitAt(t *testing.T) {
|
||||
{
|
||||
BwTimetable{},
|
||||
time.Date(2017, time.April, 20, 15, 0, 0, 0, time.UTC),
|
||||
BwTimeSlot{HHMM: 0, Bandwidth: -1},
|
||||
},
|
||||
{
|
||||
BwTimetable{BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024}},
|
||||
time.Date(2017, time.April, 20, 15, 0, 0, 0, time.UTC),
|
||||
BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: -1},
|
||||
},
|
||||
{
|
||||
BwTimetable{
|
||||
BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
},
|
||||
time.Date(2017, time.April, 20, 15, 0, 0, 0, time.UTC),
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
},
|
||||
{
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2350, Bandwidth: -1},
|
||||
},
|
||||
time.Date(2017, time.April, 20, 10, 15, 0, 0, time.UTC),
|
||||
BwTimeSlot{HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: -1},
|
||||
},
|
||||
{
|
||||
BwTimetable{
|
||||
BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2350, Bandwidth: -1},
|
||||
},
|
||||
time.Date(2017, time.April, 20, 11, 0, 0, 0, time.UTC),
|
||||
BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
},
|
||||
{
|
||||
BwTimetable{
|
||||
BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2350, Bandwidth: -1},
|
||||
},
|
||||
time.Date(2017, time.April, 20, 13, 1, 0, 0, time.UTC),
|
||||
BwTimeSlot{HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
},
|
||||
{
|
||||
BwTimetable{
|
||||
BwTimeSlot{HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1300, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2301, Bandwidth: 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 3, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 2350, Bandwidth: -1},
|
||||
},
|
||||
time.Date(2017, time.April, 20, 23, 59, 0, 0, time.UTC),
|
||||
BwTimeSlot{HHMM: 2350, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 4, HHMM: 2350, Bandwidth: -1},
|
||||
},
|
||||
{
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 0000, Bandwidth: 10 * 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1000, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: 666 * 1024},
|
||||
},
|
||||
time.Date(2017, time.April, 20, 23, 59, 0, 0, time.UTC),
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: 666 * 1024},
|
||||
},
|
||||
{
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 0000, Bandwidth: 10 * 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1000, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: 666 * 1024},
|
||||
},
|
||||
time.Date(2017, time.April, 21, 23, 59, 0, 0, time.UTC),
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 0000, Bandwidth: 10 * 1024 * 1024},
|
||||
},
|
||||
{
|
||||
BwTimetable{
|
||||
BwTimeSlot{DayOfTheWeek: 1, HHMM: 1100, Bandwidth: 333 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 2, HHMM: 1340, Bandwidth: 666 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 5, HHMM: 0000, Bandwidth: 10 * 1024 * 1024},
|
||||
BwTimeSlot{DayOfTheWeek: 6, HHMM: 1000, Bandwidth: -1},
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: 666 * 1024},
|
||||
},
|
||||
time.Date(2017, time.April, 17, 10, 59, 0, 0, time.UTC),
|
||||
BwTimeSlot{DayOfTheWeek: 0, HHMM: 2300, Bandwidth: 666 * 1024},
|
||||
},
|
||||
} {
|
||||
slot := test.tt.LimitAt(test.now)
|
||||
|
||||
@@ -30,6 +30,7 @@ import (
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/driveletter"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/fspath"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/crypto/nacl/secretbox"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
@@ -865,12 +866,12 @@ func NewRemoteName() (name string) {
|
||||
for {
|
||||
fmt.Printf("name> ")
|
||||
name = ReadLine()
|
||||
parts := fs.Matcher.FindStringSubmatch(name + ":")
|
||||
parts := fspath.Matcher.FindStringSubmatch(name + ":")
|
||||
switch {
|
||||
case name == "":
|
||||
fmt.Printf("Can't use empty name.\n")
|
||||
case driveletter.IsDriveLetter(name):
|
||||
fmt.Printf("Can't use %q as it can be confused a drive letter.\n", name)
|
||||
fmt.Printf("Can't use %q as it can be confused with a drive letter.\n", name)
|
||||
case parts == nil:
|
||||
fmt.Printf("Can't use %q as it has invalid characters in it.\n", name)
|
||||
default:
|
||||
|
||||
@@ -16,7 +16,11 @@ import (
|
||||
|
||||
// ReadPassword reads a password without echoing it to the terminal.
|
||||
func ReadPassword() string {
|
||||
line, err := terminal.ReadPassword(int(os.Stdin.Fd()))
|
||||
stdin := int(os.Stdin.Fd())
|
||||
if !terminal.IsTerminal(stdin) {
|
||||
return ReadLine()
|
||||
}
|
||||
line, err := terminal.ReadPassword(stdin)
|
||||
_, _ = fmt.Fprintln(os.Stderr)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to read password: %v", err)
|
||||
|
||||
@@ -173,7 +173,7 @@ func NewFilter(opt *Opt) (f *Filter, err error) {
|
||||
}
|
||||
|
||||
if addImplicitExclude && foundExcludeRule {
|
||||
fs.Infof(nil, "Using --filter is recommended instead of both --include and --exclude as the order they are parsed in is indeterminate")
|
||||
fs.Errorf(nil, "Using --filter is recommended instead of both --include and --exclude as the order they are parsed in is indeterminate")
|
||||
}
|
||||
|
||||
for _, rule := range f.Opt.FilterRule {
|
||||
|
||||
17
fs/fs.go
17
fs/fs.go
@@ -9,12 +9,11 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs/driveletter"
|
||||
"github.com/ncw/rclone/fs/fspath"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -786,24 +785,20 @@ func MustFind(name string) *RegInfo {
|
||||
return fs
|
||||
}
|
||||
|
||||
// Matcher is a pattern to match an rclone URL
|
||||
var Matcher = regexp.MustCompile(`^([\w_ -]+):(.*)$`)
|
||||
|
||||
// ParseRemote deconstructs a path into configName, fsPath, looking up
|
||||
// the fsName in the config file (returning NotFoundInConfigFile if not found)
|
||||
func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, err error) {
|
||||
parts := Matcher.FindStringSubmatch(path)
|
||||
configName, fsPath = fspath.Parse(path)
|
||||
var fsName string
|
||||
fsName, configName, fsPath = "local", "local", path
|
||||
if parts != nil && !driveletter.IsDriveLetter(parts[1]) {
|
||||
configName, fsPath = parts[1], parts[2]
|
||||
if configName != "" {
|
||||
fsName = ConfigFileGet(configName, "type")
|
||||
if fsName == "" {
|
||||
return nil, "", "", ErrorNotFoundInConfigFile
|
||||
}
|
||||
} else {
|
||||
fsName = "local"
|
||||
configName = "local"
|
||||
}
|
||||
// change native directory separators to / if there are any
|
||||
fsPath = filepath.ToSlash(fsPath)
|
||||
fsInfo, err = Find(fsName)
|
||||
return fsInfo, configName, fsPath, err
|
||||
}
|
||||
|
||||
@@ -252,6 +252,10 @@ func ShouldRetry(err error) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// FIXME Handle this correctly, perhaps Cause should not ever return nil?
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
// Check if it is a retriable error
|
||||
for _, retriableErr := range retriableErrors {
|
||||
if err == retriableErr {
|
||||
|
||||
@@ -3,27 +3,46 @@ package fspath
|
||||
|
||||
import (
|
||||
"path"
|
||||
"strings"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
||||
"github.com/ncw/rclone/fs/driveletter"
|
||||
)
|
||||
|
||||
// RemoteSplit splits a remote into a parent and a leaf
|
||||
// Matcher is a pattern to match an rclone URL
|
||||
var Matcher = regexp.MustCompile(`^([\w_ -]+):(.*)$`)
|
||||
|
||||
// Parse deconstructs a remote path into configName and fsPath
|
||||
//
|
||||
// If the path is a local path then configName will be returned as "".
|
||||
//
|
||||
// So "remote:path/to/dir" will return "remote", "path/to/dir"
|
||||
// and "/path/to/local" will return ("", "/path/to/local")
|
||||
//
|
||||
// Note that this will turn \ into / in the fsPath on Windows
|
||||
func Parse(path string) (configName, fsPath string) {
|
||||
parts := Matcher.FindStringSubmatch(path)
|
||||
configName, fsPath = "", path
|
||||
if parts != nil && !driveletter.IsDriveLetter(parts[1]) {
|
||||
configName, fsPath = parts[1], parts[2]
|
||||
}
|
||||
// change native directory separators to / if there are any
|
||||
fsPath = filepath.ToSlash(fsPath)
|
||||
return configName, fsPath
|
||||
}
|
||||
|
||||
// Split splits a remote into a parent and a leaf
|
||||
//
|
||||
// if it returns leaf as an empty string then remote is a directory
|
||||
//
|
||||
// if it returns parent as an empty string then that means the current directory
|
||||
//
|
||||
// The returned values have the property that parent + leaf == remote
|
||||
func RemoteSplit(remote string) (parent string, leaf string) {
|
||||
// Split remote on :
|
||||
i := strings.Index(remote, ":")
|
||||
remoteName := ""
|
||||
remotePath := remote
|
||||
if i >= 0 {
|
||||
remoteName = remote[:i+1]
|
||||
remotePath = remote[i+1:]
|
||||
} else if strings.HasSuffix(remotePath, "/") {
|
||||
// if no : and ends with / must be directory
|
||||
return remotePath, ""
|
||||
// (except under Windows where \ will be translated into /)
|
||||
func Split(remote string) (parent string, leaf string) {
|
||||
remoteName, remotePath := Parse(remote)
|
||||
if remoteName != "" {
|
||||
remoteName += ":"
|
||||
}
|
||||
// Construct new remote name without last segment
|
||||
parent, leaf = path.Split(remotePath)
|
||||
|
||||
@@ -7,8 +7,23 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestRemoteSplit(t *testing.T) {
|
||||
func TestParse(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in, wantConfigName, wantFsPath string
|
||||
}{
|
||||
{"", "", ""},
|
||||
{"/path/to/file", "", "/path/to/file"},
|
||||
{"path/to/file", "", "path/to/file"},
|
||||
{"remote:path/to/file", "remote", "path/to/file"},
|
||||
{"remote:/path/to/file", "remote", "/path/to/file"},
|
||||
} {
|
||||
gotConfigName, gotFsPath := Parse(test.in)
|
||||
assert.Equal(t, test.wantConfigName, gotConfigName)
|
||||
assert.Equal(t, test.wantFsPath, gotFsPath)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSplit(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
remote, wantParent, wantLeaf string
|
||||
}{
|
||||
@@ -27,7 +42,7 @@ func TestRemoteSplit(t *testing.T) {
|
||||
{"root/", "root/", ""},
|
||||
{"a/b/", "a/b/", ""},
|
||||
} {
|
||||
gotParent, gotLeaf := RemoteSplit(test.remote)
|
||||
gotParent, gotLeaf := Split(test.remote)
|
||||
assert.Equal(t, test.wantParent, gotParent, test.remote)
|
||||
assert.Equal(t, test.wantLeaf, gotLeaf, test.remote)
|
||||
assert.Equal(t, test.remote, gotParent+gotLeaf, fmt.Sprintf("%s: %q + %q != %q", test.remote, gotParent, gotLeaf, test.remote))
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
_ "net/http/pprof" // install the pprof http handlers
|
||||
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/cmd/serve/httplib"
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package fs
|
||||
|
||||
// Version of rclone
|
||||
var Version = "v1.42"
|
||||
var Version = "v1.42-DEV"
|
||||
|
||||
@@ -143,6 +143,7 @@ type Opts struct {
|
||||
Parameters url.Values // any parameters for the final URL
|
||||
TransferEncoding []string // transfer encoding, set to "identity" to disable chunked encoding
|
||||
Close bool // set to close the connection after this transaction
|
||||
NoRedirect bool // if this is set then the client won't follow redirects
|
||||
}
|
||||
|
||||
// Copy creates a copy of the options
|
||||
@@ -189,6 +190,15 @@ func ClientWithHeaderReset(c *http.Client, headers map[string]string) *http.Clie
|
||||
return &clientCopy
|
||||
}
|
||||
|
||||
// ClientWithNoRedirects makes a new http client which won't follow redirects
|
||||
func ClientWithNoRedirects(c *http.Client) *http.Client {
|
||||
clientCopy := *c
|
||||
clientCopy.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
return &clientCopy
|
||||
}
|
||||
|
||||
// Call makes the call and returns the http.Response
|
||||
//
|
||||
// if err != nil then resp.Body will need to be closed
|
||||
@@ -252,7 +262,12 @@ func (api *Client) Call(opts *Opts) (resp *http.Response, err error) {
|
||||
if opts.UserName != "" || opts.Password != "" {
|
||||
req.SetBasicAuth(opts.UserName, opts.Password)
|
||||
}
|
||||
c := ClientWithHeaderReset(api.c, headers)
|
||||
var c *http.Client
|
||||
if opts.NoRedirect {
|
||||
c = ClientWithNoRedirects(api.c)
|
||||
} else {
|
||||
c = ClientWithHeaderReset(api.c, headers)
|
||||
}
|
||||
if api.signer != nil {
|
||||
err = api.signer(req)
|
||||
if err != nil {
|
||||
|
||||
1
vendor/cloud.google.com/go/.travis.yml
generated
vendored
1
vendor/cloud.google.com/go/.travis.yml
generated
vendored
@@ -14,6 +14,7 @@ script:
|
||||
GCLOUD_TESTS_GOLANG_KEY="$(pwd)/dulcet-port-762-key.json"
|
||||
GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID="gcloud-golang-firestore-tests"
|
||||
GCLOUD_TESTS_GOLANG_FIRESTORE_KEY="$(pwd)/gcloud-golang-firestore-tests-key.json"
|
||||
GCLOUD_TESTS_GOLANG_KEYRING="projects/dulcet-port-762/locations/us/keyRings/go-integration-test"
|
||||
./run-tests.sh $TRAVIS_COMMIT
|
||||
env:
|
||||
matrix:
|
||||
|
||||
37
vendor/cloud.google.com/go/CONTRIBUTING.md
generated
vendored
37
vendor/cloud.google.com/go/CONTRIBUTING.md
generated
vendored
@@ -31,11 +31,11 @@ To run the integrations tests, creating and configuration of a project in the
|
||||
Google Developers Console is required.
|
||||
|
||||
After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount).
|
||||
Ensure the project-level **Owner**
|
||||
Ensure the project-level **Owner**
|
||||
[IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to the
|
||||
service account. Alternatively, the account can be granted all of the following roles:
|
||||
- **Editor**
|
||||
- **Logs Configuration Writer**
|
||||
- **Editor**
|
||||
- **Logs Configuration Writer**
|
||||
- **PubSub Admin**
|
||||
|
||||
Once you create a project, set the following environment variables to be able to
|
||||
@@ -43,13 +43,19 @@ run the against the actual APIs.
|
||||
|
||||
- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
|
||||
- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
|
||||
- **GCLOUD_TESTS_API_KEY**: Your API key.
|
||||
|
||||
Firestore requires a different project and key:
|
||||
Some packages require additional environment variables to be set:
|
||||
|
||||
- **GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID**: Developers Console project's ID
|
||||
supporting Firestore
|
||||
- **GCLOUD_TESTS_GOLANG_FIRESTORE_KEY**: The path to the JSON key file.
|
||||
- firestore
|
||||
- **GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID**: project ID for Firestore.
|
||||
- **GCLOUD_TESTS_GOLANG_FIRESTORE_KEY**: The path to the JSON key file.
|
||||
- storage
|
||||
- **GCLOUD_TESTS_GOLANG_KEYRING**: The full name of the keyring for the tests, in the
|
||||
form "projects/P/locations/L/keyRings/R".
|
||||
- translate
|
||||
- **GCLOUD_TESTS_API_KEY**: API key for using the Translate API.
|
||||
- profiler
|
||||
- **GCLOUD_TESTS_GOLANG_ZONE**: Compute Engine zone.
|
||||
|
||||
Install the [gcloud command-line tool][gcloudcli] to your machine and use it
|
||||
to create some resources used in integration tests.
|
||||
@@ -80,11 +86,20 @@ $ gcloud beta spanner instances create go-integration-test --config regional-us-
|
||||
# NOTE: Spanner instances are priced by the node-hour, so you may want to delete
|
||||
# the instance after testing with 'gcloud beta spanner instances delete'.
|
||||
|
||||
|
||||
# For Storage integration tests:
|
||||
# Enable KMS for your project in the Cloud Console.
|
||||
# Create a KMS keyring, in the same location as the default location for your project's buckets.
|
||||
$ gcloud kms keyrings create MY_KEYRING --location MY_LOCATION
|
||||
# Create two keys in the keyring, named key1 and key2.
|
||||
$ gcloud kms keys create key1 --keyring MY_KEYRING --location MY_LOCATION --purpose encryption
|
||||
$ gcloud kms keys create key2 --keyring MY_KEYRING --location MY_LOCATION --purpose encryption
|
||||
# As mentioned above, set the GCLOUD_TESTS_GOLANG_KEYRING environment variable.
|
||||
$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/MY_LOCATION/keyRings/MY_KEYRING
|
||||
# Authorize Google Cloud Storage to encrypt and decrypt using key1.
|
||||
gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
|
||||
```
|
||||
|
||||
Once you've set the environment variables, you can run the integration tests by
|
||||
running:
|
||||
Once you've done the necessary setup, you can run the integration tests by running:
|
||||
|
||||
``` sh
|
||||
$ go test -v cloud.google.com/go/...
|
||||
|
||||
67
vendor/cloud.google.com/go/README.md
generated
vendored
67
vendor/cloud.google.com/go/README.md
generated
vendored
@@ -33,6 +33,73 @@ make backwards-incompatible changes.
|
||||
|
||||
## News
|
||||
|
||||
_May 18, 2018_
|
||||
|
||||
*v0.23.0*
|
||||
|
||||
- bigquery: Add DDL stats to query statistics.
|
||||
- bigtable:
|
||||
- cbt: Add cells-per-column limit for row lookup.
|
||||
- cbt: Make it possible to combine read filters.
|
||||
- dlp: v2beta2 client removed. Use the v2 client instead.
|
||||
- firestore, spanner: Fix compilation errors due to protobuf changes.
|
||||
|
||||
_May 8, 2018_
|
||||
|
||||
*v0.22.0*
|
||||
|
||||
- bigtable:
|
||||
- cbt: Support cells per column limit for row read.
|
||||
- bttest: Correctly handle empty RowSet.
|
||||
- Fix ReadModifyWrite operation in emulator.
|
||||
- Fix API path in GetCluster.
|
||||
|
||||
- bigquery:
|
||||
- BEHAVIOR CHANGE: Retry on 503 status code.
|
||||
- Add dataset.DeleteWithContents.
|
||||
- Add SchemaUpdateOptions for query jobs.
|
||||
- Add Timeline to QueryStatistics.
|
||||
- Add more stats to ExplainQueryStage.
|
||||
- Support Parquet data format.
|
||||
|
||||
- datastore:
|
||||
- Support omitempty for times.
|
||||
|
||||
- dlp:
|
||||
- **BREAKING CHANGE:** Remove v1beta1 client. Please migrate to the v2 client,
|
||||
which is now out of beta.
|
||||
- Add v2 client.
|
||||
|
||||
- firestore:
|
||||
- BEHAVIOR CHANGE: Treat set({}, MergeAll) as valid.
|
||||
|
||||
- iam:
|
||||
- Support JWT signing via SignJwt callopt.
|
||||
|
||||
- profiler:
|
||||
- BEHAVIOR CHANGE: PollForSerialOutput returns an error when context.Done.
|
||||
- BEHAVIOR CHANGE: Increase the initial backoff to 1 minute.
|
||||
- Avoid returning empty serial port output.
|
||||
|
||||
- pubsub:
|
||||
- BEHAVIOR CHANGE: Don't backoff during next retryable error once stream is healthy.
|
||||
- BEHAVIOR CHANGE: Don't backoff on EOF.
|
||||
- pstest: Support Acknowledge and ModifyAckDeadline RPCs.
|
||||
|
||||
- redis:
|
||||
- Add v1 beta Redis client.
|
||||
|
||||
- spanner:
|
||||
- Support SessionLabels.
|
||||
|
||||
- speech:
|
||||
- Add api v1 beta1 client.
|
||||
|
||||
- storage:
|
||||
- BEHAVIOR CHANGE: Retry reads when retryable error occurs.
|
||||
- Fix delete of object in requester-pays bucket.
|
||||
- Support KMS integration.
|
||||
|
||||
_April 9, 2018_
|
||||
|
||||
*v0.21.0*
|
||||
|
||||
13
vendor/cloud.google.com/go/RELEASING.md
generated
vendored
Normal file
13
vendor/cloud.google.com/go/RELEASING.md
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
# How to Release this Repo
|
||||
|
||||
1. Determine the current release version with `git tag -l`. It should look
|
||||
something like `vX.Y.Z`. We'll call the current
|
||||
version `$CV` and the new version `$NV`.
|
||||
1. On master, run `git log $CV..` to list all the changes since the last
|
||||
release.
|
||||
1. Edit the News section of `README.md` to include a summary of the changes.
|
||||
1. Mail the CL containing the `README.md` changes. When the CL is approved, submit it.
|
||||
1. Without submitting any other CLs:
|
||||
a. Switch to master.
|
||||
b. Tag the repo with the next version: `git tag $NV`.
|
||||
c. Push the tag: `git push origin $NV`.
|
||||
49
vendor/cloud.google.com/go/authexample_test.go
generated
vendored
49
vendor/cloud.google.com/go/authexample_test.go
generated
vendored
@@ -16,16 +16,18 @@ package cloud_test
|
||||
|
||||
import (
|
||||
"cloud.google.com/go/datastore"
|
||||
"cloud.google.com/go/pubsub"
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2/google"
|
||||
"google.golang.org/api/option"
|
||||
)
|
||||
|
||||
// Google Application Default Credentials is the recommended way to authorize
|
||||
// and authenticate clients.
|
||||
//
|
||||
// For information on how to create and obtain Application Default Credentials, see
|
||||
// https://developers.google.com/identity/protocols/application-default-credentials.
|
||||
func Example_applicationDefaultCredentials() {
|
||||
// Google Application Default Credentials is the recommended way to authorize
|
||||
// and authenticate clients.
|
||||
//
|
||||
// See the following link on how to create and obtain Application Default Credentials:
|
||||
// https://developers.google.com/identity/protocols/application-default-credentials.
|
||||
client, err := datastore.NewClient(context.Background(), "project-id")
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
@@ -33,15 +35,36 @@ func Example_applicationDefaultCredentials() {
|
||||
_ = client // Use the client.
|
||||
}
|
||||
|
||||
func Example_serviceAccountFile() {
|
||||
// Use a JSON key file associated with a Google service account to
|
||||
// authenticate and authorize. Service Account keys can be created and
|
||||
// downloaded from https://console.developers.google.com/permissions/serviceaccounts.
|
||||
//
|
||||
// Note: This example uses the datastore client, but the same steps apply to
|
||||
// the other client libraries underneath this package.
|
||||
// You can use a file with credentials to authenticate and authorize, such as a JSON
|
||||
// key file associated with a Google service account. Service Account keys can be
|
||||
// created and downloaded from
|
||||
// https://console.developers.google.com/permissions/serviceaccounts.
|
||||
//
|
||||
// This example uses the Datastore client, but the same steps apply to
|
||||
// the other client libraries underneath this package.
|
||||
func Example_credentialsFile() {
|
||||
client, err := datastore.NewClient(context.Background(),
|
||||
"project-id", option.WithServiceAccountFile("/path/to/service-account-key.json"))
|
||||
"project-id", option.WithCredentialsFile("/path/to/service-account-key.json"))
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
_ = client // Use the client.
|
||||
}
|
||||
|
||||
// In some cases (for instance, you don't want to store secrets on disk), you can
|
||||
// create credentials from in-memory JSON and use the WithCredentials option.
|
||||
//
|
||||
// The google package in this example is at golang.org/x/oauth2/google.
|
||||
//
|
||||
// This example uses the PubSub client, but the same steps apply to
|
||||
// the other client libraries underneath this package.
|
||||
func Example_credentialsFromJSON() {
|
||||
ctx := context.Background()
|
||||
creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), pubsub.ScopePubSub)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
client, err := pubsub.NewClient(ctx, "project-id", option.WithCredentials(creds))
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
|
||||
7
vendor/cloud.google.com/go/bigquery/bigquery.go
generated
vendored
7
vendor/cloud.google.com/go/bigquery/bigquery.go
generated
vendored
@@ -147,7 +147,10 @@ func runWithRetry(ctx context.Context, call func() error) error {
|
||||
})
|
||||
}
|
||||
|
||||
// This is the correct definition of retryable according to the BigQuery team.
|
||||
// This is the correct definition of retryable according to the BigQuery team. It
|
||||
// also considers 502 ("Bad Gateway") and 503 ("Service Unavailable") errors
|
||||
// retryable; these are returned by systems between the client and the BigQuery
|
||||
// service.
|
||||
func retryableError(err error) bool {
|
||||
e, ok := err.(*googleapi.Error)
|
||||
if !ok {
|
||||
@@ -157,5 +160,5 @@ func retryableError(err error) bool {
|
||||
if len(e.Errors) > 0 {
|
||||
reason = e.Errors[0].Reason
|
||||
}
|
||||
return e.Code == http.StatusBadGateway || reason == "backendError" || reason == "rateLimitExceeded"
|
||||
return e.Code == http.StatusServiceUnavailable || e.Code == http.StatusBadGateway || reason == "backendError" || reason == "rateLimitExceeded"
|
||||
}
|
||||
|
||||
16
vendor/cloud.google.com/go/bigquery/dataset.go
generated
vendored
16
vendor/cloud.google.com/go/bigquery/dataset.go
generated
vendored
@@ -147,12 +147,21 @@ func accessListToBQ(a []*AccessEntry) ([]*bq.DatasetAccess, error) {
|
||||
return q, nil
|
||||
}
|
||||
|
||||
// Delete deletes the dataset.
|
||||
// Delete deletes the dataset. Delete will fail if the dataset is not empty.
|
||||
func (d *Dataset) Delete(ctx context.Context) (err error) {
|
||||
return d.deleteInternal(ctx, false)
|
||||
}
|
||||
|
||||
// DeleteWithContents deletes the dataset, as well as contained resources.
|
||||
func (d *Dataset) DeleteWithContents(ctx context.Context) (err error) {
|
||||
return d.deleteInternal(ctx, true)
|
||||
}
|
||||
|
||||
func (d *Dataset) deleteInternal(ctx context.Context, deleteContents bool) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Delete")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx)
|
||||
call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx).DeleteContents(deleteContents)
|
||||
setClientHeader(call.Header())
|
||||
return call.Do()
|
||||
}
|
||||
@@ -336,6 +345,9 @@ func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) {
|
||||
}
|
||||
|
||||
func bqToTable(tr *bq.TableReference, c *Client) *Table {
|
||||
if tr == nil {
|
||||
return nil
|
||||
}
|
||||
return &Table{
|
||||
ProjectID: tr.ProjectId,
|
||||
DatasetID: tr.DatasetId,
|
||||
|
||||
8
vendor/cloud.google.com/go/bigquery/doc.go
generated
vendored
8
vendor/cloud.google.com/go/bigquery/doc.go
generated
vendored
@@ -20,6 +20,9 @@ Note: This package is in beta. Some backwards-incompatible changes may occur.
|
||||
The following assumes a basic familiarity with BigQuery concepts.
|
||||
See https://cloud.google.com/bigquery/docs.
|
||||
|
||||
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
|
||||
connection pooling and similar aspects of this package.
|
||||
|
||||
|
||||
Creating a Client
|
||||
|
||||
@@ -294,10 +297,5 @@ Extractor, then optionally configure it, and lastly call its Run method.
|
||||
extractor.DisableHeader = true
|
||||
job, err = extractor.Run(ctx)
|
||||
// Poll the job for completion if desired, as above.
|
||||
|
||||
Authentication
|
||||
|
||||
See examples of authorization and authentication at
|
||||
https://godoc.org/cloud.google.com/go#pkg-examples.
|
||||
*/
|
||||
package bigquery // import "cloud.google.com/go/bigquery"
|
||||
|
||||
1
vendor/cloud.google.com/go/bigquery/external.go
generated
vendored
1
vendor/cloud.google.com/go/bigquery/external.go
generated
vendored
@@ -32,6 +32,7 @@ const (
|
||||
DatastoreBackup DataFormat = "DATASTORE_BACKUP"
|
||||
GoogleSheets DataFormat = "GOOGLE_SHEETS"
|
||||
Bigtable DataFormat = "BIGTABLE"
|
||||
Parquet DataFormat = "PARQUET"
|
||||
)
|
||||
|
||||
// ExternalData is a table which is stored outside of BigQuery. It is implemented by
|
||||
|
||||
4
vendor/cloud.google.com/go/bigquery/file.go
generated
vendored
4
vendor/cloud.google.com/go/bigquery/file.go
generated
vendored
@@ -48,8 +48,8 @@ func (r *ReaderSource) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader
|
||||
// file may live in Google Cloud Storage (see GCSReference), or it may be
|
||||
// loaded into a table via the Table.LoaderFromReader.
|
||||
type FileConfig struct {
|
||||
// SourceFormat is the format of the GCS data to be read.
|
||||
// Allowed values are: CSV, Avro, JSON, DatastoreBackup. The default is CSV.
|
||||
// SourceFormat is the format of the data to be read.
|
||||
// Allowed values are: CSV, Avro, Parquet, JSON, DatastoreBackup. The default is CSV.
|
||||
SourceFormat DataFormat
|
||||
|
||||
// Indicates if we should automatically infer the options and
|
||||
|
||||
51
vendor/cloud.google.com/go/bigquery/integration_test.go
generated
vendored
51
vendor/cloud.google.com/go/bigquery/integration_test.go
generated
vendored
@@ -34,6 +34,7 @@ import (
|
||||
"cloud.google.com/go/internal"
|
||||
"cloud.google.com/go/internal/pretty"
|
||||
"cloud.google.com/go/internal/testutil"
|
||||
"cloud.google.com/go/internal/uid"
|
||||
"cloud.google.com/go/storage"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/googleapi"
|
||||
@@ -55,8 +56,8 @@ var (
|
||||
testTableExpiration time.Time
|
||||
// BigQuery does not accept hyphens in dataset or table IDs, so we create IDs
|
||||
// with underscores.
|
||||
datasetIDs = testutil.NewUIDSpaceSep("dataset", '_')
|
||||
tableIDs = testutil.NewUIDSpaceSep("table", '_')
|
||||
datasetIDs = uid.NewSpace("dataset", &uid.Options{Sep: '_'})
|
||||
tableIDs = uid.NewSpace("table", &uid.Options{Sep: '_'})
|
||||
)
|
||||
|
||||
// Note: integration tests cannot be run in parallel, because TestIntegration_Location
|
||||
@@ -105,28 +106,12 @@ func initIntegrationTest() func() {
|
||||
}
|
||||
testTableExpiration = time.Now().Add(10 * time.Minute).Round(time.Second)
|
||||
return func() {
|
||||
if err := deleteDataset(ctx, dataset); err != nil {
|
||||
if err := dataset.DeleteWithContents(ctx); err != nil {
|
||||
log.Printf("could not delete %s", dataset.DatasetID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func deleteDataset(ctx context.Context, ds *Dataset) error {
|
||||
it := ds.Tables(ctx)
|
||||
for {
|
||||
tbl, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := tbl.Delete(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return ds.Delete(ctx)
|
||||
}
|
||||
func TestIntegration_TableCreate(t *testing.T) {
|
||||
// Check that creating a record field with an empty schema is an error.
|
||||
if client == nil {
|
||||
@@ -167,7 +152,9 @@ func TestIntegration_TableCreateView(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("table.create: Did not expect an error, got: %v", err)
|
||||
}
|
||||
view.Delete(ctx)
|
||||
if err := view.Delete(ctx); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_TableMetadata(t *testing.T) {
|
||||
@@ -312,6 +299,28 @@ func TestIntegration_DatasetDelete(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_DatasetDeleteWithContents(t *testing.T) {
|
||||
if client == nil {
|
||||
t.Skip("Integration tests skipped")
|
||||
}
|
||||
ctx := context.Background()
|
||||
ds := client.Dataset(datasetIDs.New())
|
||||
if err := ds.Create(ctx, nil); err != nil {
|
||||
t.Fatalf("creating dataset %s: %v", ds.DatasetID, err)
|
||||
}
|
||||
table := ds.Table(tableIDs.New())
|
||||
if err := table.Create(ctx, nil); err != nil {
|
||||
t.Fatalf("creating table %s in dataset %s: %v", table.TableID, table.DatasetID, err)
|
||||
}
|
||||
// We expect failure here
|
||||
if err := ds.Delete(ctx); err == nil {
|
||||
t.Fatalf("non-recursive delete of dataset %s succeeded unexpectedly.", ds.DatasetID)
|
||||
}
|
||||
if err := ds.DeleteWithContents(ctx); err != nil {
|
||||
t.Fatalf("deleting recursively dataset %s: %v", ds.DatasetID, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_DatasetUpdateETags(t *testing.T) {
|
||||
if client == nil {
|
||||
t.Skip("Integration tests skipped")
|
||||
@@ -1570,7 +1579,7 @@ func TestIntegration_TableUseLegacySQL(t *testing.T) {
|
||||
} else if !gotErr && test.err {
|
||||
t.Errorf("%+v:\nsucceeded, but want error", test)
|
||||
}
|
||||
view.Delete(ctx)
|
||||
_ = view.Delete(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
139
vendor/cloud.google.com/go/bigquery/job.go
generated
vendored
139
vendor/cloud.google.com/go/bigquery/job.go
generated
vendored
@@ -404,6 +404,9 @@ type QueryStatistics struct {
|
||||
// statements INSERT, UPDATE or DELETE.
|
||||
NumDMLAffectedRows int64
|
||||
|
||||
// Describes a timeline of job execution.
|
||||
Timeline []*QueryTimelineSample
|
||||
|
||||
// ReferencedTables: [Output-only, Experimental] Referenced tables for
|
||||
// the job. Queries that reference more than 50 tables will not have a
|
||||
// complete list.
|
||||
@@ -413,25 +416,59 @@ type QueryStatistics struct {
|
||||
// non-legacy SQL queries.
|
||||
Schema Schema
|
||||
|
||||
// Slot-milliseconds consumed by this query job.
|
||||
SlotMillis int64
|
||||
|
||||
// Standard SQL: list of undeclared query parameter names detected during a
|
||||
// dry run validation.
|
||||
UndeclaredQueryParameterNames []string
|
||||
|
||||
// DDL target table.
|
||||
DDLTargetTable *Table
|
||||
|
||||
// DDL Operation performed on the target table. Used to report how the
|
||||
// query impacted the DDL target table.
|
||||
DDLOperationPerformed string
|
||||
}
|
||||
|
||||
// ExplainQueryStage describes one stage of a query.
|
||||
type ExplainQueryStage struct {
|
||||
// CompletedParallelInputs: Number of parallel input segments completed.
|
||||
CompletedParallelInputs int64
|
||||
|
||||
// ComputeAvg: Duration the average shard spent on CPU-bound tasks.
|
||||
ComputeAvg time.Duration
|
||||
|
||||
// ComputeMax: Duration the slowest shard spent on CPU-bound tasks.
|
||||
ComputeMax time.Duration
|
||||
|
||||
// Relative amount of the total time the average shard spent on CPU-bound tasks.
|
||||
ComputeRatioAvg float64
|
||||
|
||||
// Relative amount of the total time the slowest shard spent on CPU-bound tasks.
|
||||
ComputeRatioMax float64
|
||||
|
||||
// EndTime: Stage end time.
|
||||
EndTime time.Time
|
||||
|
||||
// Unique ID for stage within plan.
|
||||
ID int64
|
||||
|
||||
// InputStages: IDs for stages that are inputs to this stage.
|
||||
InputStages []int64
|
||||
|
||||
// Human-readable name for stage.
|
||||
Name string
|
||||
|
||||
// ParallelInputs: Number of parallel input segments to be processed.
|
||||
ParallelInputs int64
|
||||
|
||||
// ReadAvg: Duration the average shard spent reading input.
|
||||
ReadAvg time.Duration
|
||||
|
||||
// ReadMax: Duration the slowest shard spent reading input.
|
||||
ReadMax time.Duration
|
||||
|
||||
// Relative amount of the total time the average shard spent reading input.
|
||||
ReadRatioAvg float64
|
||||
|
||||
@@ -444,6 +481,16 @@ type ExplainQueryStage struct {
|
||||
// Number of records written by the stage.
|
||||
RecordsWritten int64
|
||||
|
||||
// ShuffleOutputBytes: Total number of bytes written to shuffle.
|
||||
ShuffleOutputBytes int64
|
||||
|
||||
// ShuffleOutputBytesSpilled: Total number of bytes written to shuffle
|
||||
// and spilled to disk.
|
||||
ShuffleOutputBytesSpilled int64
|
||||
|
||||
// StartTime: Stage start time.
|
||||
StartTime time.Time
|
||||
|
||||
// Current status for the stage.
|
||||
Status string
|
||||
|
||||
@@ -451,12 +498,24 @@ type ExplainQueryStage struct {
|
||||
// chronological).
|
||||
Steps []*ExplainQueryStep
|
||||
|
||||
// WaitAvg: Duration the average shard spent waiting to be scheduled.
|
||||
WaitAvg time.Duration
|
||||
|
||||
// WaitMax: Duration the slowest shard spent waiting to be scheduled.
|
||||
WaitMax time.Duration
|
||||
|
||||
// Relative amount of the total time the average shard spent waiting to be scheduled.
|
||||
WaitRatioAvg float64
|
||||
|
||||
// Relative amount of the total time the slowest shard spent waiting to be scheduled.
|
||||
WaitRatioMax float64
|
||||
|
||||
// WriteAvg: Duration the average shard spent on writing output.
|
||||
WriteAvg time.Duration
|
||||
|
||||
// WriteMax: Duration the slowest shard spent on writing output.
|
||||
WriteMax time.Duration
|
||||
|
||||
// Relative amount of the total time the average shard spent on writing output.
|
||||
WriteRatioAvg float64
|
||||
|
||||
@@ -473,6 +532,25 @@ type ExplainQueryStep struct {
|
||||
Substeps []string
|
||||
}
|
||||
|
||||
// QueryTimelineSample represents a sample of execution statistics at a point in time.
|
||||
type QueryTimelineSample struct {
|
||||
|
||||
// Total number of units currently being processed by workers, represented as largest value since last sample.
|
||||
ActiveUnits int64
|
||||
|
||||
// Total parallel units of work completed by this query.
|
||||
CompletedUnits int64
|
||||
|
||||
// Time elapsed since start of query execution.
|
||||
Elapsed time.Duration
|
||||
|
||||
// Total parallel units of work remaining for the active stages.
|
||||
PendingUnits int64
|
||||
|
||||
// Cumulative slot-milliseconds consumed by the query.
|
||||
SlotMillis int64
|
||||
}
|
||||
|
||||
func (*ExtractStatistics) implementsStatistics() {}
|
||||
func (*LoadStatistics) implementsStatistics() {}
|
||||
func (*QueryStatistics) implementsStatistics() {}
|
||||
@@ -667,12 +745,16 @@ func (j *Job) setStatistics(s *bq.JobStatistics, c *Client) {
|
||||
js.Details = &QueryStatistics{
|
||||
BillingTier: s.Query.BillingTier,
|
||||
CacheHit: s.Query.CacheHit,
|
||||
DDLTargetTable: bqToTable(s.Query.DdlTargetTable, c),
|
||||
DDLOperationPerformed: s.Query.DdlOperationPerformed,
|
||||
StatementType: s.Query.StatementType,
|
||||
TotalBytesBilled: s.Query.TotalBytesBilled,
|
||||
TotalBytesProcessed: s.Query.TotalBytesProcessed,
|
||||
NumDMLAffectedRows: s.Query.NumDmlAffectedRows,
|
||||
QueryPlan: queryPlanFromProto(s.Query.QueryPlan),
|
||||
Schema: bqToSchema(s.Query.Schema),
|
||||
SlotMillis: s.Query.TotalSlotMs,
|
||||
Timeline: timelineFromProto(s.Query.Timeline),
|
||||
ReferencedTables: tables,
|
||||
UndeclaredQueryParameterNames: names,
|
||||
}
|
||||
@@ -691,20 +773,49 @@ func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage {
|
||||
})
|
||||
}
|
||||
res = append(res, &ExplainQueryStage{
|
||||
ComputeRatioAvg: s.ComputeRatioAvg,
|
||||
ComputeRatioMax: s.ComputeRatioMax,
|
||||
ID: s.Id,
|
||||
Name: s.Name,
|
||||
ReadRatioAvg: s.ReadRatioAvg,
|
||||
ReadRatioMax: s.ReadRatioMax,
|
||||
RecordsRead: s.RecordsRead,
|
||||
RecordsWritten: s.RecordsWritten,
|
||||
Status: s.Status,
|
||||
Steps: steps,
|
||||
WaitRatioAvg: s.WaitRatioAvg,
|
||||
WaitRatioMax: s.WaitRatioMax,
|
||||
WriteRatioAvg: s.WriteRatioAvg,
|
||||
WriteRatioMax: s.WriteRatioMax,
|
||||
CompletedParallelInputs: s.CompletedParallelInputs,
|
||||
ComputeAvg: time.Duration(s.ComputeMsAvg) * time.Millisecond,
|
||||
ComputeMax: time.Duration(s.ComputeMsMax) * time.Millisecond,
|
||||
ComputeRatioAvg: s.ComputeRatioAvg,
|
||||
ComputeRatioMax: s.ComputeRatioMax,
|
||||
EndTime: time.Unix(0, s.EndMs*1e6),
|
||||
ID: s.Id,
|
||||
InputStages: s.InputStages,
|
||||
Name: s.Name,
|
||||
ParallelInputs: s.ParallelInputs,
|
||||
ReadAvg: time.Duration(s.ReadMsAvg) * time.Millisecond,
|
||||
ReadMax: time.Duration(s.ReadMsMax) * time.Millisecond,
|
||||
ReadRatioAvg: s.ReadRatioAvg,
|
||||
ReadRatioMax: s.ReadRatioMax,
|
||||
RecordsRead: s.RecordsRead,
|
||||
RecordsWritten: s.RecordsWritten,
|
||||
ShuffleOutputBytes: s.ShuffleOutputBytes,
|
||||
ShuffleOutputBytesSpilled: s.ShuffleOutputBytesSpilled,
|
||||
StartTime: time.Unix(0, s.StartMs*1e6),
|
||||
Status: s.Status,
|
||||
Steps: steps,
|
||||
WaitAvg: time.Duration(s.WaitMsAvg) * time.Millisecond,
|
||||
WaitMax: time.Duration(s.WaitMsMax) * time.Millisecond,
|
||||
WaitRatioAvg: s.WaitRatioAvg,
|
||||
WaitRatioMax: s.WaitRatioMax,
|
||||
WriteAvg: time.Duration(s.WriteMsAvg) * time.Millisecond,
|
||||
WriteMax: time.Duration(s.WriteMsMax) * time.Millisecond,
|
||||
WriteRatioAvg: s.WriteRatioAvg,
|
||||
WriteRatioMax: s.WriteRatioMax,
|
||||
})
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func timelineFromProto(timeline []*bq.QueryTimelineSample) []*QueryTimelineSample {
|
||||
var res []*QueryTimelineSample
|
||||
for _, s := range timeline {
|
||||
res = append(res, &QueryTimelineSample{
|
||||
ActiveUnits: s.ActiveUnits,
|
||||
CompletedUnits: s.CompletedUnits,
|
||||
Elapsed: time.Duration(s.ElapsedMs) * time.Millisecond,
|
||||
PendingUnits: s.PendingUnits,
|
||||
SlotMillis: s.TotalSlotMs,
|
||||
})
|
||||
}
|
||||
return res
|
||||
|
||||
4
vendor/cloud.google.com/go/bigquery/load.go
generated
vendored
4
vendor/cloud.google.com/go/bigquery/load.go
generated
vendored
@@ -47,8 +47,8 @@ type LoadConfig struct {
|
||||
// Custom encryption configuration (e.g., Cloud KMS keys).
|
||||
DestinationEncryptionConfig *EncryptionConfig
|
||||
|
||||
// SchemaUpdateOptions allows the schema of the destination table to be
|
||||
// updated as a side effect of the load job.
|
||||
// Allows the schema of the destination table to be updated as a side effect of
|
||||
// the load job.
|
||||
SchemaUpdateOptions []string
|
||||
}
|
||||
|
||||
|
||||
37
vendor/cloud.google.com/go/bigquery/query.go
generated
vendored
37
vendor/cloud.google.com/go/bigquery/query.go
generated
vendored
@@ -119,6 +119,10 @@ type QueryConfig struct {
|
||||
|
||||
// Custom encryption configuration (e.g., Cloud KMS keys).
|
||||
DestinationEncryptionConfig *EncryptionConfig
|
||||
|
||||
// Allows the schema of the destination table to be updated as a side effect of
|
||||
// the query job.
|
||||
SchemaUpdateOptions []string
|
||||
}
|
||||
|
||||
func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
|
||||
@@ -131,6 +135,7 @@ func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
|
||||
MaximumBytesBilled: qc.MaxBytesBilled,
|
||||
TimePartitioning: qc.TimePartitioning.toBQ(),
|
||||
DestinationEncryptionConfiguration: qc.DestinationEncryptionConfig.toBQ(),
|
||||
SchemaUpdateOptions: qc.SchemaUpdateOptions,
|
||||
}
|
||||
if len(qc.TableDefinitions) > 0 {
|
||||
qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
|
||||
@@ -162,11 +167,12 @@ func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
|
||||
if len(qc.Parameters) > 0 && qc.UseLegacySQL {
|
||||
return nil, errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL")
|
||||
}
|
||||
ptrue := true
|
||||
pfalse := false
|
||||
if qc.UseLegacySQL {
|
||||
qconf.UseLegacySql = true
|
||||
qconf.UseLegacySql = &ptrue
|
||||
} else {
|
||||
qconf.UseLegacySql = false
|
||||
qconf.ForceSendFields = append(qconf.ForceSendFields, "UseLegacySql")
|
||||
qconf.UseLegacySql = &pfalse
|
||||
}
|
||||
if qc.Dst != nil && !qc.Dst.implicitTable() {
|
||||
qconf.DestinationTable = qc.Dst.toBQ()
|
||||
@@ -188,18 +194,21 @@ func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
|
||||
func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) {
|
||||
qq := q.Query
|
||||
qc := &QueryConfig{
|
||||
Labels: q.Labels,
|
||||
DryRun: q.DryRun,
|
||||
Q: qq.Query,
|
||||
CreateDisposition: TableCreateDisposition(qq.CreateDisposition),
|
||||
WriteDisposition: TableWriteDisposition(qq.WriteDisposition),
|
||||
AllowLargeResults: qq.AllowLargeResults,
|
||||
Priority: QueryPriority(qq.Priority),
|
||||
MaxBytesBilled: qq.MaximumBytesBilled,
|
||||
UseLegacySQL: qq.UseLegacySql,
|
||||
UseStandardSQL: !qq.UseLegacySql,
|
||||
TimePartitioning: bqToTimePartitioning(qq.TimePartitioning),
|
||||
Labels: q.Labels,
|
||||
DryRun: q.DryRun,
|
||||
Q: qq.Query,
|
||||
CreateDisposition: TableCreateDisposition(qq.CreateDisposition),
|
||||
WriteDisposition: TableWriteDisposition(qq.WriteDisposition),
|
||||
AllowLargeResults: qq.AllowLargeResults,
|
||||
Priority: QueryPriority(qq.Priority),
|
||||
MaxBytesBilled: qq.MaximumBytesBilled,
|
||||
UseLegacySQL: qq.UseLegacySql == nil || *qq.UseLegacySql,
|
||||
TimePartitioning: bqToTimePartitioning(qq.TimePartitioning),
|
||||
DestinationEncryptionConfig: bqToEncryptionConfig(qq.DestinationEncryptionConfiguration),
|
||||
SchemaUpdateOptions: qq.SchemaUpdateOptions,
|
||||
}
|
||||
qc.UseStandardSQL = !qc.UseLegacySQL
|
||||
|
||||
if len(qq.TableDefinitions) > 0 {
|
||||
qc.TableDefinitions = make(map[string]ExternalData)
|
||||
}
|
||||
|
||||
14
vendor/cloud.google.com/go/bigquery/query_test.go
generated
vendored
14
vendor/cloud.google.com/go/bigquery/query_test.go
generated
vendored
@@ -26,6 +26,7 @@ import (
|
||||
)
|
||||
|
||||
func defaultQueryJob() *bq.Job {
|
||||
pfalse := false
|
||||
return &bq.Job{
|
||||
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
|
||||
Configuration: &bq.JobConfiguration{
|
||||
@@ -40,8 +41,7 @@ func defaultQueryJob() *bq.Job {
|
||||
ProjectId: "def-project-id",
|
||||
DatasetId: "def-dataset-id",
|
||||
},
|
||||
UseLegacySql: false,
|
||||
ForceSendFields: []string{"UseLegacySql"},
|
||||
UseLegacySql: &pfalse,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -271,7 +271,8 @@ func TestQuery(t *testing.T) {
|
||||
},
|
||||
want: func() *bq.Job {
|
||||
j := defaultQueryJob()
|
||||
j.Configuration.Query.UseLegacySql = true
|
||||
ptrue := true
|
||||
j.Configuration.Query.UseLegacySql = &ptrue
|
||||
j.Configuration.Query.ForceSendFields = nil
|
||||
return j
|
||||
}(),
|
||||
@@ -351,9 +352,12 @@ func TestConfiguringQuery(t *testing.T) {
|
||||
query.DefaultDatasetID = "def-dataset-id"
|
||||
query.TimePartitioning = &TimePartitioning{Expiration: 1234 * time.Second, Field: "f"}
|
||||
query.DestinationEncryptionConfig = &EncryptionConfig{KMSKeyName: "keyName"}
|
||||
query.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}
|
||||
|
||||
// Note: Other configuration fields are tested in other tests above.
|
||||
// A lot of that can be consolidated once Client.Copy is gone.
|
||||
|
||||
pfalse := false
|
||||
want := &bq.Job{
|
||||
Configuration: &bq.JobConfiguration{
|
||||
Query: &bq.JobConfigurationQuery{
|
||||
@@ -362,10 +366,10 @@ func TestConfiguringQuery(t *testing.T) {
|
||||
ProjectId: "def-project-id",
|
||||
DatasetId: "def-dataset-id",
|
||||
},
|
||||
UseLegacySql: false,
|
||||
UseLegacySql: &pfalse,
|
||||
TimePartitioning: &bq.TimePartitioning{ExpirationMs: 1234000, Field: "f", Type: "DAY"},
|
||||
DestinationEncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
|
||||
ForceSendFields: []string{"UseLegacySql"},
|
||||
SchemaUpdateOptions: []string{"ALLOW_FIELD_ADDITION"},
|
||||
},
|
||||
},
|
||||
JobReference: &bq.JobReference{
|
||||
|
||||
2
vendor/cloud.google.com/go/bigtable/admin.go
generated
vendored
2
vendor/cloud.google.com/go/bigtable/admin.go
generated
vendored
@@ -867,7 +867,7 @@ func (iac *InstanceAdminClient) Clusters(ctx context.Context, instanceId string)
|
||||
// GetCluster fetches a cluster in an instance
|
||||
func (iac *InstanceAdminClient) GetCluster(ctx context.Context, instanceID, clusterID string) (*ClusterInfo, error) {
|
||||
ctx = mergeOutgoingMetadata(ctx, iac.md)
|
||||
req := &btapb.GetClusterRequest{Name: "projects/" + iac.project + "/instances/" + instanceID + "/clusters" + clusterID}
|
||||
req := &btapb.GetClusterRequest{Name: "projects/" + iac.project + "/instances/" + instanceID + "/clusters/" + clusterID}
|
||||
c, err := iac.iClient.GetCluster(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
11
vendor/cloud.google.com/go/bigtable/admin_test.go
generated
vendored
11
vendor/cloud.google.com/go/bigtable/admin_test.go
generated
vendored
@@ -15,18 +15,17 @@
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/testutil"
|
||||
|
||||
"fmt"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func TestAdminIntegration(t *testing.T) {
|
||||
@@ -102,7 +101,7 @@ func TestAdminIntegration(t *testing.T) {
|
||||
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
|
||||
}
|
||||
|
||||
adminClient.WaitForReplication(ctx, "mytable")
|
||||
must(adminClient.WaitForReplication(ctx, "mytable"))
|
||||
|
||||
if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil {
|
||||
t.Fatalf("Deleting table: %v", err)
|
||||
@@ -172,13 +171,13 @@ func TestAdminIntegration(t *testing.T) {
|
||||
}
|
||||
|
||||
var gotRowCount int
|
||||
tbl.ReadRows(ctx, RowRange{}, func(row Row) bool {
|
||||
must(tbl.ReadRows(ctx, RowRange{}, func(row Row) bool {
|
||||
gotRowCount += 1
|
||||
if !strings.HasPrefix(row.Key(), "b") {
|
||||
t.Errorf("Invalid row after dropping range: %v", row)
|
||||
}
|
||||
return true
|
||||
})
|
||||
}))
|
||||
if gotRowCount != 5 {
|
||||
t.Errorf("Invalid row count after dropping range: got %v, want %v", gotRowCount, 5)
|
||||
}
|
||||
|
||||
3
vendor/cloud.google.com/go/bigtable/bigtable.go
generated
vendored
3
vendor/cloud.google.com/go/bigtable/bigtable.go
generated
vendored
@@ -192,6 +192,9 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts
|
||||
tracePrintf(ctx, attrMap, "Retry details in ReadRows")
|
||||
return err
|
||||
}
|
||||
attrMap["time_secs"] = time.Since(startTime).Seconds()
|
||||
attrMap["rowCount"] = len(res.Chunks)
|
||||
tracePrintf(ctx, attrMap, "Details in ReadRows")
|
||||
|
||||
for _, cc := range res.Chunks {
|
||||
row, err := cr.Process(cc)
|
||||
|
||||
55
vendor/cloud.google.com/go/bigtable/bttest/inmem.go
generated
vendored
55
vendor/cloud.google.com/go/bigtable/bttest/inmem.go
generated
vendored
@@ -317,7 +317,8 @@ func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRo
|
||||
return true
|
||||
}
|
||||
|
||||
if req.Rows != nil {
|
||||
if req.Rows != nil &&
|
||||
len(req.Rows.RowKeys)+len(req.Rows.RowRanges) > 0 {
|
||||
// Add the explicitly given keys
|
||||
for _, key := range req.Rows.RowKeys {
|
||||
k := string(key)
|
||||
@@ -698,8 +699,7 @@ func (s *server) MutateRows(req *btpb.MutateRowsRequest, stream btpb.Bigtable_Mu
|
||||
}
|
||||
r.mu.Unlock()
|
||||
}
|
||||
stream.Send(res)
|
||||
return nil
|
||||
return stream.Send(res)
|
||||
}
|
||||
|
||||
func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutateRowRequest) (*btpb.CheckAndMutateRowResponse, error) {
|
||||
@@ -861,12 +861,13 @@ func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWri
|
||||
if !ok {
|
||||
return nil, status.Errorf(codes.NotFound, "table %q not found", req.TableName)
|
||||
}
|
||||
updates := make(map[string]cell) // copy of updated cells; keyed by full column name
|
||||
|
||||
fs := tbl.columnFamilies()
|
||||
|
||||
rowKey := string(req.RowKey)
|
||||
r := tbl.mutableRow(rowKey)
|
||||
resultRow := newRow(rowKey) // copy of updated cells
|
||||
|
||||
// This must be done before the row lock, acquired below, is released.
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
@@ -914,35 +915,37 @@ func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWri
|
||||
binary.BigEndian.PutUint64(val[:], uint64(v))
|
||||
newCell = cell{ts: ts, value: val[:]}
|
||||
}
|
||||
key := strings.Join([]string{fam, col}, ":")
|
||||
updates[key] = newCell
|
||||
|
||||
// Store the new cell
|
||||
f.cells[col] = appendOrReplaceCell(f.cellsByColumn(col), newCell)
|
||||
|
||||
// Store a copy for the result row
|
||||
resultFamily := resultRow.getOrCreateFamily(fam, fs[fam].order)
|
||||
resultFamily.cellsByColumn(col) // create the column
|
||||
resultFamily.cells[col] = []cell{newCell} // overwrite the cells
|
||||
}
|
||||
|
||||
// Build the response using the result row
|
||||
res := &btpb.Row{
|
||||
Key: req.RowKey,
|
||||
Key: req.RowKey,
|
||||
Families: make([]*btpb.Family, len(resultRow.families)),
|
||||
}
|
||||
for col, cell := range updates {
|
||||
i := strings.Index(col, ":")
|
||||
fam, qual := col[:i], col[i+1:]
|
||||
var f *btpb.Family
|
||||
for _, ff := range res.Families {
|
||||
if ff.Name == fam {
|
||||
f = ff
|
||||
break
|
||||
|
||||
for i, family := range resultRow.sortedFamilies() {
|
||||
res.Families[i] = &btpb.Family{
|
||||
Name: family.name,
|
||||
Columns: make([]*btpb.Column, len(family.colNames)),
|
||||
}
|
||||
|
||||
for j, colName := range family.colNames {
|
||||
res.Families[i].Columns[j] = &btpb.Column{
|
||||
Qualifier: []byte(colName),
|
||||
Cells: []*btpb.Cell{{
|
||||
TimestampMicros: family.cells[colName][0].ts,
|
||||
Value: family.cells[colName][0].value,
|
||||
}},
|
||||
}
|
||||
}
|
||||
if f == nil {
|
||||
f = &btpb.Family{Name: fam}
|
||||
res.Families = append(res.Families, f)
|
||||
}
|
||||
f.Columns = append(f.Columns, &btpb.Column{
|
||||
Qualifier: []byte(qual),
|
||||
Cells: []*btpb.Cell{{
|
||||
TimestampMicros: cell.ts,
|
||||
Value: cell.value,
|
||||
}},
|
||||
})
|
||||
}
|
||||
return &btpb.ReadModifyWriteRowResponse{Row: res}, nil
|
||||
}
|
||||
|
||||
91
vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go
generated
vendored
91
vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go
generated
vendored
@@ -23,6 +23,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
"golang.org/x/net/context"
|
||||
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
|
||||
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
|
||||
@@ -99,7 +101,9 @@ func TestConcurrentMutationsReadModifyAndGC(t *testing.T) {
|
||||
RowKey: []byte(fmt.Sprint(rand.Intn(100))),
|
||||
Mutations: ms(),
|
||||
}
|
||||
s.MutateRow(ctx, req)
|
||||
if _, err := s.MutateRow(ctx, req); err != nil {
|
||||
panic(err) // can't use t.Fatal in goroutine
|
||||
}
|
||||
}
|
||||
}()
|
||||
wg.Add(1)
|
||||
@@ -548,7 +552,9 @@ func TestReadRowsOrder(t *testing.T) {
|
||||
}
|
||||
}
|
||||
for i := count; i > 0; i-- {
|
||||
s.ReadModifyWriteRow(ctx, rmw(i))
|
||||
if _, err := s.ReadModifyWriteRow(ctx, rmw(i)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
req = &btpb.ReadRowsRequest{
|
||||
TableName: tblInfo.Name,
|
||||
@@ -621,6 +627,87 @@ func TestCheckAndMutateRowWithoutPredicate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestServer_ReadModifyWriteRow(t *testing.T) {
|
||||
s := &server{
|
||||
tables: make(map[string]*table),
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
newTbl := btapb.Table{
|
||||
ColumnFamilies: map[string]*btapb.ColumnFamily{
|
||||
"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}},
|
||||
},
|
||||
}
|
||||
tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
|
||||
if err != nil {
|
||||
t.Fatalf("Creating table: %v", err)
|
||||
}
|
||||
|
||||
req := &btpb.ReadModifyWriteRowRequest{
|
||||
TableName: tbl.Name,
|
||||
RowKey: []byte("row-key"),
|
||||
Rules: []*btpb.ReadModifyWriteRule{
|
||||
{
|
||||
FamilyName: "cf",
|
||||
ColumnQualifier: []byte("q1"),
|
||||
Rule: &btpb.ReadModifyWriteRule_AppendValue{
|
||||
AppendValue: []byte("a"),
|
||||
},
|
||||
},
|
||||
// multiple ops for same cell
|
||||
{
|
||||
FamilyName: "cf",
|
||||
ColumnQualifier: []byte("q1"),
|
||||
Rule: &btpb.ReadModifyWriteRule_AppendValue{
|
||||
AppendValue: []byte("b"),
|
||||
},
|
||||
},
|
||||
// different cell whose qualifier should sort before the prior rules
|
||||
{
|
||||
FamilyName: "cf",
|
||||
ColumnQualifier: []byte("q0"),
|
||||
Rule: &btpb.ReadModifyWriteRule_IncrementAmount{
|
||||
IncrementAmount: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
got, err := s.ReadModifyWriteRow(ctx, req)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("ReadModifyWriteRow error: %v", err)
|
||||
}
|
||||
|
||||
want := &btpb.ReadModifyWriteRowResponse{
|
||||
Row: &btpb.Row{
|
||||
Key: []byte("row-key"),
|
||||
Families: []*btpb.Family{{
|
||||
Name: "cf",
|
||||
Columns: []*btpb.Column{
|
||||
{
|
||||
Qualifier: []byte("q0"),
|
||||
Cells: []*btpb.Cell{{
|
||||
Value: []byte{0, 0, 0, 0, 0, 0, 0, 1},
|
||||
}},
|
||||
},
|
||||
{
|
||||
Qualifier: []byte("q1"),
|
||||
Cells: []*btpb.Cell{{
|
||||
Value: []byte("ab"),
|
||||
}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
diff := cmp.Diff(got, want, cmpopts.IgnoreFields(btpb.Cell{}, "TimestampMicros"))
|
||||
if diff != "" {
|
||||
t.Errorf("unexpected response: %s", diff)
|
||||
}
|
||||
}
|
||||
|
||||
// helper function to populate table data
|
||||
func populateTable(ctx context.Context, s *server) (*btapb.Table, error) {
|
||||
newTbl := btapb.Table{
|
||||
|
||||
55
vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go
generated
vendored
55
vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go
generated
vendored
@@ -332,7 +332,8 @@ var commands = []struct {
|
||||
Name: "lookup",
|
||||
Desc: "Read from a single row",
|
||||
do: doLookup,
|
||||
Usage: "cbt lookup <table> <row> [app-profile=<app profile id>]\n" +
|
||||
Usage: "cbt lookup <table> <row> [cells-per-column=<n>] [app-profile=<app profile id>]\n" +
|
||||
" cells-per-column=<n> Read only this many cells per column\n" +
|
||||
" app-profile=<app profile id> The app profile id to use for the request (replication alpha)\n",
|
||||
Required: cbtconfig.ProjectAndInstanceRequired,
|
||||
},
|
||||
@@ -356,12 +357,13 @@ var commands = []struct {
|
||||
Desc: "Read rows",
|
||||
do: doRead,
|
||||
Usage: "cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>]" +
|
||||
" [regex=<regex>] [count=<n>] [app-profile=<app profile id>]\n" +
|
||||
" [regex=<regex>] [count=<n>] [cells-per-column=<n>] [app-profile=<app profile id>]\n" +
|
||||
" start=<row> Start reading at this row\n" +
|
||||
" end=<row> Stop reading before this row\n" +
|
||||
" prefix=<prefix> Read rows with this prefix\n" +
|
||||
" regex=<regex> Read rows with keys matching this regex\n" +
|
||||
" count=<n> Read only this many rows\n" +
|
||||
" cells-per-column=<n> Read only this many cells per column\n" +
|
||||
" app-profile=<app profile id> The app profile id to use for the request (replication alpha)\n",
|
||||
Required: cbtconfig.ProjectAndInstanceRequired,
|
||||
},
|
||||
@@ -850,19 +852,34 @@ func doListClusters(ctx context.Context, args ...string) {
|
||||
|
||||
func doLookup(ctx context.Context, args ...string) {
|
||||
if len(args) < 2 {
|
||||
log.Fatalf("usage: cbt lookup <table> <row> [app-profile=<app profile id>]")
|
||||
log.Fatalf("usage: cbt lookup <table> <row> [cells-per-column=<n>] [app-profile=<app profile id>]")
|
||||
}
|
||||
var appProfile string
|
||||
if len(args) > 2 {
|
||||
i := strings.Index(args[2], "=")
|
||||
|
||||
parsed := make(map[string]string)
|
||||
for _, arg := range args[2:] {
|
||||
i := strings.Index(arg, "=")
|
||||
if i < 0 {
|
||||
log.Fatalf("Bad arg %q", args[2])
|
||||
log.Fatalf("Bad arg %q", arg)
|
||||
}
|
||||
appProfile = strings.Split(args[2], "=")[1]
|
||||
key, val := arg[:i], arg[i+1:]
|
||||
switch key {
|
||||
default:
|
||||
log.Fatalf("Unknown arg key %q", key)
|
||||
case "cells-per-column", "app-profile":
|
||||
parsed[key] = val
|
||||
}
|
||||
}
|
||||
var opts []bigtable.ReadOption
|
||||
if cellsPerColumn := parsed["cells-per-column"]; cellsPerColumn != "" {
|
||||
n, err := strconv.Atoi(cellsPerColumn)
|
||||
if err != nil {
|
||||
log.Fatalf("Bad number of cells per column %q: %v", cellsPerColumn, err)
|
||||
}
|
||||
opts = append(opts, bigtable.RowFilter(bigtable.LatestNFilter(n)))
|
||||
}
|
||||
table, row := args[0], args[1]
|
||||
tbl := getClient(bigtable.ClientConfig{AppProfile: appProfile}).Open(table)
|
||||
r, err := tbl.ReadRow(ctx, row)
|
||||
tbl := getClient(bigtable.ClientConfig{AppProfile: parsed["app-profile"]}).Open(table)
|
||||
r, err := tbl.ReadRow(ctx, row, opts...)
|
||||
if err != nil {
|
||||
log.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
@@ -995,7 +1012,7 @@ func doRead(ctx context.Context, args ...string) {
|
||||
case "limit":
|
||||
// Be nicer; we used to support this, but renamed it to "end".
|
||||
log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end")
|
||||
case "start", "end", "prefix", "count", "regex", "app-profile":
|
||||
case "start", "end", "prefix", "count", "cells-per-column", "regex", "app-profile":
|
||||
parsed[key] = val
|
||||
}
|
||||
}
|
||||
@@ -1021,8 +1038,22 @@ func doRead(ctx context.Context, args ...string) {
|
||||
}
|
||||
opts = append(opts, bigtable.LimitRows(n))
|
||||
}
|
||||
|
||||
var filters []bigtable.Filter
|
||||
if cellsPerColumn := parsed["cells-per-column"]; cellsPerColumn != "" {
|
||||
n, err := strconv.Atoi(cellsPerColumn)
|
||||
if err != nil {
|
||||
log.Fatalf("Bad number of cells per column %q: %v", cellsPerColumn, err)
|
||||
}
|
||||
filters = append(filters, bigtable.LatestNFilter(n))
|
||||
}
|
||||
if regex := parsed["regex"]; regex != "" {
|
||||
opts = append(opts, bigtable.RowFilter(bigtable.RowKeyFilter(regex)))
|
||||
filters = append(filters, bigtable.RowKeyFilter(regex))
|
||||
}
|
||||
if len(filters) > 1 {
|
||||
opts = append(opts, bigtable.RowFilter(bigtable.ChainFilters(filters...)))
|
||||
} else if len(filters) == 1 {
|
||||
opts = append(opts, bigtable.RowFilter(filters[0]))
|
||||
}
|
||||
|
||||
// TODO(dsymonds): Support filters.
|
||||
|
||||
2
vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go
generated
vendored
2
vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go
generated
vendored
@@ -123,7 +123,7 @@ func main() {
|
||||
go func() {
|
||||
s := <-c
|
||||
log.Printf("Caught %v, cleaning scratch table.", s)
|
||||
adminClient.DeleteTable(context.Background(), *scratchTable)
|
||||
_ = adminClient.DeleteTable(context.Background(), *scratchTable)
|
||||
os.Exit(1)
|
||||
}()
|
||||
|
||||
|
||||
10
vendor/cloud.google.com/go/bigtable/doc.go
generated
vendored
10
vendor/cloud.google.com/go/bigtable/doc.go
generated
vendored
@@ -19,6 +19,10 @@ Package bigtable is an API to Google Cloud Bigtable.
|
||||
|
||||
See https://cloud.google.com/bigtable/docs/ for general product documentation.
|
||||
|
||||
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
|
||||
connection pooling and similar aspects of this package.
|
||||
|
||||
|
||||
Setup and Credentials
|
||||
|
||||
Use NewClient or NewAdminClient to create a client that can be used to access
|
||||
@@ -92,12 +96,6 @@ If a read or write operation encounters a transient error it will be retried unt
|
||||
response, an unretryable error or the context deadline is reached. Non-idempotent writes (where
|
||||
the timestamp is set to ServerTime) will not be retried. In the case of ReadRows, retried calls
|
||||
will not re-scan rows that have already been processed.
|
||||
|
||||
Authentication
|
||||
|
||||
See examples of authorization and authentication at
|
||||
https://godoc.org/cloud.google.com/go#pkg-examples.
|
||||
|
||||
*/
|
||||
package bigtable // import "cloud.google.com/go/bigtable"
|
||||
|
||||
|
||||
2
vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go
generated
vendored
2
vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go
generated
vendored
@@ -34,7 +34,7 @@ func TestRandomizedDelays(t *testing.T) {
|
||||
deadline := time.Now().Add(1 * time.Second)
|
||||
ctx, _ := context.WithDeadline(context.Background(), deadline)
|
||||
var invokeTime time.Time
|
||||
Invoke(ctx, func(childCtx context.Context) error {
|
||||
_ = Invoke(ctx, func(childCtx context.Context) error {
|
||||
// Keep failing, make sure we never slept more than max (plus a fudge factor)
|
||||
if !invokeTime.IsZero() {
|
||||
if got, want := time.Since(invokeTime), max; got > (want + 20*time.Millisecond) {
|
||||
|
||||
11
vendor/cloud.google.com/go/bigtable/internal/stat/stats.go
generated
vendored
11
vendor/cloud.google.com/go/bigtable/internal/stat/stats.go
generated
vendored
@@ -123,10 +123,15 @@ func (agg *Aggregate) String() string {
|
||||
|
||||
// WriteCSV writes a csv file to the given Writer,
|
||||
// with a header row and one row per aggregate.
|
||||
func WriteCSV(aggs []*Aggregate, iow io.Writer) error {
|
||||
func WriteCSV(aggs []*Aggregate, iow io.Writer) (err error) {
|
||||
w := csv.NewWriter(iow)
|
||||
defer w.Flush()
|
||||
err := w.Write([]string{"name", "count", "errors", "min", "median", "max", "p75", "p90", "p95", "p99"})
|
||||
defer func() {
|
||||
w.Flush()
|
||||
if err == nil {
|
||||
err = w.Error()
|
||||
}
|
||||
}()
|
||||
err = w.Write([]string{"name", "count", "errors", "min", "median", "max", "p75", "p90", "p95", "p99"})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
33
vendor/cloud.google.com/go/bigtable/reader_test.go
generated
vendored
33
vendor/cloud.google.com/go/bigtable/reader_test.go
generated
vendored
@@ -60,10 +60,10 @@ func TestSingleCell(t *testing.T) {
|
||||
func TestMultipleCells(t *testing.T) {
|
||||
cr := newChunkReader()
|
||||
|
||||
cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false))
|
||||
cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false))
|
||||
cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false))
|
||||
cr.Process(cc("rs", "fm2", "col1", 0, "val4", 0, false))
|
||||
mustProcess(t, cr, cc("rs", "fm1", "col1", 0, "val1", 0, false))
|
||||
mustProcess(t, cr, cc("rs", "fm1", "col1", 1, "val2", 0, false))
|
||||
mustProcess(t, cr, cc("rs", "fm1", "col2", 0, "val3", 0, false))
|
||||
mustProcess(t, cr, cc("rs", "fm2", "col1", 0, "val4", 0, false))
|
||||
row, err := cr.Process(cc("rs", "fm2", "col2", 1, "extralongval5", 0, true))
|
||||
if err != nil {
|
||||
t.Fatalf("Processing chunk: %v", err)
|
||||
@@ -95,8 +95,8 @@ func TestMultipleCells(t *testing.T) {
|
||||
func TestSplitCells(t *testing.T) {
|
||||
cr := newChunkReader()
|
||||
|
||||
cr.Process(cc("rs", "fm1", "col1", 0, "hello ", 11, false))
|
||||
cr.Process(ccData("world", 0, false))
|
||||
mustProcess(t, cr, cc("rs", "fm1", "col1", 0, "hello ", 11, false))
|
||||
mustProcess(t, cr, ccData("world", 0, false))
|
||||
row, err := cr.Process(cc("rs", "fm1", "col2", 0, "val2", 0, true))
|
||||
if err != nil {
|
||||
t.Fatalf("Processing chunk: %v", err)
|
||||
@@ -171,12 +171,11 @@ func TestBlankQualifier(t *testing.T) {
|
||||
|
||||
func TestReset(t *testing.T) {
|
||||
cr := newChunkReader()
|
||||
|
||||
cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false))
|
||||
cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false))
|
||||
cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false))
|
||||
cr.Process(ccReset())
|
||||
row, _ := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true))
|
||||
mustProcess(t, cr, cc("rs", "fm1", "col1", 0, "val1", 0, false))
|
||||
mustProcess(t, cr, cc("rs", "fm1", "col1", 1, "val2", 0, false))
|
||||
mustProcess(t, cr, cc("rs", "fm1", "col2", 0, "val3", 0, false))
|
||||
mustProcess(t, cr, ccReset())
|
||||
row := mustProcess(t, cr, cc("rs1", "fm1", "col1", 1, "val1", 0, true))
|
||||
want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")}
|
||||
if !testutil.Equal(row["fm1"], want) {
|
||||
t.Fatalf("Reset: got: %v\nwant: %v\n", row["fm1"], want)
|
||||
@@ -189,13 +188,21 @@ func TestReset(t *testing.T) {
|
||||
func TestNewFamEmptyQualifier(t *testing.T) {
|
||||
cr := newChunkReader()
|
||||
|
||||
cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false))
|
||||
mustProcess(t, cr, cc("rs", "fm1", "col1", 0, "val1", 0, false))
|
||||
_, err := cr.Process(cc(nilStr, "fm2", nilStr, 0, "val2", 0, true))
|
||||
if err == nil {
|
||||
t.Fatalf("Expected error on second chunk with no qualifier set")
|
||||
}
|
||||
}
|
||||
|
||||
func mustProcess(t *testing.T, cr *chunkReader, cc *btspb.ReadRowsResponse_CellChunk) Row {
|
||||
row, err := cr.Process(cc)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return row
|
||||
}
|
||||
|
||||
// The read rows acceptance test reads a json file specifying a number of tests,
|
||||
// each consisting of one or more cell chunk text protos and one or more resulting
|
||||
// cells or errors.
|
||||
|
||||
31
vendor/cloud.google.com/go/bigtable/retry_test.go
generated
vendored
31
vendor/cloud.google.com/go/bigtable/retry_test.go
generated
vendored
@@ -179,28 +179,28 @@ func TestRetryApplyBulk(t *testing.T) {
|
||||
f = func(ss grpc.ServerStream) error {
|
||||
var err error
|
||||
req := new(btpb.MutateRowsRequest)
|
||||
ss.RecvMsg(req)
|
||||
must(ss.RecvMsg(req))
|
||||
switch errCount {
|
||||
case 0:
|
||||
// Retryable request failure
|
||||
err = status.Errorf(codes.Unavailable, "")
|
||||
case 1:
|
||||
// Two mutations fail
|
||||
writeMutateRowsResponse(ss, codes.Unavailable, codes.OK, codes.Aborted)
|
||||
must(writeMutateRowsResponse(ss, codes.Unavailable, codes.OK, codes.Aborted))
|
||||
err = nil
|
||||
case 2:
|
||||
// Two failures were retried. One will succeed.
|
||||
if want, got := 2, len(req.Entries); want != got {
|
||||
t.Errorf("2 bulk retries, got: %d, want %d", got, want)
|
||||
}
|
||||
writeMutateRowsResponse(ss, codes.OK, codes.Aborted)
|
||||
must(writeMutateRowsResponse(ss, codes.OK, codes.Aborted))
|
||||
err = nil
|
||||
case 3:
|
||||
// One failure was retried and will succeed.
|
||||
if want, got := 1, len(req.Entries); want != got {
|
||||
t.Errorf("1 bulk retry, got: %d, want %d", got, want)
|
||||
}
|
||||
writeMutateRowsResponse(ss, codes.OK)
|
||||
must(writeMutateRowsResponse(ss, codes.OK))
|
||||
err = nil
|
||||
}
|
||||
errCount++
|
||||
@@ -218,12 +218,12 @@ func TestRetryApplyBulk(t *testing.T) {
|
||||
f = func(ss grpc.ServerStream) error {
|
||||
var err error
|
||||
req := new(btpb.MutateRowsRequest)
|
||||
ss.RecvMsg(req)
|
||||
must(ss.RecvMsg(req))
|
||||
switch errCount {
|
||||
case 0:
|
||||
// Give non-idempotent mutation a retryable error code.
|
||||
// Nothing should be retried.
|
||||
writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.Aborted)
|
||||
must(writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.Aborted))
|
||||
err = nil
|
||||
case 1:
|
||||
t.Errorf("unretryable errors: got one retry, want no retries")
|
||||
@@ -245,8 +245,7 @@ func TestRetryApplyBulk(t *testing.T) {
|
||||
|
||||
// Test individual errors and a deadline exceeded
|
||||
f = func(ss grpc.ServerStream) error {
|
||||
writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.OK, codes.Aborted)
|
||||
return nil
|
||||
return writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.OK, codes.Aborted)
|
||||
}
|
||||
ctx, _ = context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3})
|
||||
@@ -320,7 +319,7 @@ func TestRetryReadRows(t *testing.T) {
|
||||
f = func(ss grpc.ServerStream) error {
|
||||
var err error
|
||||
req := new(btpb.ReadRowsRequest)
|
||||
ss.RecvMsg(req)
|
||||
must(ss.RecvMsg(req))
|
||||
switch errCount {
|
||||
case 0:
|
||||
// Retryable request failure
|
||||
@@ -330,7 +329,7 @@ func TestRetryReadRows(t *testing.T) {
|
||||
if want, got := "a", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got {
|
||||
t.Errorf("first retry, no data received yet: got %q, want %q", got, want)
|
||||
}
|
||||
writeReadRowsResponse(ss, "a", "b")
|
||||
must(writeReadRowsResponse(ss, "a", "b"))
|
||||
err = status.Errorf(codes.Unavailable, "")
|
||||
case 2:
|
||||
// Retryable request failure
|
||||
@@ -340,7 +339,7 @@ func TestRetryReadRows(t *testing.T) {
|
||||
err = status.Errorf(codes.Unavailable, "")
|
||||
case 3:
|
||||
// Write two more rows
|
||||
writeReadRowsResponse(ss, "c", "d")
|
||||
must(writeReadRowsResponse(ss, "c", "d"))
|
||||
err = nil
|
||||
}
|
||||
errCount++
|
||||
@@ -348,10 +347,10 @@ func TestRetryReadRows(t *testing.T) {
|
||||
}
|
||||
|
||||
var got []string
|
||||
tbl.ReadRows(ctx, NewRange("a", "z"), func(r Row) bool {
|
||||
must(tbl.ReadRows(ctx, NewRange("a", "z"), func(r Row) bool {
|
||||
got = append(got, r.Key())
|
||||
return true
|
||||
})
|
||||
}))
|
||||
want := []string{"a", "b", "c", "d"}
|
||||
if !testutil.Equal(got, want) {
|
||||
t.Errorf("retry range integration: got %v, want %v", got, want)
|
||||
@@ -370,3 +369,9 @@ func writeReadRowsResponse(ss grpc.ServerStream, rowKeys ...string) error {
|
||||
}
|
||||
return ss.SendMsg(&btpb.ReadRowsResponse{Chunks: chunks})
|
||||
}
|
||||
|
||||
func must(err error) {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
35
vendor/cloud.google.com/go/cloud.go
generated
vendored
35
vendor/cloud.google.com/go/cloud.go
generated
vendored
@@ -17,8 +17,26 @@ Package cloud is the root of the packages used to access Google Cloud
|
||||
Services. See https://godoc.org/cloud.google.com/go for a full list
|
||||
of sub-packages.
|
||||
|
||||
Examples in this package show ways to authorize and authenticate the
|
||||
sub packages.
|
||||
|
||||
Authentication and Authorization
|
||||
|
||||
All the clients in sub-packages support authentication via Google Application Default
|
||||
Credentials (see https://cloud.google.com/docs/authentication/production), or
|
||||
by providing a JSON key file for a Service Account. See the authentication examples
|
||||
in this package for details.
|
||||
|
||||
|
||||
Timeouts and Cancellation
|
||||
|
||||
By default, all requests in sub-packages will run indefinitely, retrying on transient
|
||||
errors when correctness allows. To set timeouts or arrange for cancellation, use
|
||||
contexts. See the examples for details.
|
||||
|
||||
Do not attempt to control the initial connection (dialing) of a service by setting a
|
||||
timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts
|
||||
would be ineffective and would only interfere with credential refreshing, which uses
|
||||
the same context.
|
||||
|
||||
|
||||
Connection Pooling
|
||||
|
||||
@@ -36,5 +54,18 @@ of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a clie
|
||||
option to NewClient calls. This configures the underlying gRPC connections to be
|
||||
pooled and addressed in a round robin fashion.
|
||||
|
||||
|
||||
Using the Libraries with Docker
|
||||
|
||||
Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to
|
||||
hang, because gRPC retries indefinitely. See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/928
|
||||
for more information.
|
||||
|
||||
Debugging
|
||||
|
||||
To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See
|
||||
https://godoc.org/google.golang.org/grpc/grpclog for more information.
|
||||
|
||||
For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2".
|
||||
*/
|
||||
package cloud // import "cloud.google.com/go"
|
||||
|
||||
763
vendor/cloud.google.com/go/cloudtasks/apiv2beta2/cloud_tasks_client.go
generated
vendored
Normal file
763
vendor/cloud.google.com/go/cloudtasks/apiv2beta2/cloud_tasks_client.go
generated
vendored
Normal file
@@ -0,0 +1,763 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package cloudtasks
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/version"
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/transport"
|
||||
taskspb "google.golang.org/genproto/googleapis/cloud/tasks/v2beta2"
|
||||
iampb "google.golang.org/genproto/googleapis/iam/v1"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// CallOptions contains the retry settings for each method of Client.
|
||||
type CallOptions struct {
|
||||
ListQueues []gax.CallOption
|
||||
GetQueue []gax.CallOption
|
||||
CreateQueue []gax.CallOption
|
||||
UpdateQueue []gax.CallOption
|
||||
DeleteQueue []gax.CallOption
|
||||
PurgeQueue []gax.CallOption
|
||||
PauseQueue []gax.CallOption
|
||||
ResumeQueue []gax.CallOption
|
||||
GetIamPolicy []gax.CallOption
|
||||
SetIamPolicy []gax.CallOption
|
||||
TestIamPermissions []gax.CallOption
|
||||
ListTasks []gax.CallOption
|
||||
GetTask []gax.CallOption
|
||||
CreateTask []gax.CallOption
|
||||
DeleteTask []gax.CallOption
|
||||
LeaseTasks []gax.CallOption
|
||||
AcknowledgeTask []gax.CallOption
|
||||
RenewLease []gax.CallOption
|
||||
CancelLease []gax.CallOption
|
||||
RunTask []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
option.WithEndpoint("cloudtasks.googleapis.com:443"),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultCallOptions() *CallOptions {
|
||||
retry := map[[2]string][]gax.CallOption{
|
||||
{"default", "idempotent"}: {
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.3,
|
||||
})
|
||||
}),
|
||||
},
|
||||
}
|
||||
return &CallOptions{
|
||||
ListQueues: retry[[2]string{"default", "idempotent"}],
|
||||
GetQueue: retry[[2]string{"default", "idempotent"}],
|
||||
CreateQueue: retry[[2]string{"default", "non_idempotent"}],
|
||||
UpdateQueue: retry[[2]string{"default", "non_idempotent"}],
|
||||
DeleteQueue: retry[[2]string{"default", "non_idempotent"}],
|
||||
PurgeQueue: retry[[2]string{"default", "non_idempotent"}],
|
||||
PauseQueue: retry[[2]string{"default", "non_idempotent"}],
|
||||
ResumeQueue: retry[[2]string{"default", "non_idempotent"}],
|
||||
GetIamPolicy: retry[[2]string{"default", "idempotent"}],
|
||||
SetIamPolicy: retry[[2]string{"default", "non_idempotent"}],
|
||||
TestIamPermissions: retry[[2]string{"default", "idempotent"}],
|
||||
ListTasks: retry[[2]string{"default", "idempotent"}],
|
||||
GetTask: retry[[2]string{"default", "idempotent"}],
|
||||
CreateTask: retry[[2]string{"default", "non_idempotent"}],
|
||||
DeleteTask: retry[[2]string{"default", "idempotent"}],
|
||||
LeaseTasks: retry[[2]string{"default", "non_idempotent"}],
|
||||
AcknowledgeTask: retry[[2]string{"default", "non_idempotent"}],
|
||||
RenewLease: retry[[2]string{"default", "non_idempotent"}],
|
||||
CancelLease: retry[[2]string{"default", "non_idempotent"}],
|
||||
RunTask: retry[[2]string{"default", "non_idempotent"}],
|
||||
}
|
||||
}
|
||||
|
||||
// Client is a client for interacting with Cloud Tasks API.
|
||||
type Client struct {
|
||||
// The connection to the service.
|
||||
conn *grpc.ClientConn
|
||||
|
||||
// The gRPC API client.
|
||||
client taskspb.CloudTasksClient
|
||||
|
||||
// The call options for this service.
|
||||
CallOptions *CallOptions
|
||||
|
||||
// The x-goog-* metadata to be sent with each request.
|
||||
xGoogMetadata metadata.MD
|
||||
}
|
||||
|
||||
// NewClient creates a new cloud tasks client.
|
||||
//
|
||||
// Cloud Tasks allows developers to manage the execution of background
|
||||
// work in their applications.
|
||||
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
|
||||
conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &Client{
|
||||
conn: conn,
|
||||
CallOptions: defaultCallOptions(),
|
||||
|
||||
client: taskspb.NewCloudTasksClient(conn),
|
||||
}
|
||||
c.setGoogleClientInfo()
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Connection returns the client's connection to the API service.
|
||||
func (c *Client) Connection() *grpc.ClientConn {
|
||||
return c.conn
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *Client) Close() error {
|
||||
return c.conn.Close()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *Client) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", version.Go()}, keyval...)
|
||||
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
|
||||
}
|
||||
|
||||
// ListQueues lists queues.
|
||||
//
|
||||
// Queues are returned in lexicographical order.
|
||||
func (c *Client) ListQueues(ctx context.Context, req *taskspb.ListQueuesRequest, opts ...gax.CallOption) *QueueIterator {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.ListQueues[0:len(c.CallOptions.ListQueues):len(c.CallOptions.ListQueues)], opts...)
|
||||
it := &QueueIterator{}
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*taskspb.Queue, string, error) {
|
||||
var resp *taskspb.ListQueuesResponse
|
||||
req.PageToken = pageToken
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.ListQueues(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return resp.Queues, resp.NextPageToken, nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
return it
|
||||
}
|
||||
|
||||
// GetQueue gets a queue.
|
||||
func (c *Client) GetQueue(ctx context.Context, req *taskspb.GetQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.GetQueue[0:len(c.CallOptions.GetQueue):len(c.CallOptions.GetQueue)], opts...)
|
||||
var resp *taskspb.Queue
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.GetQueue(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// CreateQueue creates a queue.
|
||||
//
|
||||
// Queues created with this method allow tasks to live for a maximum of 31
|
||||
// days. After a task is 31 days old, the task will be deleted regardless of whether
|
||||
// it was dispatched or not.
|
||||
//
|
||||
// WARNING: Using this method may have unintended side effects if you are
|
||||
// using an App Engine queue.yaml or queue.xml file to manage your queues.
|
||||
// Read
|
||||
// Overview of Queue Management and queue.yaml (at /cloud-tasks/docs/queue-yaml)
|
||||
// before using this method.
|
||||
func (c *Client) CreateQueue(ctx context.Context, req *taskspb.CreateQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.CreateQueue[0:len(c.CallOptions.CreateQueue):len(c.CallOptions.CreateQueue)], opts...)
|
||||
var resp *taskspb.Queue
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.CreateQueue(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// UpdateQueue updates a queue.
|
||||
//
|
||||
// This method creates the queue if it does not exist and updates
|
||||
// the queue if it does exist.
|
||||
//
|
||||
// Queues created with this method allow tasks to live for a maximum of 31
|
||||
// days. After a task is 31 days old, the task will be deleted regardless of whether
|
||||
// it was dispatched or not.
|
||||
//
|
||||
// WARNING: Using this method may have unintended side effects if you are
|
||||
// using an App Engine queue.yaml or queue.xml file to manage your queues.
|
||||
// Read
|
||||
// Overview of Queue Management and queue.yaml (at /cloud-tasks/docs/queue-yaml)
|
||||
// before using this method.
|
||||
func (c *Client) UpdateQueue(ctx context.Context, req *taskspb.UpdateQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.UpdateQueue[0:len(c.CallOptions.UpdateQueue):len(c.CallOptions.UpdateQueue)], opts...)
|
||||
var resp *taskspb.Queue
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.UpdateQueue(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// DeleteQueue deletes a queue.
|
||||
//
|
||||
// This command will delete the queue even if it has tasks in it.
|
||||
//
|
||||
// Note: If you delete a queue, a queue with the same name can't be created
|
||||
// for 7 days.
|
||||
//
|
||||
// WARNING: Using this method may have unintended side effects if you are
|
||||
// using an App Engine queue.yaml or queue.xml file to manage your queues.
|
||||
// Read
|
||||
// Overview of Queue Management and queue.yaml (at /cloud-tasks/docs/queue-yaml)
|
||||
// before using this method.
|
||||
func (c *Client) DeleteQueue(ctx context.Context, req *taskspb.DeleteQueueRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.DeleteQueue[0:len(c.CallOptions.DeleteQueue):len(c.CallOptions.DeleteQueue)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = c.client.DeleteQueue(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
// PurgeQueue purges a queue by deleting all of its tasks.
|
||||
//
|
||||
// All tasks created before this method is called are permanently deleted.
|
||||
//
|
||||
// Purge operations can take up to one minute to take effect. Tasks
|
||||
// might be dispatched before the purge takes effect. A purge is irreversible.
|
||||
func (c *Client) PurgeQueue(ctx context.Context, req *taskspb.PurgeQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.PurgeQueue[0:len(c.CallOptions.PurgeQueue):len(c.CallOptions.PurgeQueue)], opts...)
|
||||
var resp *taskspb.Queue
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.PurgeQueue(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// PauseQueue pauses the queue.
|
||||
//
|
||||
// If a queue is paused then the system will stop dispatching tasks
|
||||
// until the queue is resumed via
|
||||
// [ResumeQueue][google.cloud.tasks.v2beta2.CloudTasks.ResumeQueue]. Tasks can still be added
|
||||
// when the queue is paused. A queue is paused if its
|
||||
// [state][google.cloud.tasks.v2beta2.Queue.state] is [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED].
|
||||
func (c *Client) PauseQueue(ctx context.Context, req *taskspb.PauseQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.PauseQueue[0:len(c.CallOptions.PauseQueue):len(c.CallOptions.PauseQueue)], opts...)
|
||||
var resp *taskspb.Queue
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.PauseQueue(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// ResumeQueue resume a queue.
|
||||
//
|
||||
// This method resumes a queue after it has been
|
||||
// [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED] or
|
||||
// [DISABLED][google.cloud.tasks.v2beta2.Queue.State.DISABLED]. The state of a queue is stored
|
||||
// in the queue's [state][google.cloud.tasks.v2beta2.Queue.state]; after calling this method it
|
||||
// will be set to [RUNNING][google.cloud.tasks.v2beta2.Queue.State.RUNNING].
|
||||
//
|
||||
// WARNING: Resuming many high-QPS queues at the same time can
|
||||
// lead to target overloading. If you are resuming high-QPS
|
||||
// queues, follow the 500/50/5 pattern described in
|
||||
// Managing Cloud Tasks Scaling Risks (at /cloud-tasks/pdfs/managing-cloud-tasks-scaling-risks-2017-06-05.pdf).
|
||||
func (c *Client) ResumeQueue(ctx context.Context, req *taskspb.ResumeQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.ResumeQueue[0:len(c.CallOptions.ResumeQueue):len(c.CallOptions.ResumeQueue)], opts...)
|
||||
var resp *taskspb.Queue
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.ResumeQueue(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// GetIamPolicy gets the access control policy for a [Queue][google.cloud.tasks.v2beta2.Queue].
|
||||
// Returns an empty policy if the resource exists and does not have a policy
|
||||
// set.
|
||||
//
|
||||
// Authorization requires the following Google IAM (at /iam) permission on the
|
||||
// specified resource parent:
|
||||
//
|
||||
// cloudtasks.queues.getIamPolicy
|
||||
func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.GetIamPolicy[0:len(c.CallOptions.GetIamPolicy):len(c.CallOptions.GetIamPolicy)], opts...)
|
||||
var resp *iampb.Policy
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.GetIamPolicy(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// SetIamPolicy sets the access control policy for a [Queue][google.cloud.tasks.v2beta2.Queue]. Replaces any existing
|
||||
// policy.
|
||||
//
|
||||
// Note: The Cloud Console does not check queue-level IAM permissions yet.
|
||||
// Project-level permissions are required to use the Cloud Console.
|
||||
//
|
||||
// Authorization requires the following Google IAM (at /iam) permission on the
|
||||
// specified resource parent:
|
||||
//
|
||||
// cloudtasks.queues.setIamPolicy
|
||||
func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.SetIamPolicy[0:len(c.CallOptions.SetIamPolicy):len(c.CallOptions.SetIamPolicy)], opts...)
|
||||
var resp *iampb.Policy
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.SetIamPolicy(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// TestIamPermissions returns permissions that a caller has on a [Queue][google.cloud.tasks.v2beta2.Queue].
|
||||
// If the resource does not exist, this will return an empty set of
|
||||
// permissions, not a [NOT_FOUND][google.rpc.Code.NOT_FOUND] error.
|
||||
//
|
||||
// Note: This operation is designed to be used for building permission-aware
|
||||
// UIs and command-line tools, not for authorization checking. This operation
|
||||
// may "fail open" without warning.
|
||||
func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.TestIamPermissions[0:len(c.CallOptions.TestIamPermissions):len(c.CallOptions.TestIamPermissions)], opts...)
|
||||
var resp *iampb.TestIamPermissionsResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.TestIamPermissions(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// ListTasks lists the tasks in a queue.
//
// By default, only the [BASIC][google.cloud.tasks.v2beta2.Task.View.BASIC] view is retrieved
// due to performance considerations;
// [response_view][google.cloud.tasks.v2beta2.ListTasksRequest.response_view] controls the
// subset of information which is returned.
func (c *Client) ListTasks(ctx context.Context, req *taskspb.ListTasksRequest, opts ...gax.CallOption) *TaskIterator {
	// Carry the client's x-goog-* headers on the outgoing gRPC metadata.
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression caps capacity so append copies instead of
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.ListTasks[0:len(c.CallOptions.ListTasks):len(c.CallOptions.ListTasks)], opts...)
	it := &TaskIterator{}
	// InternalFetch performs a single ListTasks RPC for one page; the
	// iterator framework invokes it lazily as the caller advances.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*taskspb.Task, string, error) {
		var resp *taskspb.ListTasksResponse
		req.PageToken = pageToken
		// The proto PageSize field is int32, so clamp larger requests.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTasks(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.Tasks, resp.NextPageToken, nil
	}
	// fetch adapts InternalFetch to the iterator.PageInfo contract,
	// accumulating each page's results into the iterator's buffer.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
|
||||
|
||||
// GetTask gets a task.
func (c *Client) GetTask(ctx context.Context, req *taskspb.GetTaskRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
	// Carry the client's x-goog-* headers on the outgoing gRPC metadata.
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression caps capacity so append copies instead of
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.GetTask[0:len(c.CallOptions.GetTask):len(c.CallOptions.GetTask)], opts...)
	var resp *taskspb.Task
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetTask(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// CreateTask creates a task and adds it to a queue.
//
// To add multiple tasks at the same time, use
// HTTP batching (at /storage/docs/json_api/v1/how-tos/batch)
// or the batching documentation for your client library, for example
// https://developers.google.com/api-client-library/python/guide/batch.
//
// Tasks cannot be updated after creation; there is no UpdateTask command.
//
// For App Engine queues (at google.cloud.tasks.v2beta2.AppEngineHttpTarget),
// the maximum task size is 100KB.
//
// For pull queues (at google.cloud.tasks.v2beta2.PullTarget),
// the maximum task size is 1MB.
func (c *Client) CreateTask(ctx context.Context, req *taskspb.CreateTaskRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.CreateTask[0:len(c.CallOptions.CreateTask):len(c.CallOptions.CreateTask)], opts...)
	var resp *taskspb.Task
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CreateTask(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// DeleteTask deletes a task.
//
// A task can be deleted if it is scheduled or dispatched. A task
// cannot be deleted if it has completed successfully or permanently
// failed.
func (c *Client) DeleteTask(ctx context.Context, req *taskspb.DeleteTaskRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.DeleteTask[0:len(c.CallOptions.DeleteTask):len(c.CallOptions.DeleteTask)], opts...)
	// The RPC's response message is discarded; only the error is surfaced.
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		_, err = c.client.DeleteTask(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
|
||||
|
||||
// LeaseTasks leases tasks from a pull queue for
// [lease_duration][google.cloud.tasks.v2beta2.LeaseTasksRequest.lease_duration].
//
// This method is invoked by the worker to obtain a lease. The
// worker must acknowledge the task via
// [AcknowledgeTask][google.cloud.tasks.v2beta2.CloudTasks.AcknowledgeTask] after they have
// performed the work associated with the task.
//
// The [payload][google.cloud.tasks.v2beta2.PullMessage.payload] is intended to store data that
// the worker needs to perform the work associated with the task. To
// return the payloads in the [response][google.cloud.tasks.v2beta2.LeaseTasksResponse], set
// [response_view][google.cloud.tasks.v2beta2.LeaseTasksRequest.response_view] to
// [FULL][google.cloud.tasks.v2beta2.Task.View.FULL].
//
// A maximum of 10 qps of [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks]
// requests are allowed per
// queue. [RESOURCE_EXHAUSTED][google.rpc.Code.RESOURCE_EXHAUSTED]
// is returned when this limit is
// exceeded. [RESOURCE_EXHAUSTED][google.rpc.Code.RESOURCE_EXHAUSTED]
// is also returned when
// [max_tasks_dispatched_per_second][google.cloud.tasks.v2beta2.RateLimits.max_tasks_dispatched_per_second]
// is exceeded.
func (c *Client) LeaseTasks(ctx context.Context, req *taskspb.LeaseTasksRequest, opts ...gax.CallOption) (*taskspb.LeaseTasksResponse, error) {
	// Carry the client's x-goog-* headers on the outgoing gRPC metadata.
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression caps capacity so append copies instead of
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.LeaseTasks[0:len(c.CallOptions.LeaseTasks):len(c.CallOptions.LeaseTasks)], opts...)
	var resp *taskspb.LeaseTasksResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.LeaseTasks(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// AcknowledgeTask acknowledges a pull task.
//
// The worker, that is, the entity that
// [leased][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks] this task must call this method
// to indicate that the work associated with the task has finished.
//
// The worker must acknowledge a task within the
// [lease_duration][google.cloud.tasks.v2beta2.LeaseTasksRequest.lease_duration] or the lease
// will expire and the task will become available to be leased
// again. After the task is acknowledged, it will not be returned
// by a later [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks],
// [GetTask][google.cloud.tasks.v2beta2.CloudTasks.GetTask], or
// [ListTasks][google.cloud.tasks.v2beta2.CloudTasks.ListTasks].
//
// To acknowledge multiple tasks at the same time, use
// HTTP batching (at /storage/docs/json_api/v1/how-tos/batch)
// or the batching documentation for your client library, for example
// https://developers.google.com/api-client-library/python/guide/batch.
func (c *Client) AcknowledgeTask(ctx context.Context, req *taskspb.AcknowledgeTaskRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.AcknowledgeTask[0:len(c.CallOptions.AcknowledgeTask):len(c.CallOptions.AcknowledgeTask)], opts...)
	// The RPC's response message is discarded; only the error is surfaced.
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		_, err = c.client.AcknowledgeTask(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
|
||||
|
||||
// RenewLease renews the current lease of a pull task.
//
// The worker can use this method to extend the lease by a new
// duration, starting from now. The new task lease will be
// returned in the task's [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time].
func (c *Client) RenewLease(ctx context.Context, req *taskspb.RenewLeaseRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
	// Carry the client's x-goog-* headers on the outgoing gRPC metadata.
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression caps capacity so append copies instead of
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.RenewLease[0:len(c.CallOptions.RenewLease):len(c.CallOptions.RenewLease)], opts...)
	var resp *taskspb.Task
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.RenewLease(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// CancelLease cancels a pull task's lease.
//
// The worker can use this method to cancel a task's lease by
// setting its [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time] to now. This will
// make the task available to be leased to the next caller of
// [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks].
func (c *Client) CancelLease(ctx context.Context, req *taskspb.CancelLeaseRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.CancelLease[0:len(c.CallOptions.CancelLease):len(c.CallOptions.CancelLease)], opts...)
	var resp *taskspb.Task
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CancelLease(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// RunTask forces a task to run now.
//
// When this method is called, Cloud Tasks will dispatch the task, even if
// the task is already running, the queue has reached its [RateLimits][google.cloud.tasks.v2beta2.RateLimits] or
// is [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED].
//
// This command is meant to be used for manual debugging. For
// example, [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] can be used to retry a failed
// task after a fix has been made or to manually force a task to be
// dispatched now.
//
// The dispatched task is returned. That is, the task that is returned
// contains the [status][google.cloud.tasks.v2beta2.Task.status] after the task is dispatched but
// before the task is received by its target.
//
// If Cloud Tasks receives a successful response from the task's
// target, then the task will be deleted; otherwise the task's
// [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time] will be reset to the time that
// [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] was called plus the retry delay specified
// in the queue's [RetryConfig][google.cloud.tasks.v2beta2.RetryConfig].
//
// [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] returns
// [NOT_FOUND][google.rpc.Code.NOT_FOUND] when it is called on a
// task that has already succeeded or permanently failed.
//
// [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] cannot be called on a
// [pull task][google.cloud.tasks.v2beta2.PullMessage].
func (c *Client) RunTask(ctx context.Context, req *taskspb.RunTaskRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.RunTask[0:len(c.CallOptions.RunTask):len(c.CallOptions.RunTask)], opts...)
	var resp *taskspb.Task
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.RunTask(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
|
||||
|
||||
// QueueIterator manages a stream of *taskspb.Queue.
type QueueIterator struct {
	// items buffers the queues fetched from the current page.
	items []*taskspb.Queue
	// pageInfo and nextFunc are wired up by iterator.NewPageInfo.
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*taskspb.Queue, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *QueueIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *QueueIterator) Next() (*taskspb.Queue, error) {
	var item *taskspb.Queue
	// nextFunc refills items when the buffer is empty; it returns
	// iterator.Done when the stream is exhausted.
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	// Pop the first buffered item.
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

// bufLen reports how many fetched items remain buffered (used by iterator.NewPageInfo).
func (it *QueueIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered items to the iterator framework and clears the buffer.
func (it *QueueIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
|
||||
|
||||
// TaskIterator manages a stream of *taskspb.Task.
|
||||
type TaskIterator struct {
|
||||
items []*taskspb.Task
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*taskspb.Task, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
func (it *TaskIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *TaskIterator) Next() (*taskspb.Task, error) {
|
||||
var item *taskspb.Task
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *TaskIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *TaskIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
401
vendor/cloud.google.com/go/cloudtasks/apiv2beta2/cloud_tasks_client_example_test.go
generated
vendored
Normal file
401
vendor/cloud.google.com/go/cloudtasks/apiv2beta2/cloud_tasks_client_example_test.go
generated
vendored
Normal file
@@ -0,0 +1,401 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package cloudtasks_test
|
||||
|
||||
import (
|
||||
"cloud.google.com/go/cloudtasks/apiv2beta2"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
taskspb "google.golang.org/genproto/googleapis/cloud/tasks/v2beta2"
|
||||
iampb "google.golang.org/genproto/googleapis/iam/v1"
|
||||
)
|
||||
|
||||
// ExampleNewClient demonstrates constructing a Cloud Tasks client.
func ExampleNewClient() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}

// ExampleClient_ListQueues demonstrates iterating over queues until iterator.Done.
func ExampleClient_ListQueues() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.ListQueuesRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListQueues(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

// ExampleClient_GetQueue demonstrates fetching a single queue.
func ExampleClient_GetQueue() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.GetQueueRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetQueue(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_CreateQueue demonstrates creating a queue.
func ExampleClient_CreateQueue() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.CreateQueueRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateQueue(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_UpdateQueue demonstrates updating a queue.
func ExampleClient_UpdateQueue() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.UpdateQueueRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateQueue(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_DeleteQueue demonstrates deleting a queue (no response body).
func ExampleClient_DeleteQueue() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.DeleteQueueRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteQueue(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

// ExampleClient_PurgeQueue demonstrates purging all tasks from a queue.
func ExampleClient_PurgeQueue() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.PurgeQueueRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.PurgeQueue(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_PauseQueue demonstrates pausing a queue.
func ExampleClient_PauseQueue() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.PauseQueueRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.PauseQueue(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_ResumeQueue demonstrates resuming a paused queue.
func ExampleClient_ResumeQueue() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.ResumeQueueRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ResumeQueue(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_GetIamPolicy demonstrates reading a queue's IAM policy.
func ExampleClient_GetIamPolicy() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &iampb.GetIamPolicyRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetIamPolicy(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_SetIamPolicy demonstrates replacing a queue's IAM policy.
func ExampleClient_SetIamPolicy() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &iampb.SetIamPolicyRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetIamPolicy(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_TestIamPermissions demonstrates checking caller permissions on a queue.
func ExampleClient_TestIamPermissions() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &iampb.TestIamPermissionsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.TestIamPermissions(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
|
||||
|
||||
// ExampleClient_ListTasks demonstrates iterating over tasks until iterator.Done.
func ExampleClient_ListTasks() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.ListTasksRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTasks(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

// ExampleClient_GetTask demonstrates fetching a single task.
func ExampleClient_GetTask() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.GetTaskRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetTask(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_CreateTask demonstrates creating a task in a queue.
func ExampleClient_CreateTask() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.CreateTaskRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateTask(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_DeleteTask demonstrates deleting a task (no response body).
func ExampleClient_DeleteTask() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.DeleteTaskRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteTask(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

// ExampleClient_LeaseTasks demonstrates leasing tasks from a pull queue.
func ExampleClient_LeaseTasks() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.LeaseTasksRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.LeaseTasks(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_AcknowledgeTask demonstrates acknowledging a leased pull task.
func ExampleClient_AcknowledgeTask() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.AcknowledgeTaskRequest{
		// TODO: Fill request struct fields.
	}
	err = c.AcknowledgeTask(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

// ExampleClient_RenewLease demonstrates extending a pull task's lease.
func ExampleClient_RenewLease() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.RenewLeaseRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.RenewLease(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_CancelLease demonstrates cancelling a pull task's lease.
func ExampleClient_CancelLease() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.CancelLeaseRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CancelLease(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_RunTask demonstrates forcing a task to run immediately.
func ExampleClient_RunTask() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.RunTaskRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.RunTask(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
|
||||
48
vendor/cloud.google.com/go/cloudtasks/apiv2beta2/doc.go
generated
vendored
Normal file
48
vendor/cloud.google.com/go/cloudtasks/apiv2beta2/doc.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
// Package cloudtasks is an auto-generated package for the
|
||||
// Cloud Tasks API.
|
||||
//
|
||||
// NOTE: This package is in alpha. It is not stable, and is likely to change.
|
||||
//
|
||||
// Manages the execution of large numbers of distributed requests. Cloud
|
||||
// Tasks
|
||||
// is in Alpha.
|
||||
package cloudtasks // import "cloud.google.com/go/cloudtasks/apiv2beta2"
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
|
||||
out, _ := metadata.FromOutgoingContext(ctx)
|
||||
out = out.Copy()
|
||||
for _, md := range mds {
|
||||
for k, v := range md {
|
||||
out[k] = append(out[k], v...)
|
||||
}
|
||||
}
|
||||
return metadata.NewOutgoingContext(ctx, out)
|
||||
}
|
||||
|
||||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	scopes := []string{
		"https://www.googleapis.com/auth/cloud-platform",
	}
	return scopes
}
|
||||
1554
vendor/cloud.google.com/go/cloudtasks/apiv2beta2/mock_test.go
generated
vendored
Normal file
1554
vendor/cloud.google.com/go/cloudtasks/apiv2beta2/mock_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
18
vendor/cloud.google.com/go/datastore/datastore_test.go
generated
vendored
18
vendor/cloud.google.com/go/datastore/datastore_test.go
generated
vendored
@@ -152,17 +152,19 @@ type NoOmit struct {
|
||||
}
|
||||
|
||||
type OmitAll struct {
|
||||
A string `datastore:",omitempty"`
|
||||
B int `datastore:"Bb,omitempty"`
|
||||
C bool `datastore:",omitempty,noindex"`
|
||||
F []int `datastore:",omitempty"`
|
||||
A string `datastore:",omitempty"`
|
||||
B int `datastore:"Bb,omitempty"`
|
||||
C bool `datastore:",omitempty,noindex"`
|
||||
D time.Time `datastore:",omitempty"`
|
||||
F []int `datastore:",omitempty"`
|
||||
}
|
||||
|
||||
type Omit struct {
|
||||
A string `datastore:",omitempty"`
|
||||
B int `datastore:"Bb,omitempty"`
|
||||
C bool `datastore:",omitempty,noindex"`
|
||||
F []int `datastore:",omitempty"`
|
||||
A string `datastore:",omitempty"`
|
||||
B int `datastore:"Bb,omitempty"`
|
||||
C bool `datastore:",omitempty,noindex"`
|
||||
D time.Time `datastore:",omitempty"`
|
||||
F []int `datastore:",omitempty"`
|
||||
S `datastore:",omitempty"`
|
||||
}
|
||||
|
||||
|
||||
9
vendor/cloud.google.com/go/datastore/doc.go
generated
vendored
9
vendor/cloud.google.com/go/datastore/doc.go
generated
vendored
@@ -15,6 +15,9 @@
|
||||
/*
|
||||
Package datastore provides a client for Google Cloud Datastore.
|
||||
|
||||
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
|
||||
connection pooling and similar aspects of this package.
|
||||
|
||||
|
||||
Basic Operations
|
||||
|
||||
@@ -481,11 +484,5 @@ directed to the emulator instead of the production Datastore service.
|
||||
|
||||
To install and set up the emulator and its environment variables, see the documentation
|
||||
at https://cloud.google.com/datastore/docs/tools/datastore-emulator.
|
||||
|
||||
Authentication
|
||||
|
||||
See examples of authorization and authentication at
|
||||
https://godoc.org/cloud.google.com/go#pkg-examples.
|
||||
|
||||
*/
|
||||
package datastore // import "cloud.google.com/go/datastore"
|
||||
|
||||
1
vendor/cloud.google.com/go/datastore/query_test.go
generated
vendored
1
vendor/cloud.google.com/go/datastore/query_test.go
generated
vendored
@@ -475,6 +475,7 @@ func TestNamespaceQuery(t *testing.T) {
|
||||
|
||||
var gs []Gopher
|
||||
|
||||
// Ignore errors for the rest of this test.
|
||||
client.GetAll(ctx, NewQuery("gopher"), &gs)
|
||||
if got, want := <-gotNamespace, ""; got != want {
|
||||
t.Errorf("GetAll: got namespace %q, want %q", got, want)
|
||||
|
||||
4
vendor/cloud.google.com/go/datastore/save.go
generated
vendored
4
vendor/cloud.google.com/go/datastore/save.go
generated
vendored
@@ -438,6 +438,10 @@ func isEmptyValue(v reflect.Value) bool {
|
||||
return v.Float() == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
case reflect.Struct:
|
||||
if t, ok := v.Interface().(time.Time); ok {
|
||||
return t.IsZero()
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
2
vendor/cloud.google.com/go/datastore/transaction.go
generated
vendored
2
vendor/cloud.google.com/go/datastore/transaction.go
generated
vendored
@@ -167,7 +167,7 @@ func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) e
|
||||
return nil, err
|
||||
}
|
||||
if err := f(tx); err != nil {
|
||||
tx.Rollback()
|
||||
_ = tx.Rollback()
|
||||
return nil, err
|
||||
}
|
||||
if cmt, err := tx.Commit(); err != ErrConcurrentTransaction {
|
||||
|
||||
599
vendor/cloud.google.com/go/dialogflow/apiv2/agents_client.go
generated
vendored
Normal file
599
vendor/cloud.google.com/go/dialogflow/apiv2/agents_client.go
generated
vendored
Normal file
@@ -0,0 +1,599 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package dialogflow
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/version"
|
||||
"cloud.google.com/go/longrunning"
|
||||
lroauto "cloud.google.com/go/longrunning/autogen"
|
||||
structpbpb "github.com/golang/protobuf/ptypes/struct"
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/transport"
|
||||
dialogflowpb "google.golang.org/genproto/googleapis/cloud/dialogflow/v2"
|
||||
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// AgentsCallOptions contains the retry settings for each method of AgentsClient.
|
||||
type AgentsCallOptions struct {
|
||||
GetAgent []gax.CallOption
|
||||
SearchAgents []gax.CallOption
|
||||
TrainAgent []gax.CallOption
|
||||
ExportAgent []gax.CallOption
|
||||
ImportAgent []gax.CallOption
|
||||
RestoreAgent []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultAgentsClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
option.WithEndpoint("dialogflow.googleapis.com:443"),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultAgentsCallOptions() *AgentsCallOptions {
|
||||
retry := map[[2]string][]gax.CallOption{
|
||||
{"default", "idempotent"}: {
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.3,
|
||||
})
|
||||
}),
|
||||
},
|
||||
}
|
||||
return &AgentsCallOptions{
|
||||
GetAgent: retry[[2]string{"default", "idempotent"}],
|
||||
SearchAgents: retry[[2]string{"default", "idempotent"}],
|
||||
TrainAgent: retry[[2]string{"default", "idempotent"}],
|
||||
ExportAgent: retry[[2]string{"default", "idempotent"}],
|
||||
ImportAgent: retry[[2]string{"default", "non_idempotent"}],
|
||||
RestoreAgent: retry[[2]string{"default", "idempotent"}],
|
||||
}
|
||||
}
|
||||
|
||||
// AgentsClient is a client for interacting with Dialogflow API.
|
||||
type AgentsClient struct {
|
||||
// The connection to the service.
|
||||
conn *grpc.ClientConn
|
||||
|
||||
// The gRPC API client.
|
||||
agentsClient dialogflowpb.AgentsClient
|
||||
|
||||
// LROClient is used internally to handle longrunning operations.
|
||||
// It is exposed so that its CallOptions can be modified if required.
|
||||
// Users should not Close this client.
|
||||
LROClient *lroauto.OperationsClient
|
||||
|
||||
// The call options for this service.
|
||||
CallOptions *AgentsCallOptions
|
||||
|
||||
// The x-goog-* metadata to be sent with each request.
|
||||
xGoogMetadata metadata.MD
|
||||
}
|
||||
|
||||
// NewAgentsClient creates a new agents client.
|
||||
//
|
||||
// Agents are best described as Natural Language Understanding (NLU) modules
|
||||
// that transform user requests into actionable data. You can include agents
|
||||
// in your app, product, or service to determine user intent and respond to the
|
||||
// user in a natural way.
|
||||
//
|
||||
// After you create an agent, you can add [Intents][google.cloud.dialogflow.v2.Intents], [Contexts][google.cloud.dialogflow.v2.Contexts],
|
||||
// [Entity Types][google.cloud.dialogflow.v2.EntityTypes], [Webhooks][google.cloud.dialogflow.v2.WebhookRequest], and so on to
|
||||
// manage the flow of a conversation and match user input to predefined intents
|
||||
// and actions.
|
||||
//
|
||||
// You can create an agent using both Dialogflow Standard Edition and
|
||||
// Dialogflow Enterprise Edition. For details, see
|
||||
// Dialogflow Editions (at /dialogflow-enterprise/docs/editions).
|
||||
//
|
||||
// You can save your agent for backup or versioning by exporting the agent by
|
||||
// using the [ExportAgent][google.cloud.dialogflow.v2.Agents.ExportAgent] method. You can import a saved
|
||||
// agent by using the [ImportAgent][google.cloud.dialogflow.v2.Agents.ImportAgent] method.
|
||||
//
|
||||
// Dialogflow provides several
|
||||
// prebuilt agents (at https://dialogflow.com/docs/prebuilt-agents) for common
|
||||
// conversation scenarios such as determining a date and time, converting
|
||||
// currency, and so on.
|
||||
//
|
||||
// For more information about agents, see the
|
||||
// Dialogflow documentation (at https://dialogflow.com/docs/agents).
|
||||
func NewAgentsClient(ctx context.Context, opts ...option.ClientOption) (*AgentsClient, error) {
|
||||
conn, err := transport.DialGRPC(ctx, append(defaultAgentsClientOptions(), opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &AgentsClient{
|
||||
conn: conn,
|
||||
CallOptions: defaultAgentsCallOptions(),
|
||||
|
||||
agentsClient: dialogflowpb.NewAgentsClient(conn),
|
||||
}
|
||||
c.setGoogleClientInfo()
|
||||
|
||||
c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
|
||||
if err != nil {
|
||||
// This error "should not happen", since we are just reusing old connection
|
||||
// and never actually need to dial.
|
||||
// If this does happen, we could leak conn. However, we cannot close conn:
|
||||
// If the user invoked the function with option.WithGRPCConn,
|
||||
// we would close a connection that's still in use.
|
||||
// TODO(pongad): investigate error conditions.
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Connection returns the client's connection to the API service.
|
||||
func (c *AgentsClient) Connection() *grpc.ClientConn {
|
||||
return c.conn
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *AgentsClient) Close() error {
|
||||
return c.conn.Close()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *AgentsClient) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", version.Go()}, keyval...)
|
||||
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
|
||||
}
|
||||
|
||||
// GetAgent retrieves the specified agent.
|
||||
func (c *AgentsClient) GetAgent(ctx context.Context, req *dialogflowpb.GetAgentRequest, opts ...gax.CallOption) (*dialogflowpb.Agent, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.GetAgent[0:len(c.CallOptions.GetAgent):len(c.CallOptions.GetAgent)], opts...)
|
||||
var resp *dialogflowpb.Agent
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.agentsClient.GetAgent(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// SearchAgents returns the list of agents.
|
||||
//
|
||||
// Since there is at most one conversational agent per project, this method is
|
||||
// useful primarily for listing all agents across projects the caller has
|
||||
// access to. One can achieve that with a wildcard project collection id "-".
|
||||
// Refer to List
|
||||
// Sub-Collections (at https://cloud.google.com/apis/design/design_patterns#list_sub-collections).
|
||||
func (c *AgentsClient) SearchAgents(ctx context.Context, req *dialogflowpb.SearchAgentsRequest, opts ...gax.CallOption) *AgentIterator {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.SearchAgents[0:len(c.CallOptions.SearchAgents):len(c.CallOptions.SearchAgents)], opts...)
|
||||
it := &AgentIterator{}
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*dialogflowpb.Agent, string, error) {
|
||||
var resp *dialogflowpb.SearchAgentsResponse
|
||||
req.PageToken = pageToken
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.agentsClient.SearchAgents(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return resp.Agents, resp.NextPageToken, nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
return it
|
||||
}
|
||||
|
||||
// TrainAgent trains the specified agent.
|
||||
//
|
||||
// Operation <response: [google.protobuf.Empty][google.protobuf.Empty],
|
||||
// metadata: [google.protobuf.Struct][google.protobuf.Struct]>
|
||||
func (c *AgentsClient) TrainAgent(ctx context.Context, req *dialogflowpb.TrainAgentRequest, opts ...gax.CallOption) (*TrainAgentOperation, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.TrainAgent[0:len(c.CallOptions.TrainAgent):len(c.CallOptions.TrainAgent)], opts...)
|
||||
var resp *longrunningpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.agentsClient.TrainAgent(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &TrainAgentOperation{
|
||||
lro: longrunning.InternalNewOperation(c.LROClient, resp),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ExportAgent exports the specified agent to a ZIP file.
|
||||
//
|
||||
// Operation <response: [ExportAgentResponse][google.cloud.dialogflow.v2.ExportAgentResponse],
|
||||
// metadata: [google.protobuf.Struct][google.protobuf.Struct]>
|
||||
func (c *AgentsClient) ExportAgent(ctx context.Context, req *dialogflowpb.ExportAgentRequest, opts ...gax.CallOption) (*ExportAgentOperation, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.ExportAgent[0:len(c.CallOptions.ExportAgent):len(c.CallOptions.ExportAgent)], opts...)
|
||||
var resp *longrunningpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.agentsClient.ExportAgent(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ExportAgentOperation{
|
||||
lro: longrunning.InternalNewOperation(c.LROClient, resp),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ImportAgent imports the specified agent from a ZIP file.
|
||||
//
|
||||
// Uploads new intents and entity types without deleting the existing ones.
|
||||
// Intents and entity types with the same name are replaced with the new
|
||||
// versions from ImportAgentRequest.
|
||||
//
|
||||
// Operation <response: [google.protobuf.Empty][google.protobuf.Empty],
|
||||
// metadata: [google.protobuf.Struct][google.protobuf.Struct]>
|
||||
func (c *AgentsClient) ImportAgent(ctx context.Context, req *dialogflowpb.ImportAgentRequest, opts ...gax.CallOption) (*ImportAgentOperation, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.ImportAgent[0:len(c.CallOptions.ImportAgent):len(c.CallOptions.ImportAgent)], opts...)
|
||||
var resp *longrunningpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.agentsClient.ImportAgent(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ImportAgentOperation{
|
||||
lro: longrunning.InternalNewOperation(c.LROClient, resp),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// RestoreAgent restores the specified agent from a ZIP file.
|
||||
//
|
||||
// Replaces the current agent version with a new one. All the intents and
|
||||
// entity types in the older version are deleted.
|
||||
//
|
||||
// Operation <response: [google.protobuf.Empty][google.protobuf.Empty],
|
||||
// metadata: [google.protobuf.Struct][google.protobuf.Struct]>
|
||||
func (c *AgentsClient) RestoreAgent(ctx context.Context, req *dialogflowpb.RestoreAgentRequest, opts ...gax.CallOption) (*RestoreAgentOperation, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.RestoreAgent[0:len(c.CallOptions.RestoreAgent):len(c.CallOptions.RestoreAgent)], opts...)
|
||||
var resp *longrunningpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.agentsClient.RestoreAgent(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &RestoreAgentOperation{
|
||||
lro: longrunning.InternalNewOperation(c.LROClient, resp),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AgentIterator manages a stream of *dialogflowpb.Agent.
|
||||
type AgentIterator struct {
|
||||
items []*dialogflowpb.Agent
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*dialogflowpb.Agent, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
func (it *AgentIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *AgentIterator) Next() (*dialogflowpb.Agent, error) {
|
||||
var item *dialogflowpb.Agent
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *AgentIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *AgentIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// ExportAgentOperation manages a long-running operation from ExportAgent.
|
||||
type ExportAgentOperation struct {
|
||||
lro *longrunning.Operation
|
||||
}
|
||||
|
||||
// ExportAgentOperation returns a new ExportAgentOperation from a given name.
|
||||
// The name must be that of a previously created ExportAgentOperation, possibly from a different process.
|
||||
func (c *AgentsClient) ExportAgentOperation(name string) *ExportAgentOperation {
|
||||
return &ExportAgentOperation{
|
||||
lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
|
||||
}
|
||||
}
|
||||
|
||||
// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
|
||||
//
|
||||
// See documentation of Poll for error-handling information.
|
||||
func (op *ExportAgentOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*dialogflowpb.ExportAgentResponse, error) {
|
||||
var resp dialogflowpb.ExportAgentResponse
|
||||
if err := op.lro.WaitWithInterval(ctx, &resp, 5000*time.Millisecond, opts...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
// Poll fetches the latest state of the long-running operation.
|
||||
//
|
||||
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
|
||||
//
|
||||
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
|
||||
// the operation has completed with failure, the error is returned and op.Done will return true.
|
||||
// If Poll succeeds and the operation has completed successfully,
|
||||
// op.Done will return true, and the response of the operation is returned.
|
||||
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
|
||||
func (op *ExportAgentOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*dialogflowpb.ExportAgentResponse, error) {
|
||||
var resp dialogflowpb.ExportAgentResponse
|
||||
if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !op.Done() {
|
||||
return nil, nil
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
// Metadata returns metadata associated with the long-running operation.
|
||||
// Metadata itself does not contact the server, but Poll does.
|
||||
// To get the latest metadata, call this method after a successful call to Poll.
|
||||
// If the metadata is not available, the returned metadata and error are both nil.
|
||||
func (op *ExportAgentOperation) Metadata() (*structpbpb.Struct, error) {
|
||||
var meta structpbpb.Struct
|
||||
if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
|
||||
return nil, nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &meta, nil
|
||||
}
|
||||
|
||||
// Done reports whether the long-running operation has completed.
|
||||
func (op *ExportAgentOperation) Done() bool {
|
||||
return op.lro.Done()
|
||||
}
|
||||
|
||||
// Name returns the name of the long-running operation.
|
||||
// The name is assigned by the server and is unique within the service from which the operation is created.
|
||||
func (op *ExportAgentOperation) Name() string {
|
||||
return op.lro.Name()
|
||||
}
|
||||
|
||||
// ImportAgentOperation manages a long-running operation from ImportAgent.
|
||||
type ImportAgentOperation struct {
|
||||
lro *longrunning.Operation
|
||||
}
|
||||
|
||||
// ImportAgentOperation returns a new ImportAgentOperation from a given name.
|
||||
// The name must be that of a previously created ImportAgentOperation, possibly from a different process.
|
||||
func (c *AgentsClient) ImportAgentOperation(name string) *ImportAgentOperation {
|
||||
return &ImportAgentOperation{
|
||||
lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
|
||||
}
|
||||
}
|
||||
|
||||
// Wait blocks until the long-running operation is completed, returning any error encountered.
|
||||
//
|
||||
// See documentation of Poll for error-handling information.
|
||||
func (op *ImportAgentOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
|
||||
return op.lro.WaitWithInterval(ctx, nil, 5000*time.Millisecond, opts...)
|
||||
}
|
||||
|
||||
// Poll fetches the latest state of the long-running operation.
|
||||
//
|
||||
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
|
||||
//
|
||||
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
|
||||
// the operation has completed with failure, the error is returned and op.Done will return true.
|
||||
// If Poll succeeds and the operation has completed successfully, op.Done will return true.
|
||||
func (op *ImportAgentOperation) Poll(ctx context.Context, opts ...gax.CallOption) error {
|
||||
return op.lro.Poll(ctx, nil, opts...)
|
||||
}
|
||||
|
||||
// Metadata returns metadata associated with the long-running operation.
|
||||
// Metadata itself does not contact the server, but Poll does.
|
||||
// To get the latest metadata, call this method after a successful call to Poll.
|
||||
// If the metadata is not available, the returned metadata and error are both nil.
|
||||
func (op *ImportAgentOperation) Metadata() (*structpbpb.Struct, error) {
|
||||
var meta structpbpb.Struct
|
||||
if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
|
||||
return nil, nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &meta, nil
|
||||
}
|
||||
|
||||
// Done reports whether the long-running operation has completed.
|
||||
func (op *ImportAgentOperation) Done() bool {
|
||||
return op.lro.Done()
|
||||
}
|
||||
|
||||
// Name returns the name of the long-running operation.
|
||||
// The name is assigned by the server and is unique within the service from which the operation is created.
|
||||
func (op *ImportAgentOperation) Name() string {
|
||||
return op.lro.Name()
|
||||
}
|
||||
|
||||
// RestoreAgentOperation manages a long-running operation from RestoreAgent.
|
||||
type RestoreAgentOperation struct {
|
||||
lro *longrunning.Operation
|
||||
}
|
||||
|
||||
// RestoreAgentOperation returns a new RestoreAgentOperation from a given name.
|
||||
// The name must be that of a previously created RestoreAgentOperation, possibly from a different process.
|
||||
func (c *AgentsClient) RestoreAgentOperation(name string) *RestoreAgentOperation {
|
||||
return &RestoreAgentOperation{
|
||||
lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
|
||||
}
|
||||
}
|
||||
|
||||
// Wait blocks until the long-running operation is completed, returning any error encountered.
|
||||
//
|
||||
// See documentation of Poll for error-handling information.
|
||||
func (op *RestoreAgentOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
|
||||
return op.lro.WaitWithInterval(ctx, nil, 5000*time.Millisecond, opts...)
|
||||
}
|
||||
|
||||
// Poll fetches the latest state of the long-running operation.
|
||||
//
|
||||
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
|
||||
//
|
||||
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
|
||||
// the operation has completed with failure, the error is returned and op.Done will return true.
|
||||
// If Poll succeeds and the operation has completed successfully, op.Done will return true.
|
||||
func (op *RestoreAgentOperation) Poll(ctx context.Context, opts ...gax.CallOption) error {
|
||||
return op.lro.Poll(ctx, nil, opts...)
|
||||
}
|
||||
|
||||
// Metadata returns metadata associated with the long-running operation.
|
||||
// Metadata itself does not contact the server, but Poll does.
|
||||
// To get the latest metadata, call this method after a successful call to Poll.
|
||||
// If the metadata is not available, the returned metadata and error are both nil.
|
||||
func (op *RestoreAgentOperation) Metadata() (*structpbpb.Struct, error) {
|
||||
var meta structpbpb.Struct
|
||||
if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
|
||||
return nil, nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &meta, nil
|
||||
}
|
||||
|
||||
// Done reports whether the long-running operation has completed.
|
||||
func (op *RestoreAgentOperation) Done() bool {
|
||||
return op.lro.Done()
|
||||
}
|
||||
|
||||
// Name returns the name of the long-running operation.
|
||||
// The name is assigned by the server and is unique within the service from which the operation is created.
|
||||
func (op *RestoreAgentOperation) Name() string {
|
||||
return op.lro.Name()
|
||||
}
|
||||
|
||||
// TrainAgentOperation manages a long-running operation from TrainAgent.
|
||||
type TrainAgentOperation struct {
|
||||
lro *longrunning.Operation
|
||||
}
|
||||
|
||||
// TrainAgentOperation returns a new TrainAgentOperation from a given name.
|
||||
// The name must be that of a previously created TrainAgentOperation, possibly from a different process.
|
||||
func (c *AgentsClient) TrainAgentOperation(name string) *TrainAgentOperation {
|
||||
return &TrainAgentOperation{
|
||||
lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
|
||||
}
|
||||
}
|
||||
|
||||
// Wait blocks until the long-running operation is completed, returning any error encountered.
|
||||
//
|
||||
// See documentation of Poll for error-handling information.
|
||||
func (op *TrainAgentOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
|
||||
return op.lro.WaitWithInterval(ctx, nil, 5000*time.Millisecond, opts...)
|
||||
}
|
||||
|
||||
// Poll fetches the latest state of the long-running operation.
|
||||
//
|
||||
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
|
||||
//
|
||||
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
|
||||
// the operation has completed with failure, the error is returned and op.Done will return true.
|
||||
// If Poll succeeds and the operation has completed successfully, op.Done will return true.
|
||||
func (op *TrainAgentOperation) Poll(ctx context.Context, opts ...gax.CallOption) error {
|
||||
return op.lro.Poll(ctx, nil, opts...)
|
||||
}
|
||||
|
||||
// Metadata returns metadata associated with the long-running operation.
|
||||
// Metadata itself does not contact the server, but Poll does.
|
||||
// To get the latest metadata, call this method after a successful call to Poll.
|
||||
// If the metadata is not available, the returned metadata and error are both nil.
|
||||
func (op *TrainAgentOperation) Metadata() (*structpbpb.Struct, error) {
|
||||
var meta structpbpb.Struct
|
||||
if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
|
||||
return nil, nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &meta, nil
|
||||
}
|
||||
|
||||
// Done reports whether the long-running operation has completed.
|
||||
func (op *TrainAgentOperation) Done() bool {
|
||||
return op.lro.Done()
|
||||
}
|
||||
|
||||
// Name returns the name of the long-running operation.
|
||||
// The name is assigned by the server and is unique within the service from which the operation is created.
|
||||
func (op *TrainAgentOperation) Name() string {
|
||||
return op.lro.Name()
|
||||
}
|
||||
156
vendor/cloud.google.com/go/dialogflow/apiv2/agents_client_example_test.go
generated
vendored
Normal file
156
vendor/cloud.google.com/go/dialogflow/apiv2/agents_client_example_test.go
generated
vendored
Normal file
@@ -0,0 +1,156 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package dialogflow_test
|
||||
|
||||
import (
|
||||
"cloud.google.com/go/dialogflow/apiv2"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
dialogflowpb "google.golang.org/genproto/googleapis/cloud/dialogflow/v2"
|
||||
)
|
||||
|
||||
func ExampleNewAgentsClient() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewAgentsClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use client.
|
||||
_ = c
|
||||
}
|
||||
|
||||
func ExampleAgentsClient_GetAgent() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewAgentsClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.GetAgentRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
resp, err := c.GetAgent(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
|
||||
func ExampleAgentsClient_SearchAgents() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewAgentsClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.SearchAgentsRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
it := c.SearchAgents(ctx, req)
|
||||
for {
|
||||
resp, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleAgentsClient_TrainAgent() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewAgentsClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.TrainAgentRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
op, err := c.TrainAgent(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
err = op.Wait(ctx)
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
func ExampleAgentsClient_ExportAgent() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewAgentsClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.ExportAgentRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
op, err := c.ExportAgent(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
resp, err := op.Wait(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
|
||||
func ExampleAgentsClient_ImportAgent() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewAgentsClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.ImportAgentRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
op, err := c.ImportAgent(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
err = op.Wait(ctx)
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
func ExampleAgentsClient_RestoreAgent() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewAgentsClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.RestoreAgentRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
op, err := c.RestoreAgent(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
err = op.Wait(ctx)
|
||||
// TODO: Handle error.
|
||||
}
|
||||
293
vendor/cloud.google.com/go/dialogflow/apiv2/contexts_client.go
generated
vendored
Normal file
293
vendor/cloud.google.com/go/dialogflow/apiv2/contexts_client.go
generated
vendored
Normal file
@@ -0,0 +1,293 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package dialogflow
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/version"
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/transport"
|
||||
dialogflowpb "google.golang.org/genproto/googleapis/cloud/dialogflow/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// ContextsCallOptions contains the retry settings for each method of ContextsClient.
|
||||
type ContextsCallOptions struct {
|
||||
ListContexts []gax.CallOption
|
||||
GetContext []gax.CallOption
|
||||
CreateContext []gax.CallOption
|
||||
UpdateContext []gax.CallOption
|
||||
DeleteContext []gax.CallOption
|
||||
DeleteAllContexts []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultContextsClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
option.WithEndpoint("dialogflow.googleapis.com:443"),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultContextsCallOptions() *ContextsCallOptions {
|
||||
retry := map[[2]string][]gax.CallOption{
|
||||
{"default", "idempotent"}: {
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.3,
|
||||
})
|
||||
}),
|
||||
},
|
||||
}
|
||||
return &ContextsCallOptions{
|
||||
ListContexts: retry[[2]string{"default", "idempotent"}],
|
||||
GetContext: retry[[2]string{"default", "idempotent"}],
|
||||
CreateContext: retry[[2]string{"default", "non_idempotent"}],
|
||||
UpdateContext: retry[[2]string{"default", "non_idempotent"}],
|
||||
DeleteContext: retry[[2]string{"default", "idempotent"}],
|
||||
DeleteAllContexts: retry[[2]string{"default", "idempotent"}],
|
||||
}
|
||||
}
|
||||
|
||||
// ContextsClient is a client for interacting with Dialogflow API.
|
||||
type ContextsClient struct {
|
||||
// The connection to the service.
|
||||
conn *grpc.ClientConn
|
||||
|
||||
// The gRPC API client.
|
||||
contextsClient dialogflowpb.ContextsClient
|
||||
|
||||
// The call options for this service.
|
||||
CallOptions *ContextsCallOptions
|
||||
|
||||
// The x-goog-* metadata to be sent with each request.
|
||||
xGoogMetadata metadata.MD
|
||||
}
|
||||
|
||||
// NewContextsClient creates a new contexts client.
|
||||
//
|
||||
// A context represents additional information included with user input or with
|
||||
// an intent returned by the Dialogflow API. Contexts are helpful for
|
||||
// differentiating user input which may be vague or have a different meaning
|
||||
// depending on additional details from your application such as user setting
|
||||
// and preferences, previous user input, where the user is in your application,
|
||||
// geographic location, and so on.
|
||||
//
|
||||
// You can include contexts as input parameters of a
|
||||
// [DetectIntent][google.cloud.dialogflow.v2.Sessions.DetectIntent] (or
|
||||
// [StreamingDetectIntent][google.cloud.dialogflow.v2.Sessions.StreamingDetectIntent]) request,
|
||||
// or as output contexts included in the returned intent.
|
||||
// Contexts expire when an intent is matched, after the number of DetectIntent
|
||||
// requests specified by the lifespan_count parameter, or after 10 minutes
|
||||
// if no intents are matched for a DetectIntent request.
|
||||
//
|
||||
// For more information about contexts, see the
|
||||
// Dialogflow documentation (at https://dialogflow.com/docs/contexts).
|
||||
func NewContextsClient(ctx context.Context, opts ...option.ClientOption) (*ContextsClient, error) {
|
||||
conn, err := transport.DialGRPC(ctx, append(defaultContextsClientOptions(), opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &ContextsClient{
|
||||
conn: conn,
|
||||
CallOptions: defaultContextsCallOptions(),
|
||||
|
||||
contextsClient: dialogflowpb.NewContextsClient(conn),
|
||||
}
|
||||
c.setGoogleClientInfo()
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Connection returns the client's connection to the API service.
|
||||
func (c *ContextsClient) Connection() *grpc.ClientConn {
|
||||
return c.conn
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *ContextsClient) Close() error {
|
||||
return c.conn.Close()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *ContextsClient) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", version.Go()}, keyval...)
|
||||
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
|
||||
}
|
||||
|
||||
// ListContexts returns the list of all contexts in the specified session.
|
||||
func (c *ContextsClient) ListContexts(ctx context.Context, req *dialogflowpb.ListContextsRequest, opts ...gax.CallOption) *ContextIterator {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.ListContexts[0:len(c.CallOptions.ListContexts):len(c.CallOptions.ListContexts)], opts...)
|
||||
it := &ContextIterator{}
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*dialogflowpb.Context, string, error) {
|
||||
var resp *dialogflowpb.ListContextsResponse
|
||||
req.PageToken = pageToken
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.contextsClient.ListContexts(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return resp.Contexts, resp.NextPageToken, nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
return it
|
||||
}
|
||||
|
||||
// GetContext retrieves the specified context.
|
||||
func (c *ContextsClient) GetContext(ctx context.Context, req *dialogflowpb.GetContextRequest, opts ...gax.CallOption) (*dialogflowpb.Context, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.GetContext[0:len(c.CallOptions.GetContext):len(c.CallOptions.GetContext)], opts...)
|
||||
var resp *dialogflowpb.Context
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.contextsClient.GetContext(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// CreateContext creates a context.
|
||||
func (c *ContextsClient) CreateContext(ctx context.Context, req *dialogflowpb.CreateContextRequest, opts ...gax.CallOption) (*dialogflowpb.Context, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.CreateContext[0:len(c.CallOptions.CreateContext):len(c.CallOptions.CreateContext)], opts...)
|
||||
var resp *dialogflowpb.Context
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.contextsClient.CreateContext(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// UpdateContext updates the specified context.
|
||||
func (c *ContextsClient) UpdateContext(ctx context.Context, req *dialogflowpb.UpdateContextRequest, opts ...gax.CallOption) (*dialogflowpb.Context, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.UpdateContext[0:len(c.CallOptions.UpdateContext):len(c.CallOptions.UpdateContext)], opts...)
|
||||
var resp *dialogflowpb.Context
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.contextsClient.UpdateContext(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// DeleteContext deletes the specified context.
|
||||
func (c *ContextsClient) DeleteContext(ctx context.Context, req *dialogflowpb.DeleteContextRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.DeleteContext[0:len(c.CallOptions.DeleteContext):len(c.CallOptions.DeleteContext)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = c.contextsClient.DeleteContext(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteAllContexts deletes all active contexts in the specified session.
|
||||
func (c *ContextsClient) DeleteAllContexts(ctx context.Context, req *dialogflowpb.DeleteAllContextsRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.DeleteAllContexts[0:len(c.CallOptions.DeleteAllContexts):len(c.CallOptions.DeleteAllContexts)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = c.contextsClient.DeleteAllContexts(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
// ContextIterator manages a stream of *dialogflowpb.Context.
|
||||
type ContextIterator struct {
|
||||
items []*dialogflowpb.Context
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*dialogflowpb.Context, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
func (it *ContextIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *ContextIterator) Next() (*dialogflowpb.Context, error) {
|
||||
var item *dialogflowpb.Context
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *ContextIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *ContextIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
144
vendor/cloud.google.com/go/dialogflow/apiv2/contexts_client_example_test.go
generated
vendored
Normal file
144
vendor/cloud.google.com/go/dialogflow/apiv2/contexts_client_example_test.go
generated
vendored
Normal file
@@ -0,0 +1,144 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package dialogflow_test
|
||||
|
||||
import (
|
||||
"cloud.google.com/go/dialogflow/apiv2"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
dialogflowpb "google.golang.org/genproto/googleapis/cloud/dialogflow/v2"
|
||||
)
|
||||
|
||||
func ExampleNewContextsClient() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewContextsClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use client.
|
||||
_ = c
|
||||
}
|
||||
|
||||
func ExampleContextsClient_ListContexts() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewContextsClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.ListContextsRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
it := c.ListContexts(ctx, req)
|
||||
for {
|
||||
resp, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleContextsClient_GetContext() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewContextsClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.GetContextRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
resp, err := c.GetContext(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
|
||||
func ExampleContextsClient_CreateContext() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewContextsClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.CreateContextRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
resp, err := c.CreateContext(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
|
||||
func ExampleContextsClient_UpdateContext() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewContextsClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.UpdateContextRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
resp, err := c.UpdateContext(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
|
||||
func ExampleContextsClient_DeleteContext() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewContextsClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.DeleteContextRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
err = c.DeleteContext(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleContextsClient_DeleteAllContexts() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewContextsClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.DeleteAllContextsRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
err = c.DeleteAllContexts(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
}
|
||||
47
vendor/cloud.google.com/go/dialogflow/apiv2/doc.go
generated
vendored
Normal file
47
vendor/cloud.google.com/go/dialogflow/apiv2/doc.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
// Package dialogflow is an auto-generated package for the
|
||||
// Dialogflow API.
|
||||
//
|
||||
// NOTE: This package is in alpha. It is not stable, and is likely to change.
|
||||
//
|
||||
// An end-to-end development suite for conversational interfaces (e.g.,
|
||||
// chatbots, voice-powered apps and devices).
|
||||
package dialogflow // import "cloud.google.com/go/dialogflow/apiv2"
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
|
||||
out, _ := metadata.FromOutgoingContext(ctx)
|
||||
out = out.Copy()
|
||||
for _, md := range mds {
|
||||
for k, v := range md {
|
||||
out[k] = append(out[k], v...)
|
||||
}
|
||||
}
|
||||
return metadata.NewOutgoingContext(ctx, out)
|
||||
}
|
||||
|
||||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
|
||||
func DefaultAuthScopes() []string {
|
||||
return []string{
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
}
|
||||
}
|
||||
718
vendor/cloud.google.com/go/dialogflow/apiv2/entity_types_client.go
generated
vendored
Normal file
718
vendor/cloud.google.com/go/dialogflow/apiv2/entity_types_client.go
generated
vendored
Normal file
@@ -0,0 +1,718 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package dialogflow
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/version"
|
||||
"cloud.google.com/go/longrunning"
|
||||
lroauto "cloud.google.com/go/longrunning/autogen"
|
||||
structpbpb "github.com/golang/protobuf/ptypes/struct"
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/transport"
|
||||
dialogflowpb "google.golang.org/genproto/googleapis/cloud/dialogflow/v2"
|
||||
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// EntityTypesCallOptions contains the retry settings for each method of EntityTypesClient.
|
||||
type EntityTypesCallOptions struct {
|
||||
ListEntityTypes []gax.CallOption
|
||||
GetEntityType []gax.CallOption
|
||||
CreateEntityType []gax.CallOption
|
||||
UpdateEntityType []gax.CallOption
|
||||
DeleteEntityType []gax.CallOption
|
||||
BatchUpdateEntityTypes []gax.CallOption
|
||||
BatchDeleteEntityTypes []gax.CallOption
|
||||
BatchCreateEntities []gax.CallOption
|
||||
BatchUpdateEntities []gax.CallOption
|
||||
BatchDeleteEntities []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultEntityTypesClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
option.WithEndpoint("dialogflow.googleapis.com:443"),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultEntityTypesCallOptions() *EntityTypesCallOptions {
|
||||
retry := map[[2]string][]gax.CallOption{
|
||||
{"default", "idempotent"}: {
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.3,
|
||||
})
|
||||
}),
|
||||
},
|
||||
}
|
||||
return &EntityTypesCallOptions{
|
||||
ListEntityTypes: retry[[2]string{"default", "idempotent"}],
|
||||
GetEntityType: retry[[2]string{"default", "idempotent"}],
|
||||
CreateEntityType: retry[[2]string{"default", "non_idempotent"}],
|
||||
UpdateEntityType: retry[[2]string{"default", "non_idempotent"}],
|
||||
DeleteEntityType: retry[[2]string{"default", "idempotent"}],
|
||||
BatchUpdateEntityTypes: retry[[2]string{"default", "non_idempotent"}],
|
||||
BatchDeleteEntityTypes: retry[[2]string{"default", "idempotent"}],
|
||||
BatchCreateEntities: retry[[2]string{"default", "non_idempotent"}],
|
||||
BatchUpdateEntities: retry[[2]string{"default", "non_idempotent"}],
|
||||
BatchDeleteEntities: retry[[2]string{"default", "idempotent"}],
|
||||
}
|
||||
}
|
||||
|
||||
// EntityTypesClient is a client for interacting with Dialogflow API.
|
||||
type EntityTypesClient struct {
|
||||
// The connection to the service.
|
||||
conn *grpc.ClientConn
|
||||
|
||||
// The gRPC API client.
|
||||
entityTypesClient dialogflowpb.EntityTypesClient
|
||||
|
||||
// LROClient is used internally to handle longrunning operations.
|
||||
// It is exposed so that its CallOptions can be modified if required.
|
||||
// Users should not Close this client.
|
||||
LROClient *lroauto.OperationsClient
|
||||
|
||||
// The call options for this service.
|
||||
CallOptions *EntityTypesCallOptions
|
||||
|
||||
// The x-goog-* metadata to be sent with each request.
|
||||
xGoogMetadata metadata.MD
|
||||
}
|
||||
|
||||
// NewEntityTypesClient creates a new entity types client.
|
||||
//
|
||||
// Entities are extracted from user input and represent parameters that are
|
||||
// meaningful to your application. For example, a date range, a proper name
|
||||
// such as a geographic location or landmark, and so on. Entities represent
|
||||
// actionable data for your application.
|
||||
//
|
||||
// When you define an entity, you can also include synonyms that all map to
|
||||
// that entity. For example, "soft drink", "soda", "pop", and so on.
|
||||
//
|
||||
// There are three types of entities:
|
||||
//
|
||||
// * **System** - entities that are defined by the Dialogflow API for common
|
||||
// data types such as date, time, currency, and so on. A system entity is
|
||||
// represented by the `EntityType` type.
|
||||
//
|
||||
// * **Developer** - entities that are defined by you that represent
|
||||
// actionable data that is meaningful to your application. For example,
|
||||
// you could define a `pizza.sauce` entity for red or white pizza sauce,
|
||||
// a `pizza.cheese` entity for the different types of cheese on a pizza,
|
||||
// a `pizza.topping` entity for different toppings, and so on. A developer
|
||||
// entity is represented by the `EntityType` type.
|
||||
//
|
||||
// * **User** - entities that are built for an individual user such as
|
||||
// favorites, preferences, playlists, and so on. A user entity is
|
||||
// represented by the [SessionEntityType][google.cloud.dialogflow.v2.SessionEntityType] type.
|
||||
//
|
||||
// For more information about entity types, see the
|
||||
// [Dialogflow documentation](https://dialogflow.com/docs/entities).
|
||||
func NewEntityTypesClient(ctx context.Context, opts ...option.ClientOption) (*EntityTypesClient, error) {
|
||||
conn, err := transport.DialGRPC(ctx, append(defaultEntityTypesClientOptions(), opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &EntityTypesClient{
|
||||
conn: conn,
|
||||
CallOptions: defaultEntityTypesCallOptions(),
|
||||
|
||||
entityTypesClient: dialogflowpb.NewEntityTypesClient(conn),
|
||||
}
|
||||
c.setGoogleClientInfo()
|
||||
|
||||
c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
|
||||
if err != nil {
|
||||
// This error "should not happen", since we are just reusing old connection
|
||||
// and never actually need to dial.
|
||||
// If this does happen, we could leak conn. However, we cannot close conn:
|
||||
// If the user invoked the function with option.WithGRPCConn,
|
||||
// we would close a connection that's still in use.
|
||||
// TODO(pongad): investigate error conditions.
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Connection returns the client's connection to the API service.
|
||||
func (c *EntityTypesClient) Connection() *grpc.ClientConn {
|
||||
return c.conn
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *EntityTypesClient) Close() error {
|
||||
return c.conn.Close()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *EntityTypesClient) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", version.Go()}, keyval...)
|
||||
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
|
||||
}
|
||||
|
||||
// ListEntityTypes returns the list of all entity types in the specified agent.
|
||||
func (c *EntityTypesClient) ListEntityTypes(ctx context.Context, req *dialogflowpb.ListEntityTypesRequest, opts ...gax.CallOption) *EntityTypeIterator {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.ListEntityTypes[0:len(c.CallOptions.ListEntityTypes):len(c.CallOptions.ListEntityTypes)], opts...)
|
||||
it := &EntityTypeIterator{}
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*dialogflowpb.EntityType, string, error) {
|
||||
var resp *dialogflowpb.ListEntityTypesResponse
|
||||
req.PageToken = pageToken
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.entityTypesClient.ListEntityTypes(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return resp.EntityTypes, resp.NextPageToken, nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
return it
|
||||
}
|
||||
|
||||
// GetEntityType retrieves the specified entity type.
|
||||
func (c *EntityTypesClient) GetEntityType(ctx context.Context, req *dialogflowpb.GetEntityTypeRequest, opts ...gax.CallOption) (*dialogflowpb.EntityType, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.GetEntityType[0:len(c.CallOptions.GetEntityType):len(c.CallOptions.GetEntityType)], opts...)
|
||||
var resp *dialogflowpb.EntityType
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.entityTypesClient.GetEntityType(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// CreateEntityType creates an entity type in the specified agent.
|
||||
func (c *EntityTypesClient) CreateEntityType(ctx context.Context, req *dialogflowpb.CreateEntityTypeRequest, opts ...gax.CallOption) (*dialogflowpb.EntityType, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.CreateEntityType[0:len(c.CallOptions.CreateEntityType):len(c.CallOptions.CreateEntityType)], opts...)
|
||||
var resp *dialogflowpb.EntityType
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.entityTypesClient.CreateEntityType(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// UpdateEntityType updates the specified entity type.
|
||||
func (c *EntityTypesClient) UpdateEntityType(ctx context.Context, req *dialogflowpb.UpdateEntityTypeRequest, opts ...gax.CallOption) (*dialogflowpb.EntityType, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.UpdateEntityType[0:len(c.CallOptions.UpdateEntityType):len(c.CallOptions.UpdateEntityType)], opts...)
|
||||
var resp *dialogflowpb.EntityType
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.entityTypesClient.UpdateEntityType(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// DeleteEntityType deletes the specified entity type.
|
||||
func (c *EntityTypesClient) DeleteEntityType(ctx context.Context, req *dialogflowpb.DeleteEntityTypeRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.DeleteEntityType[0:len(c.CallOptions.DeleteEntityType):len(c.CallOptions.DeleteEntityType)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = c.entityTypesClient.DeleteEntityType(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
// BatchUpdateEntityTypes updates/Creates multiple entity types in the specified agent.
//
// Operation <response: [BatchUpdateEntityTypesResponse][google.cloud.dialogflow.v2.BatchUpdateEntityTypesResponse],
// metadata: [google.protobuf.Struct][google.protobuf.Struct]>
func (c *EntityTypesClient) BatchUpdateEntityTypes(ctx context.Context, req *dialogflowpb.BatchUpdateEntityTypesRequest, opts ...gax.CallOption) (*BatchUpdateEntityTypesOperation, error) {
	// Attach the client's x-goog-* headers to the outgoing context.
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Copy the default call options (three-index slice prevents aliasing) and
	// append any per-call options.
	opts = append(c.CallOptions.BatchUpdateEntityTypes[0:len(c.CallOptions.BatchUpdateEntityTypes):len(c.CallOptions.BatchUpdateEntityTypes)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.entityTypesClient.BatchUpdateEntityTypes(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	// Wrap the raw longrunning Operation so callers can Wait/Poll on it.
	return &BatchUpdateEntityTypesOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, resp),
	}, nil
}
|
||||
|
||||
// BatchDeleteEntityTypes deletes entity types in the specified agent.
//
// Operation <response: [google.protobuf.Empty][google.protobuf.Empty],
// metadata: [google.protobuf.Struct][google.protobuf.Struct]>
func (c *EntityTypesClient) BatchDeleteEntityTypes(ctx context.Context, req *dialogflowpb.BatchDeleteEntityTypesRequest, opts ...gax.CallOption) (*BatchDeleteEntityTypesOperation, error) {
	// Attach the client's x-goog-* headers to the outgoing context.
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Copy the default call options (three-index slice prevents aliasing) and
	// append any per-call options.
	opts = append(c.CallOptions.BatchDeleteEntityTypes[0:len(c.CallOptions.BatchDeleteEntityTypes):len(c.CallOptions.BatchDeleteEntityTypes)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.entityTypesClient.BatchDeleteEntityTypes(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	// Wrap the raw longrunning Operation so callers can Wait/Poll on it.
	return &BatchDeleteEntityTypesOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, resp),
	}, nil
}
|
||||
|
||||
// BatchCreateEntities creates multiple new entities in the specified entity type (extends the
// existing collection of entries).
//
// Operation <response: [google.protobuf.Empty][google.protobuf.Empty]>
func (c *EntityTypesClient) BatchCreateEntities(ctx context.Context, req *dialogflowpb.BatchCreateEntitiesRequest, opts ...gax.CallOption) (*BatchCreateEntitiesOperation, error) {
	// Attach the client's x-goog-* headers to the outgoing context.
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Copy the default call options (three-index slice prevents aliasing) and
	// append any per-call options.
	opts = append(c.CallOptions.BatchCreateEntities[0:len(c.CallOptions.BatchCreateEntities):len(c.CallOptions.BatchCreateEntities)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.entityTypesClient.BatchCreateEntities(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	// Wrap the raw longrunning Operation so callers can Wait/Poll on it.
	return &BatchCreateEntitiesOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, resp),
	}, nil
}
|
||||
|
||||
// BatchUpdateEntities updates entities in the specified entity type (replaces the existing
// collection of entries).
//
// Operation <response: [google.protobuf.Empty][google.protobuf.Empty],
// metadata: [google.protobuf.Struct][google.protobuf.Struct]>
func (c *EntityTypesClient) BatchUpdateEntities(ctx context.Context, req *dialogflowpb.BatchUpdateEntitiesRequest, opts ...gax.CallOption) (*BatchUpdateEntitiesOperation, error) {
	// Attach the client's x-goog-* headers to the outgoing context.
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Copy the default call options (three-index slice prevents aliasing) and
	// append any per-call options.
	opts = append(c.CallOptions.BatchUpdateEntities[0:len(c.CallOptions.BatchUpdateEntities):len(c.CallOptions.BatchUpdateEntities)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.entityTypesClient.BatchUpdateEntities(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	// Wrap the raw longrunning Operation so callers can Wait/Poll on it.
	return &BatchUpdateEntitiesOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, resp),
	}, nil
}
|
||||
|
||||
// BatchDeleteEntities deletes entities in the specified entity type.
//
// Operation <response: [google.protobuf.Empty][google.protobuf.Empty],
// metadata: [google.protobuf.Struct][google.protobuf.Struct]>
func (c *EntityTypesClient) BatchDeleteEntities(ctx context.Context, req *dialogflowpb.BatchDeleteEntitiesRequest, opts ...gax.CallOption) (*BatchDeleteEntitiesOperation, error) {
	// Attach the client's x-goog-* headers to the outgoing context.
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Copy the default call options (three-index slice prevents aliasing) and
	// append any per-call options.
	opts = append(c.CallOptions.BatchDeleteEntities[0:len(c.CallOptions.BatchDeleteEntities):len(c.CallOptions.BatchDeleteEntities)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.entityTypesClient.BatchDeleteEntities(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	// Wrap the raw longrunning Operation so callers can Wait/Poll on it.
	return &BatchDeleteEntitiesOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, resp),
	}, nil
}
|
||||
|
||||
// EntityTypeIterator manages a stream of *dialogflowpb.EntityType.
type EntityTypeIterator struct {
	items    []*dialogflowpb.EntityType // buffered results not yet returned by Next
	pageInfo *iterator.PageInfo
	nextFunc func() error // refills items; returns iterator.Done when exhausted

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*dialogflowpb.EntityType, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *EntityTypeIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *EntityTypeIterator) Next() (*dialogflowpb.EntityType, error) {
	var item *dialogflowpb.EntityType
	// nextFunc is expected to leave at least one item buffered on success.
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	// Pop the first buffered item.
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

// bufLen reports how many fetched items are buffered but not yet returned.
func (it *EntityTypeIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered items to the pagination machinery and clears the buffer.
func (it *EntityTypeIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
|
||||
|
||||
// BatchCreateEntitiesOperation manages a long-running operation from BatchCreateEntities.
type BatchCreateEntitiesOperation struct {
	lro *longrunning.Operation
}

// BatchCreateEntitiesOperation returns a new BatchCreateEntitiesOperation from a given name.
// The name must be that of a previously created BatchCreateEntitiesOperation, possibly from a different process.
func (c *EntityTypesClient) BatchCreateEntitiesOperation(name string) *BatchCreateEntitiesOperation {
	return &BatchCreateEntitiesOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
	}
}

// Wait blocks until the long-running operation is completed, returning any error encountered.
//
// See documentation of Poll for error-handling information.
func (op *BatchCreateEntitiesOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
	// nil response target: this operation's response type is Empty.
	// Polls at a fixed 5s interval.
	return op.lro.WaitWithInterval(ctx, nil, 5000*time.Millisecond, opts...)
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully, op.Done will return true.
func (op *BatchCreateEntitiesOperation) Poll(ctx context.Context, opts ...gax.CallOption) error {
	return op.lro.Poll(ctx, nil, opts...)
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *BatchCreateEntitiesOperation) Metadata() (*structpbpb.Struct, error) {
	var meta structpbpb.Struct
	// ErrNoMetadata is not an error for callers: report it as (nil, nil).
	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *BatchCreateEntitiesOperation) Done() bool {
	return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *BatchCreateEntitiesOperation) Name() string {
	return op.lro.Name()
}
|
||||
|
||||
// BatchDeleteEntitiesOperation manages a long-running operation from BatchDeleteEntities.
type BatchDeleteEntitiesOperation struct {
	lro *longrunning.Operation
}

// BatchDeleteEntitiesOperation returns a new BatchDeleteEntitiesOperation from a given name.
// The name must be that of a previously created BatchDeleteEntitiesOperation, possibly from a different process.
func (c *EntityTypesClient) BatchDeleteEntitiesOperation(name string) *BatchDeleteEntitiesOperation {
	return &BatchDeleteEntitiesOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
	}
}

// Wait blocks until the long-running operation is completed, returning any error encountered.
//
// See documentation of Poll for error-handling information.
func (op *BatchDeleteEntitiesOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
	// nil response target: this operation's response type is Empty.
	// Polls at a fixed 5s interval.
	return op.lro.WaitWithInterval(ctx, nil, 5000*time.Millisecond, opts...)
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully, op.Done will return true.
func (op *BatchDeleteEntitiesOperation) Poll(ctx context.Context, opts ...gax.CallOption) error {
	return op.lro.Poll(ctx, nil, opts...)
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *BatchDeleteEntitiesOperation) Metadata() (*structpbpb.Struct, error) {
	var meta structpbpb.Struct
	// ErrNoMetadata is not an error for callers: report it as (nil, nil).
	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *BatchDeleteEntitiesOperation) Done() bool {
	return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *BatchDeleteEntitiesOperation) Name() string {
	return op.lro.Name()
}
|
||||
|
||||
// BatchDeleteEntityTypesOperation manages a long-running operation from BatchDeleteEntityTypes.
type BatchDeleteEntityTypesOperation struct {
	lro *longrunning.Operation
}

// BatchDeleteEntityTypesOperation returns a new BatchDeleteEntityTypesOperation from a given name.
// The name must be that of a previously created BatchDeleteEntityTypesOperation, possibly from a different process.
func (c *EntityTypesClient) BatchDeleteEntityTypesOperation(name string) *BatchDeleteEntityTypesOperation {
	return &BatchDeleteEntityTypesOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
	}
}

// Wait blocks until the long-running operation is completed, returning any error encountered.
//
// See documentation of Poll for error-handling information.
func (op *BatchDeleteEntityTypesOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
	// nil response target: this operation's response type is Empty.
	// Polls at a fixed 5s interval.
	return op.lro.WaitWithInterval(ctx, nil, 5000*time.Millisecond, opts...)
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully, op.Done will return true.
func (op *BatchDeleteEntityTypesOperation) Poll(ctx context.Context, opts ...gax.CallOption) error {
	return op.lro.Poll(ctx, nil, opts...)
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *BatchDeleteEntityTypesOperation) Metadata() (*structpbpb.Struct, error) {
	var meta structpbpb.Struct
	// ErrNoMetadata is not an error for callers: report it as (nil, nil).
	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *BatchDeleteEntityTypesOperation) Done() bool {
	return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *BatchDeleteEntityTypesOperation) Name() string {
	return op.lro.Name()
}
|
||||
|
||||
// BatchUpdateEntitiesOperation manages a long-running operation from BatchUpdateEntities.
type BatchUpdateEntitiesOperation struct {
	lro *longrunning.Operation
}

// BatchUpdateEntitiesOperation returns a new BatchUpdateEntitiesOperation from a given name.
// The name must be that of a previously created BatchUpdateEntitiesOperation, possibly from a different process.
func (c *EntityTypesClient) BatchUpdateEntitiesOperation(name string) *BatchUpdateEntitiesOperation {
	return &BatchUpdateEntitiesOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
	}
}

// Wait blocks until the long-running operation is completed, returning any error encountered.
//
// See documentation of Poll for error-handling information.
func (op *BatchUpdateEntitiesOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
	// nil response target: this operation's response type is Empty.
	// Polls at a fixed 5s interval.
	return op.lro.WaitWithInterval(ctx, nil, 5000*time.Millisecond, opts...)
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully, op.Done will return true.
func (op *BatchUpdateEntitiesOperation) Poll(ctx context.Context, opts ...gax.CallOption) error {
	return op.lro.Poll(ctx, nil, opts...)
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *BatchUpdateEntitiesOperation) Metadata() (*structpbpb.Struct, error) {
	var meta structpbpb.Struct
	// ErrNoMetadata is not an error for callers: report it as (nil, nil).
	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *BatchUpdateEntitiesOperation) Done() bool {
	return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *BatchUpdateEntitiesOperation) Name() string {
	return op.lro.Name()
}
|
||||
|
||||
// BatchUpdateEntityTypesOperation manages a long-running operation from BatchUpdateEntityTypes.
type BatchUpdateEntityTypesOperation struct {
	lro *longrunning.Operation
}

// BatchUpdateEntityTypesOperation returns a new BatchUpdateEntityTypesOperation from a given name.
// The name must be that of a previously created BatchUpdateEntityTypesOperation, possibly from a different process.
func (c *EntityTypesClient) BatchUpdateEntityTypesOperation(name string) *BatchUpdateEntityTypesOperation {
	return &BatchUpdateEntityTypesOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
	}
}

// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *BatchUpdateEntityTypesOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*dialogflowpb.BatchUpdateEntityTypesResponse, error) {
	// Unlike the Empty-response operations, this one unmarshals the
	// operation result into resp. Polls at a fixed 5s interval.
	var resp dialogflowpb.BatchUpdateEntityTypesResponse
	if err := op.lro.WaitWithInterval(ctx, &resp, 5000*time.Millisecond, opts...); err != nil {
		return nil, err
	}
	return &resp, nil
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *BatchUpdateEntityTypesOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*dialogflowpb.BatchUpdateEntityTypesResponse, error) {
	var resp dialogflowpb.BatchUpdateEntityTypesResponse
	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
		return nil, err
	}
	// A successful poll of an unfinished operation yields no response yet.
	if !op.Done() {
		return nil, nil
	}
	return &resp, nil
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *BatchUpdateEntityTypesOperation) Metadata() (*structpbpb.Struct, error) {
	var meta structpbpb.Struct
	// ErrNoMetadata is not an error for callers: report it as (nil, nil).
	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *BatchUpdateEntityTypesOperation) Done() bool {
	return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *BatchUpdateEntityTypesOperation) Name() string {
	return op.lro.Name()
}
|
||||
227
vendor/cloud.google.com/go/dialogflow/apiv2/entity_types_client_example_test.go
generated
vendored
Normal file
227
vendor/cloud.google.com/go/dialogflow/apiv2/entity_types_client_example_test.go
generated
vendored
Normal file
@@ -0,0 +1,227 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package dialogflow_test
|
||||
|
||||
import (
|
||||
"cloud.google.com/go/dialogflow/apiv2"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
dialogflowpb "google.golang.org/genproto/googleapis/cloud/dialogflow/v2"
|
||||
)
|
||||
|
||||
// ExampleNewEntityTypesClient demonstrates constructing an EntityTypesClient.
func ExampleNewEntityTypesClient() {
	ctx := context.Background()
	c, err := dialogflow.NewEntityTypesClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}
|
||||
|
||||
// ExampleEntityTypesClient_ListEntityTypes demonstrates iterating over all
// entity types with the paged iterator returned by ListEntityTypes.
func ExampleEntityTypesClient_ListEntityTypes() {
	ctx := context.Background()
	c, err := dialogflow.NewEntityTypesClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dialogflowpb.ListEntityTypesRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListEntityTypes(ctx, req)
	for {
		resp, err := it.Next()
		// iterator.Done signals normal end of iteration, not a failure.
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
|
||||
|
||||
// ExampleEntityTypesClient_GetEntityType demonstrates fetching a single entity type.
func ExampleEntityTypesClient_GetEntityType() {
	ctx := context.Background()
	c, err := dialogflow.NewEntityTypesClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dialogflowpb.GetEntityTypeRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetEntityType(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
|
||||
|
||||
// ExampleEntityTypesClient_CreateEntityType demonstrates creating an entity type.
func ExampleEntityTypesClient_CreateEntityType() {
	ctx := context.Background()
	c, err := dialogflow.NewEntityTypesClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dialogflowpb.CreateEntityTypeRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateEntityType(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
|
||||
|
||||
// ExampleEntityTypesClient_UpdateEntityType demonstrates updating an entity type.
func ExampleEntityTypesClient_UpdateEntityType() {
	ctx := context.Background()
	c, err := dialogflow.NewEntityTypesClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dialogflowpb.UpdateEntityTypeRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateEntityType(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
|
||||
|
||||
// ExampleEntityTypesClient_DeleteEntityType demonstrates deleting an entity type.
func ExampleEntityTypesClient_DeleteEntityType() {
	ctx := context.Background()
	c, err := dialogflow.NewEntityTypesClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dialogflowpb.DeleteEntityTypeRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteEntityType(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}
|
||||
|
||||
// ExampleEntityTypesClient_BatchUpdateEntityTypes demonstrates starting the
// batch-update long-running operation and waiting for its response.
func ExampleEntityTypesClient_BatchUpdateEntityTypes() {
	ctx := context.Background()
	c, err := dialogflow.NewEntityTypesClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dialogflowpb.BatchUpdateEntityTypesRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.BatchUpdateEntityTypes(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}

	resp, err := op.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
|
||||
|
||||
// ExampleEntityTypesClient_BatchDeleteEntityTypes demonstrates starting the
// batch-delete long-running operation and waiting for completion (no response body).
func ExampleEntityTypesClient_BatchDeleteEntityTypes() {
	ctx := context.Background()
	c, err := dialogflow.NewEntityTypesClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dialogflowpb.BatchDeleteEntityTypesRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.BatchDeleteEntityTypes(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}

	err = op.Wait(ctx)
	// TODO: Handle error.
}
|
||||
|
||||
// ExampleEntityTypesClient_BatchCreateEntities demonstrates starting the
// batch-create long-running operation and waiting for completion (no response body).
func ExampleEntityTypesClient_BatchCreateEntities() {
	ctx := context.Background()
	c, err := dialogflow.NewEntityTypesClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dialogflowpb.BatchCreateEntitiesRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.BatchCreateEntities(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}

	err = op.Wait(ctx)
	// TODO: Handle error.
}
|
||||
|
||||
// ExampleEntityTypesClient_BatchUpdateEntities demonstrates starting the
// batch-update-entities long-running operation and waiting for completion (no response body).
func ExampleEntityTypesClient_BatchUpdateEntities() {
	ctx := context.Background()
	c, err := dialogflow.NewEntityTypesClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dialogflowpb.BatchUpdateEntitiesRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.BatchUpdateEntities(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}

	err = op.Wait(ctx)
	// TODO: Handle error.
}
|
||||
|
||||
// ExampleEntityTypesClient_BatchDeleteEntities demonstrates starting the
// batch-delete-entities long-running operation and waiting for completion (no response body).
func ExampleEntityTypesClient_BatchDeleteEntities() {
	ctx := context.Background()
	c, err := dialogflow.NewEntityTypesClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dialogflowpb.BatchDeleteEntitiesRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.BatchDeleteEntities(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}

	err = op.Wait(ctx)
	// TODO: Handle error.
}
|
||||
482
vendor/cloud.google.com/go/dialogflow/apiv2/intents_client.go
generated
vendored
Normal file
482
vendor/cloud.google.com/go/dialogflow/apiv2/intents_client.go
generated
vendored
Normal file
@@ -0,0 +1,482 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package dialogflow
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/version"
|
||||
"cloud.google.com/go/longrunning"
|
||||
lroauto "cloud.google.com/go/longrunning/autogen"
|
||||
structpbpb "github.com/golang/protobuf/ptypes/struct"
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/transport"
|
||||
dialogflowpb "google.golang.org/genproto/googleapis/cloud/dialogflow/v2"
|
||||
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// IntentsCallOptions contains the retry settings for each method of IntentsClient.
type IntentsCallOptions struct {
	ListIntents        []gax.CallOption
	GetIntent          []gax.CallOption
	CreateIntent       []gax.CallOption
	UpdateIntent       []gax.CallOption
	DeleteIntent       []gax.CallOption
	BatchUpdateIntents []gax.CallOption
	BatchDeleteIntents []gax.CallOption
}
|
||||
|
||||
// defaultIntentsClientOptions returns the client options used when the caller
// supplies none: the Dialogflow endpoint and the package's default auth scopes.
func defaultIntentsClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("dialogflow.googleapis.com:443"),
		option.WithScopes(DefaultAuthScopes()...),
	}
}
|
||||
|
||||
// defaultIntentsCallOptions returns the per-method retry policies for IntentsClient.
func defaultIntentsCallOptions() *IntentsCallOptions {
	// Only the ("default", "idempotent") key is populated: retry on
	// DeadlineExceeded/Unavailable with exponential backoff (100ms initial,
	// 60s cap, 1.3 multiplier).
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	// Lookups with the "non_idempotent" key miss the map and yield nil,
	// i.e. no retry for those methods.
	return &IntentsCallOptions{
		ListIntents:        retry[[2]string{"default", "idempotent"}],
		GetIntent:          retry[[2]string{"default", "idempotent"}],
		CreateIntent:       retry[[2]string{"default", "non_idempotent"}],
		UpdateIntent:       retry[[2]string{"default", "non_idempotent"}],
		DeleteIntent:       retry[[2]string{"default", "idempotent"}],
		BatchUpdateIntents: retry[[2]string{"default", "non_idempotent"}],
		BatchDeleteIntents: retry[[2]string{"default", "idempotent"}],
	}
}
|
||||
|
||||
// IntentsClient is a client for interacting with Dialogflow API.
type IntentsClient struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	intentsClient dialogflowpb.IntentsClient

	// LROClient is used internally to handle longrunning operations.
	// It is exposed so that its CallOptions can be modified if required.
	// Users should not Close this client.
	LROClient *lroauto.OperationsClient

	// The call options for this service.
	CallOptions *IntentsCallOptions

	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}
|
||||
|
||||
// NewIntentsClient creates a new intents client.
//
// An intent represents a mapping between input from a user and an action to
// be taken by your application. When you pass user input to the
// [DetectIntent][google.cloud.dialogflow.v2.Sessions.DetectIntent] (or
// [StreamingDetectIntent][google.cloud.dialogflow.v2.Sessions.StreamingDetectIntent]) method, the
// Dialogflow API analyzes the input and searches
// for a matching intent. If no match is found, the Dialogflow API returns a
// fallback intent (`is_fallback` = true).
//
// You can provide additional information for the Dialogflow API to use to
// match user input to an intent by adding the following to your intent.
//
// * **Contexts** - provide additional context for intent analysis. For
// example, if an intent is related to an object in your application that
// plays music, you can provide a context to determine when to match the
// intent if the user input is “turn it off”. You can include a context
// that matches the intent when there is previous user input of
// "play music", and not when there is previous user input of
// "turn on the light".
//
// * **Events** - allow for matching an intent by using an event name
// instead of user input. Your application can provide an event name and
// related parameters to the Dialogflow API to match an intent. For
// example, when your application starts, you can send a welcome event
// with a user name parameter to the Dialogflow API to match an intent with
// a personalized welcome message for the user.
//
// * **Training phrases** - provide examples of user input to train the
// Dialogflow API agent to better match intents.
//
// For more information about intents, see the
// [Dialogflow documentation](https://dialogflow.com/docs/intents).
func NewIntentsClient(ctx context.Context, opts ...option.ClientOption) (*IntentsClient, error) {
	// Caller options come after the defaults so they can override them.
	conn, err := transport.DialGRPC(ctx, append(defaultIntentsClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &IntentsClient{
		conn:        conn,
		CallOptions: defaultIntentsCallOptions(),

		intentsClient: dialogflowpb.NewIntentsClient(conn),
	}
	c.setGoogleClientInfo()

	// The operations client shares conn, so it performs no extra dialing.
	c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
	if err != nil {
		// This error "should not happen", since we are just reusing old connection
		// and never actually need to dial.
		// If this does happen, we could leak conn. However, we cannot close conn:
		// If the user invoked the function with option.WithGRPCConn,
		// we would close a connection that's still in use.
		// TODO(pongad): investigate error conditions.
		return nil, err
	}
	return c, nil
}
|
||||
|
||||
// Connection returns the client's connection to the API service.
func (c *IntentsClient) Connection() *grpc.ClientConn {
	return c.conn
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *IntentsClient) Close() error {
	return c.conn.Close()
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *IntentsClient) setGoogleClientInfo(keyval ...string) {
	// Caller-supplied key/value pairs are appended after the Go runtime
	// version and before the gapic/gax/grpc library versions.
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
|
||||
|
||||
// ListIntents returns the list of all intents in the specified agent.
// The returned iterator fetches pages lazily; RPC errors surface from
// IntentIterator.Next rather than from this call.
func (c *IntentsClient) ListIntents(ctx context.Context, req *dialogflowpb.ListIntentsRequest, opts ...gax.CallOption) *IntentIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression caps capacity so appending caller opts cannot
	// mutate the shared default CallOptions backing array.
	opts = append(c.CallOptions.ListIntents[0:len(c.CallOptions.ListIntents):len(c.CallOptions.ListIntents)], opts...)
	it := &IntentIterator{}
	// InternalFetch performs one ListIntents RPC for a single page.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*dialogflowpb.Intent, string, error) {
		var resp *dialogflowpb.ListIntentsResponse
		req.PageToken = pageToken
		// Clamp: the proto field is int32 but the iterator API uses int.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.intentsClient.ListIntents(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.Intents, resp.NextPageToken, nil
	}
	// fetch adapts InternalFetch to the iterator package's buffering model.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
|
||||
|
||||
// GetIntent retrieves the specified intent.
func (c *IntentsClient) GetIntent(ctx context.Context, req *dialogflowpb.GetIntentRequest, opts ...gax.CallOption) (*dialogflowpb.Intent, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression caps capacity so appending caller opts cannot
	// mutate the shared default CallOptions backing array.
	opts = append(c.CallOptions.GetIntent[0:len(c.CallOptions.GetIntent):len(c.CallOptions.GetIntent)], opts...)
	var resp *dialogflowpb.Intent
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.intentsClient.GetIntent(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
|
||||
|
||||
// CreateIntent creates an intent in the specified agent.
func (c *IntentsClient) CreateIntent(ctx context.Context, req *dialogflowpb.CreateIntentRequest, opts ...gax.CallOption) (*dialogflowpb.Intent, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression caps capacity so appending caller opts cannot
	// mutate the shared default CallOptions backing array.
	opts = append(c.CallOptions.CreateIntent[0:len(c.CallOptions.CreateIntent):len(c.CallOptions.CreateIntent)], opts...)
	var resp *dialogflowpb.Intent
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.intentsClient.CreateIntent(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
|
||||
|
||||
// UpdateIntent updates the specified intent.
func (c *IntentsClient) UpdateIntent(ctx context.Context, req *dialogflowpb.UpdateIntentRequest, opts ...gax.CallOption) (*dialogflowpb.Intent, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression caps capacity so appending caller opts cannot
	// mutate the shared default CallOptions backing array.
	opts = append(c.CallOptions.UpdateIntent[0:len(c.CallOptions.UpdateIntent):len(c.CallOptions.UpdateIntent)], opts...)
	var resp *dialogflowpb.Intent
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.intentsClient.UpdateIntent(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
|
||||
|
||||
// DeleteIntent deletes the specified intent.
func (c *IntentsClient) DeleteIntent(ctx context.Context, req *dialogflowpb.DeleteIntentRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression caps capacity so appending caller opts cannot
	// mutate the shared default CallOptions backing array.
	opts = append(c.CallOptions.DeleteIntent[0:len(c.CallOptions.DeleteIntent):len(c.CallOptions.DeleteIntent)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		// Response is google.protobuf.Empty; only the error matters.
		_, err = c.intentsClient.DeleteIntent(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
|
||||
|
||||
// BatchUpdateIntents updates/Creates multiple intents in the specified agent.
//
// Operation <response: [BatchUpdateIntentsResponse][google.cloud.dialogflow.v2.BatchUpdateIntentsResponse]>
//
// The returned handle only starts the long-running operation; use its
// Wait or Poll methods to obtain the final result.
func (c *IntentsClient) BatchUpdateIntents(ctx context.Context, req *dialogflowpb.BatchUpdateIntentsRequest, opts ...gax.CallOption) (*BatchUpdateIntentsOperation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression caps capacity so appending caller opts cannot
	// mutate the shared default CallOptions backing array.
	opts = append(c.CallOptions.BatchUpdateIntents[0:len(c.CallOptions.BatchUpdateIntents):len(c.CallOptions.BatchUpdateIntents)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.intentsClient.BatchUpdateIntents(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return &BatchUpdateIntentsOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, resp),
	}, nil
}
|
||||
|
||||
// BatchDeleteIntents deletes intents in the specified agent.
//
// Operation <response: [google.protobuf.Empty][google.protobuf.Empty]>
//
// The returned handle only starts the long-running operation; use its
// Wait or Poll methods to observe completion.
func (c *IntentsClient) BatchDeleteIntents(ctx context.Context, req *dialogflowpb.BatchDeleteIntentsRequest, opts ...gax.CallOption) (*BatchDeleteIntentsOperation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression caps capacity so appending caller opts cannot
	// mutate the shared default CallOptions backing array.
	opts = append(c.CallOptions.BatchDeleteIntents[0:len(c.CallOptions.BatchDeleteIntents):len(c.CallOptions.BatchDeleteIntents)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.intentsClient.BatchDeleteIntents(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return &BatchDeleteIntentsOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, resp),
	}, nil
}
|
||||
|
||||
// IntentIterator manages a stream of *dialogflowpb.Intent.
type IntentIterator struct {
	// items buffers the current page of results not yet returned by Next.
	items    []*dialogflowpb.Intent
	pageInfo *iterator.PageInfo
	// nextFunc is supplied by iterator.NewPageInfo and fetches more pages.
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*dialogflowpb.Intent, nextPageToken string, err error)
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *IntentIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *IntentIterator) Next() (*dialogflowpb.Intent, error) {
	var item *dialogflowpb.Intent
	// nextFunc refills it.items as needed and reports Done/RPC errors.
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	// Pop the first buffered item.
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}
|
||||
|
||||
// bufLen reports the number of buffered, not-yet-consumed items
// (callback for iterator.NewPageInfo).
func (it *IntentIterator) bufLen() int {
	return len(it.items)
}
|
||||
|
||||
// takeBuf hands the buffered items to the iterator framework and clears the
// buffer (callback for iterator.NewPageInfo).
func (it *IntentIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
|
||||
|
||||
// BatchDeleteIntentsOperation manages a long-running operation from BatchDeleteIntents.
type BatchDeleteIntentsOperation struct {
	lro *longrunning.Operation
}
|
||||
|
||||
// BatchDeleteIntentsOperation returns a new BatchDeleteIntentsOperation from a given name.
// The name must be that of a previously created BatchDeleteIntentsOperation, possibly from a different process.
func (c *IntentsClient) BatchDeleteIntentsOperation(name string) *BatchDeleteIntentsOperation {
	// Only the name is needed; state is fetched lazily on Poll/Wait.
	return &BatchDeleteIntentsOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
	}
}
|
||||
|
||||
// Wait blocks until the long-running operation is completed, returning any error encountered.
//
// See documentation of Poll for error-handling information.
func (op *BatchDeleteIntentsOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
	// nil response target: the operation's response is google.protobuf.Empty.
	// Polls at a 5s interval.
	return op.lro.WaitWithInterval(ctx, nil, 5000*time.Millisecond, opts...)
}
|
||||
|
||||
// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully, op.Done will return true.
func (op *BatchDeleteIntentsOperation) Poll(ctx context.Context, opts ...gax.CallOption) error {
	// nil response target: the operation's response is google.protobuf.Empty.
	return op.lro.Poll(ctx, nil, opts...)
}
|
||||
|
||||
// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
//
// NOTE(review): structpbpb is presumably the alias for the generated
// google.protobuf.Struct package, declared in the import block above this
// excerpt — confirm against the full file.
func (op *BatchDeleteIntentsOperation) Metadata() (*structpbpb.Struct, error) {
	var meta structpbpb.Struct
	// ErrNoMetadata is deliberately mapped to (nil, nil), not an error.
	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return &meta, nil
}
|
||||
|
||||
// Done reports whether the long-running operation has completed.
func (op *BatchDeleteIntentsOperation) Done() bool {
	return op.lro.Done()
}
|
||||
|
||||
// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *BatchDeleteIntentsOperation) Name() string {
	return op.lro.Name()
}
|
||||
|
||||
// BatchUpdateIntentsOperation manages a long-running operation from BatchUpdateIntents.
type BatchUpdateIntentsOperation struct {
	lro *longrunning.Operation
}
|
||||
|
||||
// BatchUpdateIntentsOperation returns a new BatchUpdateIntentsOperation from a given name.
// The name must be that of a previously created BatchUpdateIntentsOperation, possibly from a different process.
func (c *IntentsClient) BatchUpdateIntentsOperation(name string) *BatchUpdateIntentsOperation {
	// Only the name is needed; state is fetched lazily on Poll/Wait.
	return &BatchUpdateIntentsOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
	}
}
|
||||
|
||||
// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *BatchUpdateIntentsOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*dialogflowpb.BatchUpdateIntentsResponse, error) {
	var resp dialogflowpb.BatchUpdateIntentsResponse
	// Polls at a 5s interval until terminal state, unmarshaling into resp.
	if err := op.lro.WaitWithInterval(ctx, &resp, 5000*time.Millisecond, opts...); err != nil {
		return nil, err
	}
	return &resp, nil
}
|
||||
|
||||
// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *BatchUpdateIntentsOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*dialogflowpb.BatchUpdateIntentsResponse, error) {
	var resp dialogflowpb.BatchUpdateIntentsResponse
	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
		return nil, err
	}
	// Not finished yet: by contract both return values are nil.
	if !op.Done() {
		return nil, nil
	}
	return &resp, nil
}
|
||||
|
||||
// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
//
// NOTE(review): structpbpb is presumably the alias for the generated
// google.protobuf.Struct package, declared in the import block above this
// excerpt — confirm against the full file.
func (op *BatchUpdateIntentsOperation) Metadata() (*structpbpb.Struct, error) {
	var meta structpbpb.Struct
	// ErrNoMetadata is deliberately mapped to (nil, nil), not an error.
	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return &meta, nil
}
|
||||
|
||||
// Done reports whether the long-running operation has completed.
func (op *BatchUpdateIntentsOperation) Done() bool {
	return op.lro.Done()
}
|
||||
|
||||
// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *BatchUpdateIntentsOperation) Name() string {
	return op.lro.Name()
}
|
||||
170
vendor/cloud.google.com/go/dialogflow/apiv2/intents_client_example_test.go
generated
vendored
Normal file
170
vendor/cloud.google.com/go/dialogflow/apiv2/intents_client_example_test.go
generated
vendored
Normal file
@@ -0,0 +1,170 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package dialogflow_test
|
||||
|
||||
import (
|
||||
"cloud.google.com/go/dialogflow/apiv2"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
dialogflowpb "google.golang.org/genproto/googleapis/cloud/dialogflow/v2"
|
||||
)
|
||||
|
||||
// ExampleNewIntentsClient demonstrates constructing an IntentsClient with
// default options. Example functions are compiled (not run) by go test and
// rendered in godoc.
func ExampleNewIntentsClient() {
	ctx := context.Background()
	c, err := dialogflow.NewIntentsClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}
|
||||
|
||||
// ExampleIntentsClient_ListIntents demonstrates draining the paginated
// ListIntents iterator until iterator.Done.
func ExampleIntentsClient_ListIntents() {
	ctx := context.Background()
	c, err := dialogflow.NewIntentsClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dialogflowpb.ListIntentsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListIntents(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
|
||||
|
||||
// ExampleIntentsClient_GetIntent demonstrates a simple unary GetIntent call.
func ExampleIntentsClient_GetIntent() {
	ctx := context.Background()
	c, err := dialogflow.NewIntentsClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dialogflowpb.GetIntentRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetIntent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
|
||||
|
||||
// ExampleIntentsClient_CreateIntent demonstrates a simple unary CreateIntent call.
func ExampleIntentsClient_CreateIntent() {
	ctx := context.Background()
	c, err := dialogflow.NewIntentsClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dialogflowpb.CreateIntentRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateIntent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
|
||||
|
||||
// ExampleIntentsClient_UpdateIntent demonstrates a simple unary UpdateIntent call.
func ExampleIntentsClient_UpdateIntent() {
	ctx := context.Background()
	c, err := dialogflow.NewIntentsClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dialogflowpb.UpdateIntentRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateIntent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
|
||||
|
||||
// ExampleIntentsClient_DeleteIntent demonstrates DeleteIntent, which returns
// only an error (no response body).
func ExampleIntentsClient_DeleteIntent() {
	ctx := context.Background()
	c, err := dialogflow.NewIntentsClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dialogflowpb.DeleteIntentRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteIntent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}
|
||||
|
||||
// ExampleIntentsClient_BatchUpdateIntents demonstrates starting the
// long-running BatchUpdateIntents operation and blocking on its result
// with Wait.
func ExampleIntentsClient_BatchUpdateIntents() {
	ctx := context.Background()
	c, err := dialogflow.NewIntentsClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dialogflowpb.BatchUpdateIntentsRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.BatchUpdateIntents(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}

	resp, err := op.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
|
||||
|
||||
// ExampleIntentsClient_BatchDeleteIntents demonstrates starting the
// long-running BatchDeleteIntents operation; its Wait returns only an error
// because the operation's response type is Empty.
func ExampleIntentsClient_BatchDeleteIntents() {
	ctx := context.Background()
	c, err := dialogflow.NewIntentsClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dialogflowpb.BatchDeleteIntentsRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.BatchDeleteIntents(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}

	err = op.Wait(ctx)
	// TODO: Handle error.
}
|
||||
3149
vendor/cloud.google.com/go/dialogflow/apiv2/mock_test.go
generated
vendored
Normal file
3149
vendor/cloud.google.com/go/dialogflow/apiv2/mock_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
274
vendor/cloud.google.com/go/dialogflow/apiv2/session_entity_types_client.go
generated
vendored
Normal file
274
vendor/cloud.google.com/go/dialogflow/apiv2/session_entity_types_client.go
generated
vendored
Normal file
@@ -0,0 +1,274 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package dialogflow
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/version"
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/transport"
|
||||
dialogflowpb "google.golang.org/genproto/googleapis/cloud/dialogflow/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// SessionEntityTypesCallOptions contains the retry settings for each method of SessionEntityTypesClient.
// Each field holds the gax call options (retry predicate and backoff policy)
// applied to the correspondingly named RPC.
type SessionEntityTypesCallOptions struct {
	ListSessionEntityTypes  []gax.CallOption
	GetSessionEntityType    []gax.CallOption
	CreateSessionEntityType []gax.CallOption
	UpdateSessionEntityType []gax.CallOption
	DeleteSessionEntityType []gax.CallOption
}
|
||||
|
||||
// defaultSessionEntityTypesClientOptions returns the default dial options for
// the SessionEntityTypes service: the production endpoint and the service's
// OAuth scopes. Caller-supplied options are appended after these and take
// precedence.
func defaultSessionEntityTypesClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("dialogflow.googleapis.com:443"),
		option.WithScopes(DefaultAuthScopes()...),
	}
}
|
||||
|
||||
// defaultSessionEntityTypesCallOptions builds the per-method retry settings.
// Idempotent methods retry on DeadlineExceeded and Unavailable with
// exponential backoff (100ms initial, 60s cap, multiplier 1.3); the
// "non_idempotent" map lookups miss and yield nil, i.e. no retry.
func defaultSessionEntityTypesCallOptions() *SessionEntityTypesCallOptions {
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	return &SessionEntityTypesCallOptions{
		ListSessionEntityTypes:  retry[[2]string{"default", "idempotent"}],
		GetSessionEntityType:    retry[[2]string{"default", "idempotent"}],
		CreateSessionEntityType: retry[[2]string{"default", "non_idempotent"}],
		UpdateSessionEntityType: retry[[2]string{"default", "non_idempotent"}],
		DeleteSessionEntityType: retry[[2]string{"default", "idempotent"}],
	}
}
|
||||
|
||||
// SessionEntityTypesClient is a client for interacting with Dialogflow API.
type SessionEntityTypesClient struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	sessionEntityTypesClient dialogflowpb.SessionEntityTypesClient

	// The call options for this service.
	CallOptions *SessionEntityTypesCallOptions

	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}
|
||||
|
||||
// NewSessionEntityTypesClient creates a new session entity types client.
//
// Entities are extracted from user input and represent parameters that are
// meaningful to your application. For example, a date range, a proper name
// such as a geographic location or landmark, and so on. Entities represent
// actionable data for your application.
//
// Session entity types are referred to as **User** entity types and are
// entities that are built for an individual user such as
// favorites, preferences, playlists, and so on. You can redefine a session
// entity type at the session level.
//
// For more information about entity types, see the
// [Dialogflow documentation](https://dialogflow.com/docs/entities).
func NewSessionEntityTypesClient(ctx context.Context, opts ...option.ClientOption) (*SessionEntityTypesClient, error) {
	// Caller options come after the defaults so they can override them.
	conn, err := transport.DialGRPC(ctx, append(defaultSessionEntityTypesClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &SessionEntityTypesClient{
		conn:        conn,
		CallOptions: defaultSessionEntityTypesCallOptions(),

		sessionEntityTypesClient: dialogflowpb.NewSessionEntityTypesClient(conn),
	}
	c.setGoogleClientInfo()
	return c, nil
}
|
||||
|
||||
// Connection returns the client's connection to the API service.
func (c *SessionEntityTypesClient) Connection() *grpc.ClientConn {
	return c.conn
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *SessionEntityTypesClient) Close() error {
	return c.conn.Close()
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *SessionEntityTypesClient) setGoogleClientInfo(keyval ...string) {
	// Caller-supplied key/value pairs are appended after the Go runtime
	// version and before the gapic/gax/grpc library versions.
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
|
||||
|
||||
// ListSessionEntityTypes returns the list of all session entity types in the specified session.
// The returned iterator fetches pages lazily; RPC errors surface from
// SessionEntityTypeIterator.Next rather than from this call.
func (c *SessionEntityTypesClient) ListSessionEntityTypes(ctx context.Context, req *dialogflowpb.ListSessionEntityTypesRequest, opts ...gax.CallOption) *SessionEntityTypeIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression caps capacity so appending caller opts cannot
	// mutate the shared default CallOptions backing array.
	opts = append(c.CallOptions.ListSessionEntityTypes[0:len(c.CallOptions.ListSessionEntityTypes):len(c.CallOptions.ListSessionEntityTypes)], opts...)
	it := &SessionEntityTypeIterator{}
	// InternalFetch performs one ListSessionEntityTypes RPC for a single page.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*dialogflowpb.SessionEntityType, string, error) {
		var resp *dialogflowpb.ListSessionEntityTypesResponse
		req.PageToken = pageToken
		// Clamp: the proto field is int32 but the iterator API uses int.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.sessionEntityTypesClient.ListSessionEntityTypes(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.SessionEntityTypes, resp.NextPageToken, nil
	}
	// fetch adapts InternalFetch to the iterator package's buffering model.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
|
||||
|
||||
// GetSessionEntityType retrieves the specified session entity type.
func (c *SessionEntityTypesClient) GetSessionEntityType(ctx context.Context, req *dialogflowpb.GetSessionEntityTypeRequest, opts ...gax.CallOption) (*dialogflowpb.SessionEntityType, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression caps capacity so appending caller opts cannot
	// mutate the shared default CallOptions backing array.
	opts = append(c.CallOptions.GetSessionEntityType[0:len(c.CallOptions.GetSessionEntityType):len(c.CallOptions.GetSessionEntityType)], opts...)
	var resp *dialogflowpb.SessionEntityType
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.sessionEntityTypesClient.GetSessionEntityType(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
|
||||
|
||||
// CreateSessionEntityType creates a session entity type.
|
||||
func (c *SessionEntityTypesClient) CreateSessionEntityType(ctx context.Context, req *dialogflowpb.CreateSessionEntityTypeRequest, opts ...gax.CallOption) (*dialogflowpb.SessionEntityType, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.CreateSessionEntityType[0:len(c.CallOptions.CreateSessionEntityType):len(c.CallOptions.CreateSessionEntityType)], opts...)
|
||||
var resp *dialogflowpb.SessionEntityType
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.sessionEntityTypesClient.CreateSessionEntityType(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// UpdateSessionEntityType updates the specified session entity type.
|
||||
func (c *SessionEntityTypesClient) UpdateSessionEntityType(ctx context.Context, req *dialogflowpb.UpdateSessionEntityTypeRequest, opts ...gax.CallOption) (*dialogflowpb.SessionEntityType, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.UpdateSessionEntityType[0:len(c.CallOptions.UpdateSessionEntityType):len(c.CallOptions.UpdateSessionEntityType)], opts...)
|
||||
var resp *dialogflowpb.SessionEntityType
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.sessionEntityTypesClient.UpdateSessionEntityType(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// DeleteSessionEntityType deletes the specified session entity type.
|
||||
func (c *SessionEntityTypesClient) DeleteSessionEntityType(ctx context.Context, req *dialogflowpb.DeleteSessionEntityTypeRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.DeleteSessionEntityType[0:len(c.CallOptions.DeleteSessionEntityType):len(c.CallOptions.DeleteSessionEntityType)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = c.sessionEntityTypesClient.DeleteSessionEntityType(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
// SessionEntityTypeIterator manages a stream of *dialogflowpb.SessionEntityType.
|
||||
type SessionEntityTypeIterator struct {
|
||||
items []*dialogflowpb.SessionEntityType
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*dialogflowpb.SessionEntityType, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
func (it *SessionEntityTypeIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *SessionEntityTypeIterator) Next() (*dialogflowpb.SessionEntityType, error) {
|
||||
var item *dialogflowpb.SessionEntityType
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *SessionEntityTypeIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *SessionEntityTypeIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
128
vendor/cloud.google.com/go/dialogflow/apiv2/session_entity_types_client_example_test.go
generated
vendored
Normal file
128
vendor/cloud.google.com/go/dialogflow/apiv2/session_entity_types_client_example_test.go
generated
vendored
Normal file
@@ -0,0 +1,128 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package dialogflow_test
|
||||
|
||||
import (
|
||||
"cloud.google.com/go/dialogflow/apiv2"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
dialogflowpb "google.golang.org/genproto/googleapis/cloud/dialogflow/v2"
|
||||
)
|
||||
|
||||
func ExampleNewSessionEntityTypesClient() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewSessionEntityTypesClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use client.
|
||||
_ = c
|
||||
}
|
||||
|
||||
func ExampleSessionEntityTypesClient_ListSessionEntityTypes() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewSessionEntityTypesClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.ListSessionEntityTypesRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
it := c.ListSessionEntityTypes(ctx, req)
|
||||
for {
|
||||
resp, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleSessionEntityTypesClient_GetSessionEntityType() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewSessionEntityTypesClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.GetSessionEntityTypeRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
resp, err := c.GetSessionEntityType(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
|
||||
func ExampleSessionEntityTypesClient_CreateSessionEntityType() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewSessionEntityTypesClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.CreateSessionEntityTypeRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
resp, err := c.CreateSessionEntityType(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
|
||||
func ExampleSessionEntityTypesClient_UpdateSessionEntityType() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewSessionEntityTypesClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.UpdateSessionEntityTypeRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
resp, err := c.UpdateSessionEntityType(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
|
||||
func ExampleSessionEntityTypesClient_DeleteSessionEntityType() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewSessionEntityTypesClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.DeleteSessionEntityTypeRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
err = c.DeleteSessionEntityType(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
}
|
||||
142
vendor/cloud.google.com/go/dialogflow/apiv2/sessions_client.go
generated
vendored
Normal file
142
vendor/cloud.google.com/go/dialogflow/apiv2/sessions_client.go
generated
vendored
Normal file
@@ -0,0 +1,142 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package dialogflow
|
||||
|
||||
import (
|
||||
"cloud.google.com/go/internal/version"
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/transport"
|
||||
dialogflowpb "google.golang.org/genproto/googleapis/cloud/dialogflow/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// SessionsCallOptions contains the retry settings for each method of SessionsClient.
|
||||
type SessionsCallOptions struct {
|
||||
DetectIntent []gax.CallOption
|
||||
StreamingDetectIntent []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultSessionsClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
option.WithEndpoint("dialogflow.googleapis.com:443"),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultSessionsCallOptions() *SessionsCallOptions {
|
||||
retry := map[[2]string][]gax.CallOption{}
|
||||
return &SessionsCallOptions{
|
||||
DetectIntent: retry[[2]string{"default", "non_idempotent"}],
|
||||
StreamingDetectIntent: retry[[2]string{"default", "non_idempotent"}],
|
||||
}
|
||||
}
|
||||
|
||||
// SessionsClient is a client for interacting with Dialogflow API.
|
||||
type SessionsClient struct {
|
||||
// The connection to the service.
|
||||
conn *grpc.ClientConn
|
||||
|
||||
// The gRPC API client.
|
||||
sessionsClient dialogflowpb.SessionsClient
|
||||
|
||||
// The call options for this service.
|
||||
CallOptions *SessionsCallOptions
|
||||
|
||||
// The x-goog-* metadata to be sent with each request.
|
||||
xGoogMetadata metadata.MD
|
||||
}
|
||||
|
||||
// NewSessionsClient creates a new sessions client.
|
||||
//
|
||||
// A session represents an interaction with a user. You retrieve user input
|
||||
// and pass it to the [DetectIntent][google.cloud.dialogflow.v2.Sessions.DetectIntent] (or
|
||||
// [StreamingDetectIntent][google.cloud.dialogflow.v2.Sessions.StreamingDetectIntent]) method to determine
|
||||
// user intent and respond.
|
||||
func NewSessionsClient(ctx context.Context, opts ...option.ClientOption) (*SessionsClient, error) {
|
||||
conn, err := transport.DialGRPC(ctx, append(defaultSessionsClientOptions(), opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &SessionsClient{
|
||||
conn: conn,
|
||||
CallOptions: defaultSessionsCallOptions(),
|
||||
|
||||
sessionsClient: dialogflowpb.NewSessionsClient(conn),
|
||||
}
|
||||
c.setGoogleClientInfo()
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Connection returns the client's connection to the API service.
|
||||
func (c *SessionsClient) Connection() *grpc.ClientConn {
|
||||
return c.conn
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *SessionsClient) Close() error {
|
||||
return c.conn.Close()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *SessionsClient) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", version.Go()}, keyval...)
|
||||
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
|
||||
}
|
||||
|
||||
// DetectIntent processes a natural language query and returns structured, actionable data
|
||||
// as a result. This method is not idempotent, because it may cause contexts
|
||||
// and session entity types to be updated, which in turn might affect
|
||||
// results of future queries.
|
||||
func (c *SessionsClient) DetectIntent(ctx context.Context, req *dialogflowpb.DetectIntentRequest, opts ...gax.CallOption) (*dialogflowpb.DetectIntentResponse, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.DetectIntent[0:len(c.CallOptions.DetectIntent):len(c.CallOptions.DetectIntent)], opts...)
|
||||
var resp *dialogflowpb.DetectIntentResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.sessionsClient.DetectIntent(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// StreamingDetectIntent processes a natural language query in audio format in a streaming fashion
|
||||
// and returns structured, actionable data as a result. This method is only
|
||||
// available via the gRPC API (not REST).
|
||||
func (c *SessionsClient) StreamingDetectIntent(ctx context.Context, opts ...gax.CallOption) (dialogflowpb.Sessions_StreamingDetectIntentClient, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.StreamingDetectIntent[0:len(c.CallOptions.StreamingDetectIntent):len(c.CallOptions.StreamingDetectIntent)], opts...)
|
||||
var resp dialogflowpb.Sessions_StreamingDetectIntentClient
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.sessionsClient.StreamingDetectIntent(ctx, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
87
vendor/cloud.google.com/go/dialogflow/apiv2/sessions_client_example_test.go
generated
vendored
Normal file
87
vendor/cloud.google.com/go/dialogflow/apiv2/sessions_client_example_test.go
generated
vendored
Normal file
@@ -0,0 +1,87 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package dialogflow_test
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"cloud.google.com/go/dialogflow/apiv2"
|
||||
"golang.org/x/net/context"
|
||||
dialogflowpb "google.golang.org/genproto/googleapis/cloud/dialogflow/v2"
|
||||
)
|
||||
|
||||
func ExampleNewSessionsClient() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewSessionsClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use client.
|
||||
_ = c
|
||||
}
|
||||
|
||||
func ExampleSessionsClient_DetectIntent() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewSessionsClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &dialogflowpb.DetectIntentRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
resp, err := c.DetectIntent(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
|
||||
func ExampleSessionsClient_StreamingDetectIntent() {
|
||||
ctx := context.Background()
|
||||
c, err := dialogflow.NewSessionsClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
stream, err := c.StreamingDetectIntent(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
go func() {
|
||||
reqs := []*dialogflowpb.StreamingDetectIntentRequest{
|
||||
// TODO: Create requests.
|
||||
}
|
||||
for _, req := range reqs {
|
||||
if err := stream.Send(req); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
}
|
||||
stream.CloseSend()
|
||||
}()
|
||||
for {
|
||||
resp, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
}
|
||||
26
vendor/cloud.google.com/go/dlp/apiv2/dlp_client.go
generated
vendored
26
vendor/cloud.google.com/go/dlp/apiv2/dlp_client.go
generated
vendored
@@ -259,7 +259,7 @@ func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequ
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// CreateInspectTemplate creates an inspect template for re-using frequently used configuration
|
||||
// CreateInspectTemplate creates an InspectTemplate for re-using frequently used configuration
|
||||
// for inspecting content, images, and storage.
|
||||
func (c *Client) CreateInspectTemplate(ctx context.Context, req *dlppb.CreateInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
@@ -276,7 +276,7 @@ func (c *Client) CreateInspectTemplate(ctx context.Context, req *dlppb.CreateIns
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// UpdateInspectTemplate updates the inspect template.
|
||||
// UpdateInspectTemplate updates the InspectTemplate.
|
||||
func (c *Client) UpdateInspectTemplate(ctx context.Context, req *dlppb.UpdateInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.UpdateInspectTemplate[0:len(c.CallOptions.UpdateInspectTemplate):len(c.CallOptions.UpdateInspectTemplate)], opts...)
|
||||
@@ -292,7 +292,7 @@ func (c *Client) UpdateInspectTemplate(ctx context.Context, req *dlppb.UpdateIns
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// GetInspectTemplate gets an inspect template.
|
||||
// GetInspectTemplate gets an InspectTemplate.
|
||||
func (c *Client) GetInspectTemplate(ctx context.Context, req *dlppb.GetInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.GetInspectTemplate[0:len(c.CallOptions.GetInspectTemplate):len(c.CallOptions.GetInspectTemplate)], opts...)
|
||||
@@ -308,7 +308,7 @@ func (c *Client) GetInspectTemplate(ctx context.Context, req *dlppb.GetInspectTe
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// ListInspectTemplates lists inspect templates.
|
||||
// ListInspectTemplates lists InspectTemplates.
|
||||
func (c *Client) ListInspectTemplates(ctx context.Context, req *dlppb.ListInspectTemplatesRequest, opts ...gax.CallOption) *InspectTemplateIterator {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.ListInspectTemplates[0:len(c.CallOptions.ListInspectTemplates):len(c.CallOptions.ListInspectTemplates)], opts...)
|
||||
@@ -343,7 +343,7 @@ func (c *Client) ListInspectTemplates(ctx context.Context, req *dlppb.ListInspec
|
||||
return it
|
||||
}
|
||||
|
||||
// DeleteInspectTemplate deletes an inspect template.
|
||||
// DeleteInspectTemplate deletes an InspectTemplate.
|
||||
func (c *Client) DeleteInspectTemplate(ctx context.Context, req *dlppb.DeleteInspectTemplateRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.DeleteInspectTemplate[0:len(c.CallOptions.DeleteInspectTemplate):len(c.CallOptions.DeleteInspectTemplate)], opts...)
|
||||
@@ -355,8 +355,8 @@ func (c *Client) DeleteInspectTemplate(ctx context.Context, req *dlppb.DeleteIns
|
||||
return err
|
||||
}
|
||||
|
||||
// CreateDeidentifyTemplate creates a de-identify template for re-using frequently used configuration
|
||||
// for Deidentifying content, images, and storage.
|
||||
// CreateDeidentifyTemplate creates a DeidentifyTemplate for re-using frequently used configuration
|
||||
// for de-identifying content, images, and storage.
|
||||
func (c *Client) CreateDeidentifyTemplate(ctx context.Context, req *dlppb.CreateDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.CreateDeidentifyTemplate[0:len(c.CallOptions.CreateDeidentifyTemplate):len(c.CallOptions.CreateDeidentifyTemplate)], opts...)
|
||||
@@ -372,7 +372,7 @@ func (c *Client) CreateDeidentifyTemplate(ctx context.Context, req *dlppb.Create
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// UpdateDeidentifyTemplate updates the de-identify template.
|
||||
// UpdateDeidentifyTemplate updates the DeidentifyTemplate.
|
||||
func (c *Client) UpdateDeidentifyTemplate(ctx context.Context, req *dlppb.UpdateDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.UpdateDeidentifyTemplate[0:len(c.CallOptions.UpdateDeidentifyTemplate):len(c.CallOptions.UpdateDeidentifyTemplate)], opts...)
|
||||
@@ -388,7 +388,7 @@ func (c *Client) UpdateDeidentifyTemplate(ctx context.Context, req *dlppb.Update
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// GetDeidentifyTemplate gets a de-identify template.
|
||||
// GetDeidentifyTemplate gets a DeidentifyTemplate.
|
||||
func (c *Client) GetDeidentifyTemplate(ctx context.Context, req *dlppb.GetDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.GetDeidentifyTemplate[0:len(c.CallOptions.GetDeidentifyTemplate):len(c.CallOptions.GetDeidentifyTemplate)], opts...)
|
||||
@@ -404,7 +404,7 @@ func (c *Client) GetDeidentifyTemplate(ctx context.Context, req *dlppb.GetDeiden
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// ListDeidentifyTemplates lists de-identify templates.
|
||||
// ListDeidentifyTemplates lists DeidentifyTemplates.
|
||||
func (c *Client) ListDeidentifyTemplates(ctx context.Context, req *dlppb.ListDeidentifyTemplatesRequest, opts ...gax.CallOption) *DeidentifyTemplateIterator {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.ListDeidentifyTemplates[0:len(c.CallOptions.ListDeidentifyTemplates):len(c.CallOptions.ListDeidentifyTemplates)], opts...)
|
||||
@@ -439,7 +439,7 @@ func (c *Client) ListDeidentifyTemplates(ctx context.Context, req *dlppb.ListDei
|
||||
return it
|
||||
}
|
||||
|
||||
// DeleteDeidentifyTemplate deletes a de-identify template.
|
||||
// DeleteDeidentifyTemplate deletes a DeidentifyTemplate.
|
||||
func (c *Client) DeleteDeidentifyTemplate(ctx context.Context, req *dlppb.DeleteDeidentifyTemplateRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.DeleteDeidentifyTemplate[0:len(c.CallOptions.DeleteDeidentifyTemplate):len(c.CallOptions.DeleteDeidentifyTemplate)], opts...)
|
||||
@@ -451,8 +451,8 @@ func (c *Client) DeleteDeidentifyTemplate(ctx context.Context, req *dlppb.Delete
|
||||
return err
|
||||
}
|
||||
|
||||
// CreateDlpJob creates a new job to inspect storage or calculate risk metrics How-to
|
||||
// guide (at /dlp/docs/compute-risk-analysis).
|
||||
// CreateDlpJob creates a new job to inspect storage or calculate risk metrics.
|
||||
// How-to guide (at /dlp/docs/compute-risk-analysis).
|
||||
func (c *Client) CreateDlpJob(ctx context.Context, req *dlppb.CreateDlpJobRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.CreateDlpJob[0:len(c.CallOptions.CreateDlpJob):len(c.CallOptions.CreateDlpJob)], opts...)
|
||||
|
||||
5
vendor/cloud.google.com/go/dlp/apiv2/doc.go
generated
vendored
5
vendor/cloud.google.com/go/dlp/apiv2/doc.go
generated
vendored
@@ -19,8 +19,9 @@
|
||||
//
|
||||
// NOTE: This package is in alpha. It is not stable, and is likely to change.
|
||||
//
|
||||
// Provides methods for detection of privacy-sensitive fragments in text,
|
||||
// images, and Google Cloud Platform storage repositories.
|
||||
// Provides methods for detection, risk analysis, and de-identification of
|
||||
// privacy-sensitive fragments in text, images, and Google Cloud Platform
|
||||
// storage repositories.
|
||||
package dlp // import "cloud.google.com/go/dlp/apiv2"
|
||||
|
||||
import (
|
||||
|
||||
79
vendor/cloud.google.com/go/dlp/apiv2beta1/InspectContent_smoke_test.go
generated
vendored
79
vendor/cloud.google.com/go/dlp/apiv2beta1/InspectContent_smoke_test.go
generated
vendored
@@ -1,79 +0,0 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package dlp
|
||||
|
||||
import (
|
||||
dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
|
||||
)
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/testutil"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
)
|
||||
|
||||
var _ = fmt.Sprintf
|
||||
var _ = iterator.Done
|
||||
var _ = strconv.FormatUint
|
||||
var _ = time.Now
|
||||
|
||||
func TestDlpServiceSmoke(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping smoke test in short mode")
|
||||
}
|
||||
ctx := context.Background()
|
||||
ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
|
||||
if ts == nil {
|
||||
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
|
||||
}
|
||||
|
||||
projectId := testutil.ProjID()
|
||||
_ = projectId
|
||||
|
||||
c, err := NewClient(ctx, option.WithTokenSource(ts))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var minLikelihood dlppb.Likelihood = dlppb.Likelihood_POSSIBLE
|
||||
var inspectConfig = &dlppb.InspectConfig{
|
||||
MinLikelihood: minLikelihood,
|
||||
}
|
||||
var type_ string = "text/plain"
|
||||
var value string = "my phone number is 215-512-1212"
|
||||
var itemsElement = &dlppb.ContentItem{
|
||||
Type: type_,
|
||||
DataItem: &dlppb.ContentItem_Value{
|
||||
Value: value,
|
||||
},
|
||||
}
|
||||
var items = []*dlppb.ContentItem{itemsElement}
|
||||
var request = &dlppb.InspectContentRequest{
|
||||
InspectConfig: inspectConfig,
|
||||
Items: items,
|
||||
}
|
||||
|
||||
if _, err := c.InspectContent(ctx, request); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
429
vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client.go
generated
vendored
429
vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client.go
generated
vendored
@@ -1,429 +0,0 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package dlp
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/version"
|
||||
"cloud.google.com/go/longrunning"
|
||||
lroauto "cloud.google.com/go/longrunning/autogen"
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/transport"
|
||||
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
|
||||
dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// CallOptions contains the retry settings for each method of Client.
|
||||
type CallOptions struct {
|
||||
InspectContent []gax.CallOption
|
||||
RedactContent []gax.CallOption
|
||||
DeidentifyContent []gax.CallOption
|
||||
AnalyzeDataSourceRisk []gax.CallOption
|
||||
CreateInspectOperation []gax.CallOption
|
||||
ListInspectFindings []gax.CallOption
|
||||
ListInfoTypes []gax.CallOption
|
||||
ListRootCategories []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
option.WithEndpoint("dlp.googleapis.com:443"),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultCallOptions() *CallOptions {
|
||||
retry := map[[2]string][]gax.CallOption{
|
||||
{"default", "idempotent"}: {
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.3,
|
||||
})
|
||||
}),
|
||||
},
|
||||
}
|
||||
return &CallOptions{
|
||||
InspectContent: retry[[2]string{"default", "non_idempotent"}],
|
||||
RedactContent: retry[[2]string{"default", "non_idempotent"}],
|
||||
DeidentifyContent: retry[[2]string{"default", "idempotent"}],
|
||||
AnalyzeDataSourceRisk: retry[[2]string{"default", "idempotent"}],
|
||||
CreateInspectOperation: retry[[2]string{"default", "non_idempotent"}],
|
||||
ListInspectFindings: retry[[2]string{"default", "idempotent"}],
|
||||
ListInfoTypes: retry[[2]string{"default", "idempotent"}],
|
||||
ListRootCategories: retry[[2]string{"default", "idempotent"}],
|
||||
}
|
||||
}
|
||||
|
||||
// Client is a client for interacting with DLP API.
//
// It wraps a single gRPC connection that is shared with LROClient; use
// NewClient to construct one and Close to release the connection.
type Client struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	client dlppb.DlpServiceClient

	// LROClient is used internally to handle longrunning operations.
	// It is exposed so that its CallOptions can be modified if required.
	// Users should not Close this client.
	LROClient *lroauto.OperationsClient

	// The call options for this service.
	CallOptions *CallOptions

	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}
|
||||
|
||||
// NewClient creates a new dlp service client.
//
// The DLP API is a service that allows clients
// to detect the presence of Personally Identifiable Information (PII) and other
// privacy-sensitive data in user-supplied, unstructured data streams, like text
// blocks or images.
// The service also includes methods for sensitive data redaction and
// scheduling of data scans on Google Cloud Platform based data sets.
//
// The supplied opts are appended after defaultClientOptions, so callers can
// override the endpoint, scopes, or supply an existing gRPC connection.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &Client{
		conn:        conn,
		CallOptions: defaultCallOptions(),

		client: dlppb.NewDlpServiceClient(conn),
	}
	c.setGoogleClientInfo()

	// The long-running-operations client reuses the connection dialed above.
	c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
	if err != nil {
		// This error "should not happen", since we are just reusing old connection
		// and never actually need to dial.
		// If this does happen, we could leak conn. However, we cannot close conn:
		// If the user invoked the function with option.WithGRPCConn,
		// we would close a connection that's still in use.
		// TODO(pongad): investigate error conditions.
		return nil, err
	}
	return c, nil
}
|
||||
|
||||
// Connection returns the client's connection to the API service.
// The same connection is shared with LROClient (see NewClient).
func (c *Client) Connection() *grpc.ClientConn {
	return c.conn
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
// Note that this also closes the connection used by LROClient, since both
// share the conn dialed in NewClient.
func (c *Client) Close() error {
	return c.conn.Close()
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *Client) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", version.Go()}, keyval...)
|
||||
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
|
||||
}
|
||||
|
||||
// InspectContent finds potentially sensitive info in a list of strings.
|
||||
// This method has limits on input size, processing time, and output size.
|
||||
func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...)
|
||||
var resp *dlppb.InspectContentResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.InspectContent(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// RedactContent redacts potentially sensitive info from a list of strings.
|
||||
// This method has limits on input size, processing time, and output size.
|
||||
func (c *Client) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest, opts ...gax.CallOption) (*dlppb.RedactContentResponse, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.RedactContent[0:len(c.CallOptions.RedactContent):len(c.CallOptions.RedactContent)], opts...)
|
||||
var resp *dlppb.RedactContentResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.RedactContent(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// DeidentifyContent de-identifies potentially sensitive info from a list of strings.
|
||||
// This method has limits on input size and output size.
|
||||
func (c *Client) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest, opts ...gax.CallOption) (*dlppb.DeidentifyContentResponse, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.DeidentifyContent[0:len(c.CallOptions.DeidentifyContent):len(c.CallOptions.DeidentifyContent)], opts...)
|
||||
var resp *dlppb.DeidentifyContentResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.DeidentifyContent(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// AnalyzeDataSourceRisk schedules a job to compute risk analysis metrics over content in a Google
|
||||
// Cloud Platform repository.
|
||||
func (c *Client) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest, opts ...gax.CallOption) (*AnalyzeDataSourceRiskOperation, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.AnalyzeDataSourceRisk[0:len(c.CallOptions.AnalyzeDataSourceRisk):len(c.CallOptions.AnalyzeDataSourceRisk)], opts...)
|
||||
var resp *longrunningpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.AnalyzeDataSourceRisk(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &AnalyzeDataSourceRiskOperation{
|
||||
lro: longrunning.InternalNewOperation(c.LROClient, resp),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CreateInspectOperation schedules a job scanning content in a Google Cloud Platform data
|
||||
// repository.
|
||||
func (c *Client) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest, opts ...gax.CallOption) (*CreateInspectOperationHandle, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.CreateInspectOperation[0:len(c.CallOptions.CreateInspectOperation):len(c.CallOptions.CreateInspectOperation)], opts...)
|
||||
var resp *longrunningpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.CreateInspectOperation(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &CreateInspectOperationHandle{
|
||||
lro: longrunning.InternalNewOperation(c.LROClient, resp),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ListInspectFindings returns list of results for given inspect operation result set id.
|
||||
func (c *Client) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest, opts ...gax.CallOption) (*dlppb.ListInspectFindingsResponse, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.ListInspectFindings[0:len(c.CallOptions.ListInspectFindings):len(c.CallOptions.ListInspectFindings)], opts...)
|
||||
var resp *dlppb.ListInspectFindingsResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.ListInspectFindings(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// ListInfoTypes returns sensitive information types for given category.
|
||||
func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...)
|
||||
var resp *dlppb.ListInfoTypesResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.ListInfoTypes(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// ListRootCategories returns the list of root categories of sensitive information.
|
||||
func (c *Client) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest, opts ...gax.CallOption) (*dlppb.ListRootCategoriesResponse, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.ListRootCategories[0:len(c.CallOptions.ListRootCategories):len(c.CallOptions.ListRootCategories)], opts...)
|
||||
var resp *dlppb.ListRootCategoriesResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.ListRootCategories(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// AnalyzeDataSourceRiskOperation manages a long-running operation from AnalyzeDataSourceRisk.
type AnalyzeDataSourceRiskOperation struct {
	lro *longrunning.Operation
}

// AnalyzeDataSourceRiskOperation returns a new AnalyzeDataSourceRiskOperation from a given name.
// The name must be that of a previously created AnalyzeDataSourceRiskOperation, possibly from a different process.
func (c *Client) AnalyzeDataSourceRiskOperation(name string) *AnalyzeDataSourceRiskOperation {
	return &AnalyzeDataSourceRiskOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
	}
}

// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *AnalyzeDataSourceRiskOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*dlppb.RiskAnalysisOperationResult, error) {
	var resp dlppb.RiskAnalysisOperationResult
	// Polls the server every 45 seconds until the operation completes or ctx is done.
	if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil {
		return nil, err
	}
	return &resp, nil
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *AnalyzeDataSourceRiskOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*dlppb.RiskAnalysisOperationResult, error) {
	var resp dlppb.RiskAnalysisOperationResult
	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
		return nil, err
	}
	if !op.Done() {
		// In progress: neither a response nor an error yet.
		return nil, nil
	}
	return &resp, nil
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *AnalyzeDataSourceRiskOperation) Metadata() (*dlppb.RiskAnalysisOperationMetadata, error) {
	var meta dlppb.RiskAnalysisOperationMetadata
	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *AnalyzeDataSourceRiskOperation) Done() bool {
	return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *AnalyzeDataSourceRiskOperation) Name() string {
	return op.lro.Name()
}
|
||||
|
||||
// CreateInspectOperationHandle manages a long-running operation from CreateInspectOperation.
type CreateInspectOperationHandle struct {
	lro *longrunning.Operation
}

// CreateInspectOperationHandle returns a new CreateInspectOperationHandle from a given name.
// The name must be that of a previously created CreateInspectOperationHandle, possibly from a different process.
func (c *Client) CreateInspectOperationHandle(name string) *CreateInspectOperationHandle {
	return &CreateInspectOperationHandle{
		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
	}
}

// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *CreateInspectOperationHandle) Wait(ctx context.Context, opts ...gax.CallOption) (*dlppb.InspectOperationResult, error) {
	var resp dlppb.InspectOperationResult
	// Polls the server every 45 seconds until the operation completes or ctx is done.
	if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil {
		return nil, err
	}
	return &resp, nil
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *CreateInspectOperationHandle) Poll(ctx context.Context, opts ...gax.CallOption) (*dlppb.InspectOperationResult, error) {
	var resp dlppb.InspectOperationResult
	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
		return nil, err
	}
	if !op.Done() {
		// In progress: neither a response nor an error yet.
		return nil, nil
	}
	return &resp, nil
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *CreateInspectOperationHandle) Metadata() (*dlppb.InspectOperationMetadata, error) {
	var meta dlppb.InspectOperationMetadata
	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *CreateInspectOperationHandle) Done() bool {
	return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *CreateInspectOperationHandle) Name() string {
	return op.lro.Name()
}
|
||||
187
vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client_example_test.go
generated
vendored
187
vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client_example_test.go
generated
vendored
@@ -1,187 +0,0 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package dlp_test
|
||||
|
||||
import (
|
||||
"cloud.google.com/go/dlp/apiv2beta1"
|
||||
"golang.org/x/net/context"
|
||||
dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
|
||||
)
|
||||
|
||||
// ExampleNewClient demonstrates constructing a DLP client.
func ExampleNewClient() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}

// ExampleClient_InspectContent demonstrates a synchronous InspectContent call.
func ExampleClient_InspectContent() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.InspectContentRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.InspectContent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_RedactContent demonstrates a synchronous RedactContent call.
func ExampleClient_RedactContent() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.RedactContentRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.RedactContent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_DeidentifyContent demonstrates a synchronous DeidentifyContent call.
func ExampleClient_DeidentifyContent() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.DeidentifyContentRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.DeidentifyContent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_AnalyzeDataSourceRisk demonstrates starting the long-running
// risk-analysis operation and blocking on its result with Wait.
func ExampleClient_AnalyzeDataSourceRisk() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.AnalyzeDataSourceRiskRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.AnalyzeDataSourceRisk(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}

	resp, err := op.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_CreateInspectOperation demonstrates starting the long-running
// inspect operation and blocking on its result with Wait.
func ExampleClient_CreateInspectOperation() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.CreateInspectOperationRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.CreateInspectOperation(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}

	resp, err := op.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_ListInspectFindings demonstrates a synchronous ListInspectFindings call.
func ExampleClient_ListInspectFindings() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.ListInspectFindingsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ListInspectFindings(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_ListInfoTypes demonstrates a synchronous ListInfoTypes call.
func ExampleClient_ListInfoTypes() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.ListInfoTypesRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ListInfoTypes(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_ListRootCategories demonstrates a synchronous ListRootCategories call.
func ExampleClient_ListRootCategories() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.ListRootCategoriesRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ListRootCategories(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
|
||||
48
vendor/cloud.google.com/go/dlp/apiv2beta1/doc.go
generated
vendored
48
vendor/cloud.google.com/go/dlp/apiv2beta1/doc.go
generated
vendored
@@ -1,48 +0,0 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
// Package dlp is an auto-generated package for the
|
||||
// DLP API.
|
||||
//
|
||||
// NOTE: This package is in alpha. It is not stable, and is likely to change.
|
||||
//
|
||||
// The Google Data Loss Prevention API provides methods for detection of
|
||||
// privacy-sensitive fragments in text, images, and Google Cloud Platform
|
||||
// storage repositories.
|
||||
package dlp // import "cloud.google.com/go/dlp/apiv2beta1"
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
|
||||
out, _ := metadata.FromOutgoingContext(ctx)
|
||||
out = out.Copy()
|
||||
for _, md := range mds {
|
||||
for k, v := range md {
|
||||
out[k] = append(out[k], v...)
|
||||
}
|
||||
}
|
||||
return metadata.NewOutgoingContext(ctx, out)
|
||||
}
|
||||
|
||||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	scopes := make([]string, 0, 1)
	scopes = append(scopes, "https://www.googleapis.com/auth/cloud-platform")
	return scopes
}
|
||||
844
vendor/cloud.google.com/go/dlp/apiv2beta1/mock_test.go
generated
vendored
844
vendor/cloud.google.com/go/dlp/apiv2beta1/mock_test.go
generated
vendored
@@ -1,844 +0,0 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package dlp
|
||||
|
||||
import (
|
||||
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
|
||||
dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
|
||||
)
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
status "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
gstatus "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
var _ = io.EOF
|
||||
var _ = ptypes.MarshalAny
|
||||
var _ status.Status
|
||||
|
||||
// mockDlpServer is an in-process fake DLP server used by the tests: it
// records every incoming request and replays canned responses or a fixed error.
type mockDlpServer struct {
	// Embed for forward compatibility.
	// Tests will keep working if more methods are added
	// in the future.
	dlppb.DlpServiceServer

	// reqs records every request received, in order.
	reqs []proto.Message

	// If set, all calls return this error.
	err error

	// responses to return if err == nil
	resps []proto.Message
}
|
||||
|
||||
// InspectContent checks the x-goog-api-client header, records the request,
// and returns the configured error or the first canned response.
func (s *mockDlpServer) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest) (*dlppb.InspectContentResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*dlppb.InspectContentResponse), nil
}

// RedactContent follows the same record-and-replay pattern as InspectContent.
func (s *mockDlpServer) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest) (*dlppb.RedactContentResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*dlppb.RedactContentResponse), nil
}

// DeidentifyContent follows the same record-and-replay pattern as InspectContent.
func (s *mockDlpServer) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest) (*dlppb.DeidentifyContentResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*dlppb.DeidentifyContentResponse), nil
}

// CreateInspectOperation replays a canned longrunning Operation message.
func (s *mockDlpServer) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest) (*longrunningpb.Operation, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*longrunningpb.Operation), nil
}

// AnalyzeDataSourceRisk replays a canned longrunning Operation message.
func (s *mockDlpServer) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest) (*longrunningpb.Operation, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*longrunningpb.Operation), nil
}

// ListInspectFindings follows the same record-and-replay pattern as InspectContent.
func (s *mockDlpServer) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest) (*dlppb.ListInspectFindingsResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*dlppb.ListInspectFindingsResponse), nil
}

// ListInfoTypes follows the same record-and-replay pattern as InspectContent.
func (s *mockDlpServer) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest) (*dlppb.ListInfoTypesResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*dlppb.ListInfoTypesResponse), nil
}

// ListRootCategories follows the same record-and-replay pattern as InspectContent.
func (s *mockDlpServer) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest) (*dlppb.ListRootCategoriesResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*dlppb.ListRootCategoriesResponse), nil
}
|
||||
|
||||
// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption

var (
	// mockDlp is the shared in-process fake server all tests configure.
	mockDlp mockDlpServer
)

// TestMain starts an in-process gRPC server backed by mockDlp on an
// ephemeral localhost port, dials it insecurely, and publishes the
// connection through clientOpt before running the tests.
func TestMain(m *testing.M) {
	flag.Parse()

	serv := grpc.NewServer()
	dlppb.RegisterDlpServiceServer(serv, &mockDlp)

	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	go serv.Serve(lis)

	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	clientOpt = option.WithGRPCConn(conn)

	os.Exit(m.Run())
}
|
||||
|
||||
func TestDlpServiceInspectContent(t *testing.T) {
|
||||
var expectedResponse *dlppb.InspectContentResponse = &dlppb.InspectContentResponse{}
|
||||
|
||||
mockDlp.err = nil
|
||||
mockDlp.reqs = nil
|
||||
|
||||
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
|
||||
|
||||
var name string = "EMAIL_ADDRESS"
|
||||
var infoTypesElement = &dlppb.InfoType{
|
||||
Name: name,
|
||||
}
|
||||
var infoTypes = []*dlppb.InfoType{infoTypesElement}
|
||||
var inspectConfig = &dlppb.InspectConfig{
|
||||
InfoTypes: infoTypes,
|
||||
}
|
||||
var type_ string = "text/plain"
|
||||
var value string = "My email is example@example.com."
|
||||
var itemsElement = &dlppb.ContentItem{
|
||||
Type: type_,
|
||||
DataItem: &dlppb.ContentItem_Value{
|
||||
Value: value,
|
||||
},
|
||||
}
|
||||
var items = []*dlppb.ContentItem{itemsElement}
|
||||
var request = &dlppb.InspectContentRequest{
|
||||
InspectConfig: inspectConfig,
|
||||
Items: items,
|
||||
}
|
||||
|
||||
c, err := NewClient(context.Background(), clientOpt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp, err := c.InspectContent(context.Background(), request)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
|
||||
t.Errorf("wrong request %q, want %q", got, want)
|
||||
}
|
||||
|
||||
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
|
||||
t.Errorf("wrong response %q, want %q)", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDlpServiceInspectContentError(t *testing.T) {
|
||||
errCode := codes.PermissionDenied
|
||||
mockDlp.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var name string = "EMAIL_ADDRESS"
|
||||
var infoTypesElement = &dlppb.InfoType{
|
||||
Name: name,
|
||||
}
|
||||
var infoTypes = []*dlppb.InfoType{infoTypesElement}
|
||||
var inspectConfig = &dlppb.InspectConfig{
|
||||
InfoTypes: infoTypes,
|
||||
}
|
||||
var type_ string = "text/plain"
|
||||
var value string = "My email is example@example.com."
|
||||
var itemsElement = &dlppb.ContentItem{
|
||||
Type: type_,
|
||||
DataItem: &dlppb.ContentItem_Value{
|
||||
Value: value,
|
||||
},
|
||||
}
|
||||
var items = []*dlppb.ContentItem{itemsElement}
|
||||
var request = &dlppb.InspectContentRequest{
|
||||
InspectConfig: inspectConfig,
|
||||
Items: items,
|
||||
}
|
||||
|
||||
c, err := NewClient(context.Background(), clientOpt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp, err := c.InspectContent(context.Background(), request)
|
||||
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
}
|
||||
func TestDlpServiceRedactContent(t *testing.T) {
|
||||
var expectedResponse *dlppb.RedactContentResponse = &dlppb.RedactContentResponse{}
|
||||
|
||||
mockDlp.err = nil
|
||||
mockDlp.reqs = nil
|
||||
|
||||
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
|
||||
|
||||
var name string = "EMAIL_ADDRESS"
|
||||
var infoTypesElement = &dlppb.InfoType{
|
||||
Name: name,
|
||||
}
|
||||
var infoTypes = []*dlppb.InfoType{infoTypesElement}
|
||||
var inspectConfig = &dlppb.InspectConfig{
|
||||
InfoTypes: infoTypes,
|
||||
}
|
||||
var type_ string = "text/plain"
|
||||
var value string = "My email is example@example.com."
|
||||
var itemsElement = &dlppb.ContentItem{
|
||||
Type: type_,
|
||||
DataItem: &dlppb.ContentItem_Value{
|
||||
Value: value,
|
||||
},
|
||||
}
|
||||
var items = []*dlppb.ContentItem{itemsElement}
|
||||
var name2 string = "EMAIL_ADDRESS"
|
||||
var infoType = &dlppb.InfoType{
|
||||
Name: name2,
|
||||
}
|
||||
var replaceWith string = "REDACTED"
|
||||
var replaceConfigsElement = &dlppb.RedactContentRequest_ReplaceConfig{
|
||||
InfoType: infoType,
|
||||
ReplaceWith: replaceWith,
|
||||
}
|
||||
var replaceConfigs = []*dlppb.RedactContentRequest_ReplaceConfig{replaceConfigsElement}
|
||||
var request = &dlppb.RedactContentRequest{
|
||||
InspectConfig: inspectConfig,
|
||||
Items: items,
|
||||
ReplaceConfigs: replaceConfigs,
|
||||
}
|
||||
|
||||
c, err := NewClient(context.Background(), clientOpt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp, err := c.RedactContent(context.Background(), request)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
|
||||
t.Errorf("wrong request %q, want %q", got, want)
|
||||
}
|
||||
|
||||
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
|
||||
t.Errorf("wrong response %q, want %q)", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDlpServiceRedactContentError(t *testing.T) {
|
||||
errCode := codes.PermissionDenied
|
||||
mockDlp.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var name string = "EMAIL_ADDRESS"
|
||||
var infoTypesElement = &dlppb.InfoType{
|
||||
Name: name,
|
||||
}
|
||||
var infoTypes = []*dlppb.InfoType{infoTypesElement}
|
||||
var inspectConfig = &dlppb.InspectConfig{
|
||||
InfoTypes: infoTypes,
|
||||
}
|
||||
var type_ string = "text/plain"
|
||||
var value string = "My email is example@example.com."
|
||||
var itemsElement = &dlppb.ContentItem{
|
||||
Type: type_,
|
||||
DataItem: &dlppb.ContentItem_Value{
|
||||
Value: value,
|
||||
},
|
||||
}
|
||||
var items = []*dlppb.ContentItem{itemsElement}
|
||||
var name2 string = "EMAIL_ADDRESS"
|
||||
var infoType = &dlppb.InfoType{
|
||||
Name: name2,
|
||||
}
|
||||
var replaceWith string = "REDACTED"
|
||||
var replaceConfigsElement = &dlppb.RedactContentRequest_ReplaceConfig{
|
||||
InfoType: infoType,
|
||||
ReplaceWith: replaceWith,
|
||||
}
|
||||
var replaceConfigs = []*dlppb.RedactContentRequest_ReplaceConfig{replaceConfigsElement}
|
||||
var request = &dlppb.RedactContentRequest{
|
||||
InspectConfig: inspectConfig,
|
||||
Items: items,
|
||||
ReplaceConfigs: replaceConfigs,
|
||||
}
|
||||
|
||||
c, err := NewClient(context.Background(), clientOpt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp, err := c.RedactContent(context.Background(), request)
|
||||
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
}
|
||||
func TestDlpServiceDeidentifyContent(t *testing.T) {
|
||||
var expectedResponse *dlppb.DeidentifyContentResponse = &dlppb.DeidentifyContentResponse{}
|
||||
|
||||
mockDlp.err = nil
|
||||
mockDlp.reqs = nil
|
||||
|
||||
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
|
||||
|
||||
var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{}
|
||||
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
|
||||
var items []*dlppb.ContentItem = nil
|
||||
var request = &dlppb.DeidentifyContentRequest{
|
||||
DeidentifyConfig: deidentifyConfig,
|
||||
InspectConfig: inspectConfig,
|
||||
Items: items,
|
||||
}
|
||||
|
||||
c, err := NewClient(context.Background(), clientOpt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp, err := c.DeidentifyContent(context.Background(), request)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
|
||||
t.Errorf("wrong request %q, want %q", got, want)
|
||||
}
|
||||
|
||||
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
|
||||
t.Errorf("wrong response %q, want %q)", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDlpServiceDeidentifyContentError(t *testing.T) {
|
||||
errCode := codes.PermissionDenied
|
||||
mockDlp.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{}
|
||||
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
|
||||
var items []*dlppb.ContentItem = nil
|
||||
var request = &dlppb.DeidentifyContentRequest{
|
||||
DeidentifyConfig: deidentifyConfig,
|
||||
InspectConfig: inspectConfig,
|
||||
Items: items,
|
||||
}
|
||||
|
||||
c, err := NewClient(context.Background(), clientOpt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp, err := c.DeidentifyContent(context.Background(), request)
|
||||
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
}
|
||||
func TestDlpServiceAnalyzeDataSourceRisk(t *testing.T) {
|
||||
var expectedResponse *dlppb.RiskAnalysisOperationResult = &dlppb.RiskAnalysisOperationResult{}
|
||||
|
||||
mockDlp.err = nil
|
||||
mockDlp.reqs = nil
|
||||
|
||||
any, err := ptypes.MarshalAny(expectedResponse)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
|
||||
Name: "longrunning-test",
|
||||
Done: true,
|
||||
Result: &longrunningpb.Operation_Response{Response: any},
|
||||
})
|
||||
|
||||
var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{}
|
||||
var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{}
|
||||
var request = &dlppb.AnalyzeDataSourceRiskRequest{
|
||||
PrivacyMetric: privacyMetric,
|
||||
SourceTable: sourceTable,
|
||||
}
|
||||
|
||||
c, err := NewClient(context.Background(), clientOpt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp, err := respLRO.Wait(context.Background())
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
|
||||
t.Errorf("wrong request %q, want %q", got, want)
|
||||
}
|
||||
|
||||
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
|
||||
t.Errorf("wrong response %q, want %q)", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDlpServiceAnalyzeDataSourceRiskError(t *testing.T) {
|
||||
errCode := codes.PermissionDenied
|
||||
mockDlp.err = nil
|
||||
mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
|
||||
Name: "longrunning-test",
|
||||
Done: true,
|
||||
Result: &longrunningpb.Operation_Error{
|
||||
Error: &status.Status{
|
||||
Code: int32(errCode),
|
||||
Message: "test error",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{}
|
||||
var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{}
|
||||
var request = &dlppb.AnalyzeDataSourceRiskRequest{
|
||||
PrivacyMetric: privacyMetric,
|
||||
SourceTable: sourceTable,
|
||||
}
|
||||
|
||||
c, err := NewClient(context.Background(), clientOpt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp, err := respLRO.Wait(context.Background())
|
||||
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
}
|
||||
func TestDlpServiceCreateInspectOperation(t *testing.T) {
|
||||
var name2 string = "name2-1052831874"
|
||||
var expectedResponse = &dlppb.InspectOperationResult{
|
||||
Name: name2,
|
||||
}
|
||||
|
||||
mockDlp.err = nil
|
||||
mockDlp.reqs = nil
|
||||
|
||||
any, err := ptypes.MarshalAny(expectedResponse)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
|
||||
Name: "longrunning-test",
|
||||
Done: true,
|
||||
Result: &longrunningpb.Operation_Response{Response: any},
|
||||
})
|
||||
|
||||
var name string = "EMAIL_ADDRESS"
|
||||
var infoTypesElement = &dlppb.InfoType{
|
||||
Name: name,
|
||||
}
|
||||
var infoTypes = []*dlppb.InfoType{infoTypesElement}
|
||||
var inspectConfig = &dlppb.InspectConfig{
|
||||
InfoTypes: infoTypes,
|
||||
}
|
||||
var url string = "gs://example_bucket/example_file.png"
|
||||
var fileSet = &dlppb.CloudStorageOptions_FileSet{
|
||||
Url: url,
|
||||
}
|
||||
var cloudStorageOptions = &dlppb.CloudStorageOptions{
|
||||
FileSet: fileSet,
|
||||
}
|
||||
var storageConfig = &dlppb.StorageConfig{
|
||||
Type: &dlppb.StorageConfig_CloudStorageOptions{
|
||||
CloudStorageOptions: cloudStorageOptions,
|
||||
},
|
||||
}
|
||||
var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{}
|
||||
var request = &dlppb.CreateInspectOperationRequest{
|
||||
InspectConfig: inspectConfig,
|
||||
StorageConfig: storageConfig,
|
||||
OutputConfig: outputConfig,
|
||||
}
|
||||
|
||||
c, err := NewClient(context.Background(), clientOpt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
respLRO, err := c.CreateInspectOperation(context.Background(), request)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp, err := respLRO.Wait(context.Background())
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
|
||||
t.Errorf("wrong request %q, want %q", got, want)
|
||||
}
|
||||
|
||||
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
|
||||
t.Errorf("wrong response %q, want %q)", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDlpServiceCreateInspectOperationError(t *testing.T) {
|
||||
errCode := codes.PermissionDenied
|
||||
mockDlp.err = nil
|
||||
mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
|
||||
Name: "longrunning-test",
|
||||
Done: true,
|
||||
Result: &longrunningpb.Operation_Error{
|
||||
Error: &status.Status{
|
||||
Code: int32(errCode),
|
||||
Message: "test error",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
var name string = "EMAIL_ADDRESS"
|
||||
var infoTypesElement = &dlppb.InfoType{
|
||||
Name: name,
|
||||
}
|
||||
var infoTypes = []*dlppb.InfoType{infoTypesElement}
|
||||
var inspectConfig = &dlppb.InspectConfig{
|
||||
InfoTypes: infoTypes,
|
||||
}
|
||||
var url string = "gs://example_bucket/example_file.png"
|
||||
var fileSet = &dlppb.CloudStorageOptions_FileSet{
|
||||
Url: url,
|
||||
}
|
||||
var cloudStorageOptions = &dlppb.CloudStorageOptions{
|
||||
FileSet: fileSet,
|
||||
}
|
||||
var storageConfig = &dlppb.StorageConfig{
|
||||
Type: &dlppb.StorageConfig_CloudStorageOptions{
|
||||
CloudStorageOptions: cloudStorageOptions,
|
||||
},
|
||||
}
|
||||
var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{}
|
||||
var request = &dlppb.CreateInspectOperationRequest{
|
||||
InspectConfig: inspectConfig,
|
||||
StorageConfig: storageConfig,
|
||||
OutputConfig: outputConfig,
|
||||
}
|
||||
|
||||
c, err := NewClient(context.Background(), clientOpt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
respLRO, err := c.CreateInspectOperation(context.Background(), request)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp, err := respLRO.Wait(context.Background())
|
||||
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
}
|
||||
func TestDlpServiceListInspectFindings(t *testing.T) {
|
||||
var nextPageToken string = "nextPageToken-1530815211"
|
||||
var expectedResponse = &dlppb.ListInspectFindingsResponse{
|
||||
NextPageToken: nextPageToken,
|
||||
}
|
||||
|
||||
mockDlp.err = nil
|
||||
mockDlp.reqs = nil
|
||||
|
||||
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
|
||||
|
||||
var formattedName string = fmt.Sprintf("inspect/results/%s", "[RESULT]")
|
||||
var request = &dlppb.ListInspectFindingsRequest{
|
||||
Name: formattedName,
|
||||
}
|
||||
|
||||
c, err := NewClient(context.Background(), clientOpt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp, err := c.ListInspectFindings(context.Background(), request)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
|
||||
t.Errorf("wrong request %q, want %q", got, want)
|
||||
}
|
||||
|
||||
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
|
||||
t.Errorf("wrong response %q, want %q)", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDlpServiceListInspectFindingsError(t *testing.T) {
|
||||
errCode := codes.PermissionDenied
|
||||
mockDlp.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedName string = fmt.Sprintf("inspect/results/%s", "[RESULT]")
|
||||
var request = &dlppb.ListInspectFindingsRequest{
|
||||
Name: formattedName,
|
||||
}
|
||||
|
||||
c, err := NewClient(context.Background(), clientOpt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp, err := c.ListInspectFindings(context.Background(), request)
|
||||
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
}
|
||||
func TestDlpServiceListInfoTypes(t *testing.T) {
|
||||
var expectedResponse *dlppb.ListInfoTypesResponse = &dlppb.ListInfoTypesResponse{}
|
||||
|
||||
mockDlp.err = nil
|
||||
mockDlp.reqs = nil
|
||||
|
||||
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
|
||||
|
||||
var category string = "PII"
|
||||
var languageCode string = "en"
|
||||
var request = &dlppb.ListInfoTypesRequest{
|
||||
Category: category,
|
||||
LanguageCode: languageCode,
|
||||
}
|
||||
|
||||
c, err := NewClient(context.Background(), clientOpt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp, err := c.ListInfoTypes(context.Background(), request)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
|
||||
t.Errorf("wrong request %q, want %q", got, want)
|
||||
}
|
||||
|
||||
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
|
||||
t.Errorf("wrong response %q, want %q)", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDlpServiceListInfoTypesError(t *testing.T) {
|
||||
errCode := codes.PermissionDenied
|
||||
mockDlp.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var category string = "PII"
|
||||
var languageCode string = "en"
|
||||
var request = &dlppb.ListInfoTypesRequest{
|
||||
Category: category,
|
||||
LanguageCode: languageCode,
|
||||
}
|
||||
|
||||
c, err := NewClient(context.Background(), clientOpt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp, err := c.ListInfoTypes(context.Background(), request)
|
||||
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
}
|
||||
func TestDlpServiceListRootCategories(t *testing.T) {
|
||||
var expectedResponse *dlppb.ListRootCategoriesResponse = &dlppb.ListRootCategoriesResponse{}
|
||||
|
||||
mockDlp.err = nil
|
||||
mockDlp.reqs = nil
|
||||
|
||||
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
|
||||
|
||||
var languageCode string = "en"
|
||||
var request = &dlppb.ListRootCategoriesRequest{
|
||||
LanguageCode: languageCode,
|
||||
}
|
||||
|
||||
c, err := NewClient(context.Background(), clientOpt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp, err := c.ListRootCategories(context.Background(), request)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
|
||||
t.Errorf("wrong request %q, want %q", got, want)
|
||||
}
|
||||
|
||||
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
|
||||
t.Errorf("wrong response %q, want %q)", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDlpServiceListRootCategoriesError(t *testing.T) {
|
||||
errCode := codes.PermissionDenied
|
||||
mockDlp.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var languageCode string = "en"
|
||||
var request = &dlppb.ListRootCategoriesRequest{
|
||||
LanguageCode: languageCode,
|
||||
}
|
||||
|
||||
c, err := NewClient(context.Background(), clientOpt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp, err := c.ListRootCategories(context.Background(), request)
|
||||
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
}
|
||||
27
vendor/cloud.google.com/go/dlp/apiv2beta1/path_funcs.go
generated
vendored
27
vendor/cloud.google.com/go/dlp/apiv2beta1/path_funcs.go
generated
vendored
@@ -1,27 +0,0 @@
|
||||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dlp
|
||||
|
||||
// ResultPath returns the path for the result resource.
|
||||
//
|
||||
// Deprecated: Use
|
||||
// fmt.Sprintf("inspect/results/%s", result)
|
||||
// instead.
|
||||
func ResultPath(result string) string {
|
||||
return "" +
|
||||
"inspect/results/" +
|
||||
result +
|
||||
""
|
||||
}
|
||||
60
vendor/cloud.google.com/go/examples_test.go
generated
vendored
Normal file
60
vendor/cloud.google.com/go/examples_test.go
generated
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
// Copyright 2018 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cloud_test
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/bigquery"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// To set a timeout for an RPC, use context.WithTimeout.
|
||||
func Example_timeout() {
|
||||
ctx := context.Background()
|
||||
// Do not set a timeout on the context passed to NewClient: dialing happens
|
||||
// asynchronously, and the context is used to refresh credentials in the
|
||||
// background.
|
||||
client, err := bigquery.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
// Time out if it takes more than 10 seconds to create a dataset.
|
||||
tctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel() // Always call cancel.
|
||||
|
||||
if err := client.Dataset("new-dataset").Create(tctx, nil); err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
}
|
||||
|
||||
// To arrange for an RPC to be canceled, use context.WithCancel.
|
||||
func Example_cancellation() {
|
||||
ctx := context.Background()
|
||||
// Do not cancel the context passed to NewClient: dialing happens asynchronously,
|
||||
// and the context is used to refresh credentials in the background.
|
||||
client, err := bigquery.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel() // Always call cancel.
|
||||
|
||||
// TODO: Make the cancel function available to whatever might want to cancel the
|
||||
// call--perhaps a GUI button.
|
||||
if err := client.Dataset("new-dataset").Create(cctx, nil); err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
}
|
||||
432
vendor/cloud.google.com/go/firestore/conformance_test.go
generated
vendored
Normal file
432
vendor/cloud.google.com/go/firestore/conformance_test.go
generated
vendored
Normal file
@@ -0,0 +1,432 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// A runner for the conformance tests.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
pb "cloud.google.com/go/firestore/genproto"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
ts "github.com/golang/protobuf/ptypes/timestamp"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
fspb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
)
|
||||
|
||||
const conformanceTestWatchTargetID = 1
|
||||
|
||||
func TestConformanceTests(t *testing.T) {
|
||||
const dir = "testdata"
|
||||
fis, err := ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wtid := watchTargetID
|
||||
watchTargetID = conformanceTestWatchTargetID
|
||||
defer func() { watchTargetID = wtid }()
|
||||
n := 0
|
||||
for _, fi := range fis {
|
||||
if strings.HasSuffix(fi.Name(), ".textproto") {
|
||||
runTestFromFile(t, filepath.Join(dir, fi.Name()))
|
||||
n++
|
||||
}
|
||||
}
|
||||
t.Logf("ran %d conformance tests", n)
|
||||
}
|
||||
|
||||
func runTestFromFile(t *testing.T, filename string) {
|
||||
bytes, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %v", filename, err)
|
||||
}
|
||||
var test pb.Test
|
||||
if err := proto.UnmarshalText(string(bytes), &test); err != nil {
|
||||
t.Fatalf("unmarshalling %s: %v", filename, err)
|
||||
}
|
||||
msg := fmt.Sprintf("%s (file %s)", test.Description, filepath.Base(filename))
|
||||
runTest(t, msg, &test)
|
||||
}
|
||||
|
||||
func runTest(t *testing.T, msg string, test *pb.Test) {
|
||||
check := func(gotErr error, wantErr bool) bool {
|
||||
if wantErr && gotErr == nil {
|
||||
t.Errorf("%s: got nil, want error", msg)
|
||||
return false
|
||||
} else if !wantErr && gotErr != nil {
|
||||
t.Errorf("%s: %v", msg, gotErr)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
c, srv := newMock(t)
|
||||
switch tt := test.Test.(type) {
|
||||
case *pb.Test_Get:
|
||||
req := &fspb.BatchGetDocumentsRequest{
|
||||
Database: c.path(),
|
||||
Documents: []string{tt.Get.DocRefPath},
|
||||
}
|
||||
srv.addRPC(req, []interface{}{
|
||||
&fspb.BatchGetDocumentsResponse{
|
||||
Result: &fspb.BatchGetDocumentsResponse_Found{&fspb.Document{
|
||||
Name: tt.Get.DocRefPath,
|
||||
CreateTime: &ts.Timestamp{},
|
||||
UpdateTime: &ts.Timestamp{},
|
||||
}},
|
||||
ReadTime: &ts.Timestamp{},
|
||||
},
|
||||
})
|
||||
ref := docRefFromPath(tt.Get.DocRefPath, c)
|
||||
_, err := ref.Get(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("%s: %v", msg, err)
|
||||
return
|
||||
}
|
||||
// Checking response would just be testing the function converting a Document
|
||||
// proto to a DocumentSnapshot, hence uninteresting.
|
||||
|
||||
case *pb.Test_Create:
|
||||
srv.addRPC(tt.Create.Request, commitResponseForSet)
|
||||
ref := docRefFromPath(tt.Create.DocRefPath, c)
|
||||
data, err := convertData(tt.Create.JsonData)
|
||||
if err != nil {
|
||||
t.Errorf("%s: %v", msg, err)
|
||||
return
|
||||
}
|
||||
_, err = ref.Create(ctx, data)
|
||||
check(err, tt.Create.IsError)
|
||||
|
||||
case *pb.Test_Set:
|
||||
srv.addRPC(tt.Set.Request, commitResponseForSet)
|
||||
ref := docRefFromPath(tt.Set.DocRefPath, c)
|
||||
data, err := convertData(tt.Set.JsonData)
|
||||
if err != nil {
|
||||
t.Errorf("%s: %v", msg, err)
|
||||
return
|
||||
}
|
||||
var opts []SetOption
|
||||
if tt.Set.Option != nil {
|
||||
opts = []SetOption{convertSetOption(tt.Set.Option)}
|
||||
}
|
||||
_, err = ref.Set(ctx, data, opts...)
|
||||
check(err, tt.Set.IsError)
|
||||
|
||||
case *pb.Test_Update:
|
||||
// Ignore Update test because we only support UpdatePaths.
|
||||
// Not to worry, every Update test has a corresponding UpdatePaths test.
|
||||
|
||||
case *pb.Test_UpdatePaths:
|
||||
srv.addRPC(tt.UpdatePaths.Request, commitResponseForSet)
|
||||
ref := docRefFromPath(tt.UpdatePaths.DocRefPath, c)
|
||||
preconds := convertPrecondition(t, tt.UpdatePaths.Precondition)
|
||||
paths := convertFieldPaths(tt.UpdatePaths.FieldPaths)
|
||||
var ups []Update
|
||||
for i, path := range paths {
|
||||
val, err := convertJSONValue(tt.UpdatePaths.JsonValues[i])
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %v", msg, err)
|
||||
}
|
||||
ups = append(ups, Update{
|
||||
FieldPath: path,
|
||||
Value: val,
|
||||
})
|
||||
}
|
||||
_, err := ref.Update(ctx, ups, preconds...)
|
||||
check(err, tt.UpdatePaths.IsError)
|
||||
|
||||
case *pb.Test_Delete:
|
||||
srv.addRPC(tt.Delete.Request, commitResponseForSet)
|
||||
ref := docRefFromPath(tt.Delete.DocRefPath, c)
|
||||
preconds := convertPrecondition(t, tt.Delete.Precondition)
|
||||
_, err := ref.Delete(ctx, preconds...)
|
||||
check(err, tt.Delete.IsError)
|
||||
|
||||
case *pb.Test_Query:
|
||||
q := convertQuery(t, tt.Query)
|
||||
got, err := q.toProto()
|
||||
if check(err, tt.Query.IsError) && err == nil {
|
||||
if want := tt.Query.Query; !proto.Equal(got, want) {
|
||||
t.Errorf("%s\ngot: %s\nwant: %s", msg, proto.MarshalTextString(got), proto.MarshalTextString(want))
|
||||
}
|
||||
}
|
||||
|
||||
case *pb.Test_Listen:
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
iter := c.Collection("C").OrderBy("a", Asc).Snapshots(ctx)
|
||||
var rs []interface{}
|
||||
for _, r := range tt.Listen.Responses {
|
||||
rs = append(rs, r)
|
||||
}
|
||||
srv.addRPC(&fspb.ListenRequest{
|
||||
Database: "projects/projectID/databases/(default)",
|
||||
TargetChange: &fspb.ListenRequest_AddTarget{iter.ws.target},
|
||||
}, rs)
|
||||
got, err := nSnapshots(iter, len(tt.Listen.Snapshots))
|
||||
if err != nil {
|
||||
t.Errorf("%s: %v", msg, err)
|
||||
} else if diff := cmp.Diff(got, tt.Listen.Snapshots); diff != "" {
|
||||
t.Errorf("%s:\n%s", msg, diff)
|
||||
}
|
||||
if tt.Listen.IsError {
|
||||
_, err := iter.Next()
|
||||
if err == nil {
|
||||
t.Errorf("%s: got nil, want error", msg)
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
t.Fatalf("unknown test type %T", tt)
|
||||
}
|
||||
}
|
||||
|
||||
func nSnapshots(iter *QuerySnapshotIterator, n int) ([]*pb.Snapshot, error) {
|
||||
var snaps []*pb.Snapshot
|
||||
for i := 0; i < n; i++ {
|
||||
diter, err := iter.Next()
|
||||
if err != nil {
|
||||
return snaps, err
|
||||
}
|
||||
s := &pb.Snapshot{ReadTime: mustTimestampProto(iter.ReadTime)}
|
||||
for {
|
||||
doc, err := diter.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return snaps, err
|
||||
}
|
||||
s.Docs = append(s.Docs, doc.proto)
|
||||
}
|
||||
for _, c := range iter.Changes {
|
||||
var k pb.DocChange_Kind
|
||||
switch c.Kind {
|
||||
case DocumentAdded:
|
||||
k = pb.DocChange_ADDED
|
||||
case DocumentRemoved:
|
||||
k = pb.DocChange_REMOVED
|
||||
case DocumentModified:
|
||||
k = pb.DocChange_MODIFIED
|
||||
default:
|
||||
panic("bad kind")
|
||||
}
|
||||
s.Changes = append(s.Changes, &pb.DocChange{
|
||||
Kind: k,
|
||||
Doc: c.Doc.proto,
|
||||
OldIndex: int32(c.OldIndex),
|
||||
NewIndex: int32(c.NewIndex),
|
||||
})
|
||||
}
|
||||
snaps = append(snaps, s)
|
||||
}
|
||||
return snaps, nil
|
||||
}
|
||||
|
||||
func docRefFromPath(p string, c *Client) *DocumentRef {
|
||||
return &DocumentRef{
|
||||
Path: p,
|
||||
ID: path.Base(p),
|
||||
Parent: &CollectionRef{c: c},
|
||||
}
|
||||
}
|
||||
|
||||
func convertJSONValue(jv string) (interface{}, error) {
|
||||
var val interface{}
|
||||
if err := json.Unmarshal([]byte(jv), &val); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return convertTestValue(val), nil
|
||||
}
|
||||
|
||||
func convertData(jsonData string) (map[string]interface{}, error) {
|
||||
var m map[string]interface{}
|
||||
if err := json.Unmarshal([]byte(jsonData), &m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return convertTestMap(m), nil
|
||||
}
|
||||
|
||||
func convertTestMap(m map[string]interface{}) map[string]interface{} {
|
||||
for k, v := range m {
|
||||
m[k] = convertTestValue(v)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func convertTestValue(v interface{}) interface{} {
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
switch v {
|
||||
case "ServerTimestamp":
|
||||
return ServerTimestamp
|
||||
case "Delete":
|
||||
return Delete
|
||||
case "NaN":
|
||||
return math.NaN()
|
||||
default:
|
||||
return v
|
||||
}
|
||||
case float64:
|
||||
if v == float64(int(v)) {
|
||||
return int(v)
|
||||
}
|
||||
return v
|
||||
case []interface{}:
|
||||
for i, e := range v {
|
||||
v[i] = convertTestValue(e)
|
||||
}
|
||||
return v
|
||||
case map[string]interface{}:
|
||||
return convertTestMap(v)
|
||||
default:
|
||||
return v
|
||||
}
|
||||
}
|
||||
|
||||
func convertSetOption(opt *pb.SetOption) SetOption {
|
||||
if opt.All {
|
||||
return MergeAll
|
||||
}
|
||||
return Merge(convertFieldPaths(opt.Fields)...)
|
||||
}
|
||||
|
||||
func convertFieldPaths(fps []*pb.FieldPath) []FieldPath {
|
||||
var res []FieldPath
|
||||
for _, fp := range fps {
|
||||
res = append(res, fp.Field)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func convertPrecondition(t *testing.T, fp *fspb.Precondition) []Precondition {
|
||||
if fp == nil {
|
||||
return nil
|
||||
}
|
||||
var pc Precondition
|
||||
switch fp := fp.ConditionType.(type) {
|
||||
case *fspb.Precondition_Exists:
|
||||
pc = exists(fp.Exists)
|
||||
case *fspb.Precondition_UpdateTime:
|
||||
tm, err := ptypes.Timestamp(fp.UpdateTime)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
pc = LastUpdateTime(tm)
|
||||
default:
|
||||
t.Fatalf("unknown precondition type %T", fp)
|
||||
}
|
||||
return []Precondition{pc}
|
||||
}
|
||||
|
||||
func convertQuery(t *testing.T, qt *pb.QueryTest) Query {
|
||||
parts := strings.Split(qt.CollPath, "/")
|
||||
q := Query{
|
||||
parentPath: strings.Join(parts[:len(parts)-2], "/"),
|
||||
collectionID: parts[len(parts)-1],
|
||||
}
|
||||
for _, c := range qt.Clauses {
|
||||
switch c := c.Clause.(type) {
|
||||
case *pb.Clause_Select:
|
||||
q = q.SelectPaths(convertFieldPaths(c.Select.Fields)...)
|
||||
case *pb.Clause_OrderBy:
|
||||
var dir Direction
|
||||
switch c.OrderBy.Direction {
|
||||
case "asc":
|
||||
dir = Asc
|
||||
case "desc":
|
||||
dir = Desc
|
||||
default:
|
||||
t.Fatalf("bad direction: %q", c.OrderBy.Direction)
|
||||
}
|
||||
q = q.OrderByPath(FieldPath(c.OrderBy.Path.Field), dir)
|
||||
case *pb.Clause_Where:
|
||||
val, err := convertJSONValue(c.Where.JsonValue)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
q = q.WherePath(FieldPath(c.Where.Path.Field), c.Where.Op, val)
|
||||
case *pb.Clause_Offset:
|
||||
q = q.Offset(int(c.Offset))
|
||||
case *pb.Clause_Limit:
|
||||
q = q.Limit(int(c.Limit))
|
||||
case *pb.Clause_StartAt:
|
||||
q = q.StartAt(convertCursor(t, c.StartAt)...)
|
||||
case *pb.Clause_StartAfter:
|
||||
q = q.StartAfter(convertCursor(t, c.StartAfter)...)
|
||||
case *pb.Clause_EndAt:
|
||||
q = q.EndAt(convertCursor(t, c.EndAt)...)
|
||||
case *pb.Clause_EndBefore:
|
||||
q = q.EndBefore(convertCursor(t, c.EndBefore)...)
|
||||
default:
|
||||
t.Fatalf("bad clause type %T", c)
|
||||
}
|
||||
}
|
||||
return q
|
||||
}
|
||||
|
||||
// Returns args to a cursor method (StartAt, etc.).
|
||||
func convertCursor(t *testing.T, c *pb.Cursor) []interface{} {
|
||||
if c.DocSnapshot != nil {
|
||||
ds, err := convertDocSnapshot(c.DocSnapshot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return []interface{}{ds}
|
||||
}
|
||||
var vals []interface{}
|
||||
for _, jv := range c.JsonValues {
|
||||
v, err := convertJSONValue(jv)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
vals = append(vals, v)
|
||||
}
|
||||
return vals
|
||||
}
|
||||
|
||||
func convertDocSnapshot(ds *pb.DocSnapshot) (*DocumentSnapshot, error) {
|
||||
data, err := convertData(ds.JsonData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
doc, transformPaths, err := toProtoDocument(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(transformPaths) > 0 {
|
||||
return nil, errors.New("saw transform paths in DocSnapshot")
|
||||
}
|
||||
return &DocumentSnapshot{
|
||||
Ref: &DocumentRef{
|
||||
Path: ds.Path,
|
||||
Parent: &CollectionRef{Path: path.Dir(ds.Path)},
|
||||
},
|
||||
proto: doc,
|
||||
}, nil
|
||||
}
|
||||
357
vendor/cloud.google.com/go/firestore/cross_language_test.go
generated
vendored
357
vendor/cloud.google.com/go/firestore/cross_language_test.go
generated
vendored
@@ -1,357 +0,0 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// A runner for the cross-language tests.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
pb "cloud.google.com/go/firestore/genproto"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
ts "github.com/golang/protobuf/ptypes/timestamp"
|
||||
"golang.org/x/net/context"
|
||||
fspb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
)
|
||||
|
||||
func TestCrossLanguageTests(t *testing.T) {
|
||||
const dir = "testdata"
|
||||
fis, err := ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n := 0
|
||||
for _, fi := range fis {
|
||||
if strings.HasSuffix(fi.Name(), ".textproto") {
|
||||
runTestFromFile(t, filepath.Join(dir, fi.Name()))
|
||||
n++
|
||||
}
|
||||
}
|
||||
t.Logf("ran %d cross-language tests", n)
|
||||
}
|
||||
|
||||
func runTestFromFile(t *testing.T, filename string) {
|
||||
bytes, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %v", filename, err)
|
||||
}
|
||||
var test pb.Test
|
||||
if err := proto.UnmarshalText(string(bytes), &test); err != nil {
|
||||
t.Fatalf("unmarshalling %s: %v", filename, err)
|
||||
}
|
||||
msg := fmt.Sprintf("%s (file %s)", test.Description, filepath.Base(filename))
|
||||
runTest(t, msg, &test)
|
||||
}
|
||||
|
||||
func runTest(t *testing.T, msg string, test *pb.Test) {
|
||||
check := func(gotErr error, wantErr bool) bool {
|
||||
if wantErr && gotErr == nil {
|
||||
t.Errorf("%s: got nil, want error", msg)
|
||||
return false
|
||||
} else if !wantErr && gotErr != nil {
|
||||
t.Errorf("%s: %v", msg, gotErr)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
c, srv := newMock(t)
|
||||
switch tt := test.Test.(type) {
|
||||
case *pb.Test_Get:
|
||||
req := &fspb.BatchGetDocumentsRequest{
|
||||
Database: c.path(),
|
||||
Documents: []string{tt.Get.DocRefPath},
|
||||
}
|
||||
srv.addRPC(req, []interface{}{
|
||||
&fspb.BatchGetDocumentsResponse{
|
||||
Result: &fspb.BatchGetDocumentsResponse_Found{&fspb.Document{
|
||||
Name: tt.Get.DocRefPath,
|
||||
CreateTime: &ts.Timestamp{},
|
||||
UpdateTime: &ts.Timestamp{},
|
||||
}},
|
||||
ReadTime: &ts.Timestamp{},
|
||||
},
|
||||
})
|
||||
ref := docRefFromPath(tt.Get.DocRefPath, c)
|
||||
_, err := ref.Get(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("%s: %v", msg, err)
|
||||
return
|
||||
}
|
||||
// Checking response would just be testing the function converting a Document
|
||||
// proto to a DocumentSnapshot, hence uninteresting.
|
||||
|
||||
case *pb.Test_Create:
|
||||
srv.addRPC(tt.Create.Request, commitResponseForSet)
|
||||
ref := docRefFromPath(tt.Create.DocRefPath, c)
|
||||
data, err := convertData(tt.Create.JsonData)
|
||||
if err != nil {
|
||||
t.Errorf("%s: %v", msg, err)
|
||||
return
|
||||
}
|
||||
_, err = ref.Create(ctx, data)
|
||||
check(err, tt.Create.IsError)
|
||||
|
||||
case *pb.Test_Set:
|
||||
srv.addRPC(tt.Set.Request, commitResponseForSet)
|
||||
ref := docRefFromPath(tt.Set.DocRefPath, c)
|
||||
data, err := convertData(tt.Set.JsonData)
|
||||
if err != nil {
|
||||
t.Errorf("%s: %v", msg, err)
|
||||
return
|
||||
}
|
||||
var opts []SetOption
|
||||
if tt.Set.Option != nil {
|
||||
opts = []SetOption{convertSetOption(tt.Set.Option)}
|
||||
}
|
||||
_, err = ref.Set(ctx, data, opts...)
|
||||
check(err, tt.Set.IsError)
|
||||
|
||||
case *pb.Test_Update:
|
||||
// Ignore Update test because we only support UpdatePaths.
|
||||
// Not to worry, every Update test has a corresponding UpdatePaths test.
|
||||
|
||||
case *pb.Test_UpdatePaths:
|
||||
srv.addRPC(tt.UpdatePaths.Request, commitResponseForSet)
|
||||
ref := docRefFromPath(tt.UpdatePaths.DocRefPath, c)
|
||||
preconds := convertPrecondition(t, tt.UpdatePaths.Precondition)
|
||||
paths := convertFieldPaths(tt.UpdatePaths.FieldPaths)
|
||||
var ups []Update
|
||||
for i, path := range paths {
|
||||
val, err := convertJSONValue(tt.UpdatePaths.JsonValues[i])
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %v", msg, err)
|
||||
}
|
||||
ups = append(ups, Update{
|
||||
FieldPath: path,
|
||||
Value: val,
|
||||
})
|
||||
}
|
||||
_, err := ref.Update(ctx, ups, preconds...)
|
||||
check(err, tt.UpdatePaths.IsError)
|
||||
|
||||
case *pb.Test_Delete:
|
||||
srv.addRPC(tt.Delete.Request, commitResponseForSet)
|
||||
ref := docRefFromPath(tt.Delete.DocRefPath, c)
|
||||
preconds := convertPrecondition(t, tt.Delete.Precondition)
|
||||
_, err := ref.Delete(ctx, preconds...)
|
||||
check(err, tt.Delete.IsError)
|
||||
|
||||
case *pb.Test_Query:
|
||||
q := convertQuery(t, tt.Query)
|
||||
got, err := q.toProto()
|
||||
if check(err, tt.Query.IsError) && err == nil {
|
||||
if want := tt.Query.Query; !proto.Equal(got, want) {
|
||||
t.Errorf("%s\ngot: %s\nwant: %s", msg, proto.MarshalTextString(got), proto.MarshalTextString(want))
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
t.Fatalf("unknown test type %T", tt)
|
||||
}
|
||||
}
|
||||
|
||||
func docRefFromPath(p string, c *Client) *DocumentRef {
|
||||
return &DocumentRef{
|
||||
Path: p,
|
||||
ID: path.Base(p),
|
||||
Parent: &CollectionRef{c: c},
|
||||
}
|
||||
}
|
||||
|
||||
func convertJSONValue(jv string) (interface{}, error) {
|
||||
var val interface{}
|
||||
if err := json.Unmarshal([]byte(jv), &val); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return convertTestValue(val), nil
|
||||
}
|
||||
|
||||
func convertData(jsonData string) (map[string]interface{}, error) {
|
||||
var m map[string]interface{}
|
||||
if err := json.Unmarshal([]byte(jsonData), &m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return convertTestMap(m), nil
|
||||
}
|
||||
|
||||
func convertTestMap(m map[string]interface{}) map[string]interface{} {
|
||||
for k, v := range m {
|
||||
m[k] = convertTestValue(v)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func convertTestValue(v interface{}) interface{} {
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
switch v {
|
||||
case "ServerTimestamp":
|
||||
return ServerTimestamp
|
||||
case "Delete":
|
||||
return Delete
|
||||
case "NaN":
|
||||
return math.NaN()
|
||||
default:
|
||||
return v
|
||||
}
|
||||
case float64:
|
||||
if v == float64(int(v)) {
|
||||
return int(v)
|
||||
}
|
||||
return v
|
||||
case []interface{}:
|
||||
for i, e := range v {
|
||||
v[i] = convertTestValue(e)
|
||||
}
|
||||
return v
|
||||
case map[string]interface{}:
|
||||
return convertTestMap(v)
|
||||
default:
|
||||
return v
|
||||
}
|
||||
}
|
||||
|
||||
func convertSetOption(opt *pb.SetOption) SetOption {
|
||||
if opt.All {
|
||||
return MergeAll
|
||||
}
|
||||
return Merge(convertFieldPaths(opt.Fields)...)
|
||||
}
|
||||
|
||||
func convertFieldPaths(fps []*pb.FieldPath) []FieldPath {
|
||||
var res []FieldPath
|
||||
for _, fp := range fps {
|
||||
res = append(res, fp.Field)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func convertPrecondition(t *testing.T, fp *fspb.Precondition) []Precondition {
|
||||
if fp == nil {
|
||||
return nil
|
||||
}
|
||||
var pc Precondition
|
||||
switch fp := fp.ConditionType.(type) {
|
||||
case *fspb.Precondition_Exists:
|
||||
pc = exists(fp.Exists)
|
||||
case *fspb.Precondition_UpdateTime:
|
||||
tm, err := ptypes.Timestamp(fp.UpdateTime)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
pc = LastUpdateTime(tm)
|
||||
default:
|
||||
t.Fatalf("unknown precondition type %T", fp)
|
||||
}
|
||||
return []Precondition{pc}
|
||||
}
|
||||
|
||||
func convertQuery(t *testing.T, qt *pb.QueryTest) Query {
|
||||
parts := strings.Split(qt.CollPath, "/")
|
||||
q := Query{
|
||||
parentPath: strings.Join(parts[:len(parts)-2], "/"),
|
||||
collectionID: parts[len(parts)-1],
|
||||
}
|
||||
for _, c := range qt.Clauses {
|
||||
switch c := c.Clause.(type) {
|
||||
case *pb.Clause_Select:
|
||||
q = q.SelectPaths(convertFieldPaths(c.Select.Fields)...)
|
||||
case *pb.Clause_OrderBy:
|
||||
var dir Direction
|
||||
switch c.OrderBy.Direction {
|
||||
case "asc":
|
||||
dir = Asc
|
||||
case "desc":
|
||||
dir = Desc
|
||||
default:
|
||||
t.Fatalf("bad direction: %q", c.OrderBy.Direction)
|
||||
}
|
||||
q = q.OrderByPath(FieldPath(c.OrderBy.Path.Field), dir)
|
||||
case *pb.Clause_Where:
|
||||
val, err := convertJSONValue(c.Where.JsonValue)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
q = q.WherePath(FieldPath(c.Where.Path.Field), c.Where.Op, val)
|
||||
case *pb.Clause_Offset:
|
||||
q = q.Offset(int(c.Offset))
|
||||
case *pb.Clause_Limit:
|
||||
q = q.Limit(int(c.Limit))
|
||||
case *pb.Clause_StartAt:
|
||||
q = q.StartAt(convertCursor(t, c.StartAt)...)
|
||||
case *pb.Clause_StartAfter:
|
||||
q = q.StartAfter(convertCursor(t, c.StartAfter)...)
|
||||
case *pb.Clause_EndAt:
|
||||
q = q.EndAt(convertCursor(t, c.EndAt)...)
|
||||
case *pb.Clause_EndBefore:
|
||||
q = q.EndBefore(convertCursor(t, c.EndBefore)...)
|
||||
default:
|
||||
t.Fatalf("bad clause type %T", c)
|
||||
}
|
||||
}
|
||||
return q
|
||||
}
|
||||
|
||||
// Returns args to a cursor method (StartAt, etc.).
|
||||
func convertCursor(t *testing.T, c *pb.Cursor) []interface{} {
|
||||
if c.DocSnapshot != nil {
|
||||
ds, err := convertDocSnapshot(c.DocSnapshot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return []interface{}{ds}
|
||||
}
|
||||
var vals []interface{}
|
||||
for _, jv := range c.JsonValues {
|
||||
v, err := convertJSONValue(jv)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
vals = append(vals, v)
|
||||
}
|
||||
return vals
|
||||
}
|
||||
|
||||
func convertDocSnapshot(ds *pb.DocSnapshot) (*DocumentSnapshot, error) {
|
||||
data, err := convertData(ds.JsonData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
doc, transformPaths, err := toProtoDocument(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(transformPaths) > 0 {
|
||||
return nil, errors.New("saw transform paths in DocSnapshot")
|
||||
}
|
||||
return &DocumentSnapshot{
|
||||
Ref: &DocumentRef{
|
||||
Path: ds.Path,
|
||||
Parent: &CollectionRef{Path: path.Dir(ds.Path)},
|
||||
},
|
||||
proto: doc,
|
||||
}, nil
|
||||
}
|
||||
8
vendor/cloud.google.com/go/firestore/doc.go
generated
vendored
8
vendor/cloud.google.com/go/firestore/doc.go
generated
vendored
@@ -21,6 +21,9 @@ database.
|
||||
See https://cloud.google.com/firestore/docs for an introduction
|
||||
to Cloud Firestore and additional help on using the Firestore API.
|
||||
|
||||
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
|
||||
connection pooling and similar aspects of this package.
|
||||
|
||||
Note: you can't use both Cloud Firestore and Cloud Datastore in the same
|
||||
project.
|
||||
|
||||
@@ -209,10 +212,5 @@ read and write methods of the Transaction passed to it.
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
Authentication
|
||||
|
||||
See examples of authorization and authentication at
|
||||
https://godoc.org/cloud.google.com/go#pkg-examples.
|
||||
*/
|
||||
package firestore
|
||||
|
||||
18
vendor/cloud.google.com/go/firestore/docref.go
generated
vendored
18
vendor/cloud.google.com/go/firestore/docref.go
generated
vendored
@@ -96,9 +96,11 @@ func (d *DocumentRef) Get(ctx context.Context) (*DocumentSnapshot, error) {
|
||||
// is the underlying type of a Integer.
|
||||
// - float32 and float64 convert to Double.
|
||||
// - []byte converts to Bytes.
|
||||
// - time.Time converts to Timestamp.
|
||||
// - latlng.LatLng converts to GeoPoint. latlng is the package
|
||||
// "google.golang.org/genproto/googleapis/type/latlng".
|
||||
// - time.Time and *ts.Timestamp convert to Timestamp. ts is the package
|
||||
// "github.com/golang/protobuf/ptypes/timestamp".
|
||||
// - *latlng.LatLng converts to GeoPoint. latlng is the package
|
||||
// "google.golang.org/genproto/googleapis/type/latlng". You should always use
|
||||
// a pointer to a LatLng.
|
||||
// - Slices convert to Array.
|
||||
// - Maps and structs convert to Map.
|
||||
// - nils of any type convert to Null.
|
||||
@@ -182,6 +184,10 @@ func (d *DocumentRef) newSetWrites(data interface{}, opts []SetOption) ([]*pb.Wr
|
||||
if v.Kind() != reflect.Map {
|
||||
return nil, errors.New("firestore: MergeAll can only be specified with map data")
|
||||
}
|
||||
if v.Len() == 0 {
|
||||
// Special case: MergeAll with an empty map.
|
||||
return d.newUpdateWithTransform(&pb.Document{Name: d.Path}, []FieldPath{}, nil, nil, true), nil
|
||||
}
|
||||
fpvsFromData(v, nil, &fpvs)
|
||||
} else {
|
||||
// Set with merge paths. Collect only the values at the given paths.
|
||||
@@ -218,6 +224,10 @@ func fpvsFromData(v reflect.Value, prefix FieldPath, fpvs *[]fpv) {
|
||||
// removePathsIf creates a new slice of FieldPaths that contains
|
||||
// exactly those elements of fps for which pred returns false.
|
||||
func removePathsIf(fps []FieldPath, pred func(FieldPath) bool) []FieldPath {
|
||||
// Return fps if it's empty to preserve the distinction betweeen nil and zero-length.
|
||||
if len(fps) == 0 {
|
||||
return fps
|
||||
}
|
||||
var result []FieldPath
|
||||
for _, fp := range fps {
|
||||
if !pred(fp) {
|
||||
@@ -344,7 +354,7 @@ func (d *DocumentRef) newUpdateWithTransform(doc *pb.Document, updatePaths []Fie
|
||||
if updateOnEmpty || len(doc.Fields) > 0 ||
|
||||
len(updatePaths) > 0 || (pc != nil && len(serverTimestampPaths) == 0) {
|
||||
var mask *pb.DocumentMask
|
||||
if len(updatePaths) > 0 {
|
||||
if updatePaths != nil {
|
||||
sfps := toServiceFieldPaths(updatePaths)
|
||||
sort.Strings(sfps) // TODO(jba): make tests pass without this
|
||||
mask = &pb.DocumentMask{FieldPaths: sfps}
|
||||
|
||||
6
vendor/cloud.google.com/go/firestore/docref_test.go
generated
vendored
6
vendor/cloud.google.com/go/firestore/docref_test.go
generated
vendored
@@ -89,7 +89,7 @@ func TestDocGet(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDocSet(t *testing.T) {
|
||||
// Most tests for Set are in the cross-language tests.
|
||||
// Most tests for Set are in the conformance tests.
|
||||
ctx := context.Background()
|
||||
c, srv := newMock(t)
|
||||
|
||||
@@ -134,7 +134,7 @@ func TestDocSet(t *testing.T) {
|
||||
func TestDocCreate(t *testing.T) {
|
||||
// Verify creation with structs. In particular, make sure zero values
|
||||
// are handled well.
|
||||
// Other tests for Create are handled by the cross-language tests.
|
||||
// Other tests for Create are handled by the conformance tests.
|
||||
ctx := context.Background()
|
||||
c, srv := newMock(t)
|
||||
|
||||
@@ -199,7 +199,7 @@ var (
|
||||
testFields = map[string]*pb.Value{"a": intval(1)}
|
||||
)
|
||||
|
||||
// Update is tested by the cross-language tests.
|
||||
// Update is tested by the conformance tests.
|
||||
|
||||
func TestFPVsFromData(t *testing.T) {
|
||||
type S struct{ X int }
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user