1
0
mirror of https://github.com/rclone/rclone.git synced 2026-01-03 17:13:18 +00:00

vendor: update all dependencies to latest versions

This commit is contained in:
Nick Craig-Wood
2018-01-16 13:20:59 +00:00
parent 8e83fb6fb9
commit 7d3a17725d
4878 changed files with 1974229 additions and 201215 deletions

246
Gopkg.lock generated
View File

@@ -4,26 +4,35 @@
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "bazil.org/fuse" name = "bazil.org/fuse"
packages = [".","fs","fuseutil"] packages = [
".",
"fs",
"fuseutil"
]
revision = "371fbbdaa8987b715bdd21d6adc4c9b20155f748" revision = "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
[[projects]] [[projects]]
name = "cloud.google.com/go" name = "cloud.google.com/go"
packages = ["compute/metadata"] packages = ["compute/metadata"]
revision = "f6de2c509ed9d2af648c3c147207eaaf97149aed" revision = "050b16d2314d5fc3d4c9a51e4cd5c7468e77f162"
version = "v0.14.0" version = "v0.17.0"
[[projects]] [[projects]]
name = "github.com/Azure/azure-sdk-for-go" name = "github.com/Azure/azure-sdk-for-go"
packages = ["storage"] packages = ["storage"]
revision = "2592daf71ab6b95dcfc7f7437ecc1afb9ddb7360" revision = "eae258195456be76b2ec9ad2ee2ab63cdda365d9"
version = "v11.0.0-beta" version = "v12.2.0-beta"
[[projects]] [[projects]]
name = "github.com/Azure/go-autorest" name = "github.com/Azure/go-autorest"
packages = ["autorest","autorest/adal","autorest/azure","autorest/date"] packages = [
revision = "f6be1abbb5abd0517522f850dd785990d373da7e" "autorest",
version = "v8.4.0" "autorest/adal",
"autorest/azure",
"autorest/date"
]
revision = "6311d7a76f54cf2b6dea03d737d9bd9a6022ac5f"
version = "v9.7.1"
[[projects]] [[projects]]
branch = "master" branch = "master"
@@ -41,25 +50,53 @@
branch = "master" branch = "master"
name = "github.com/a8m/tree" name = "github.com/a8m/tree"
packages = ["."] packages = ["."]
revision = "5554ed4554293f11a726accc1ebf2bd3342742f8" revision = "cf42b1e486f0b025942a768a9ad59c9939d6ca40"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/aws/aws-sdk-go" name = "github.com/aws/aws-sdk-go"
packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/s3iface","service/s3/s3manager","service/sts"] packages = [
revision = "5a2026bfb28e86839f9fcc46523850319399006c" "aws",
"aws/awserr",
"aws/awsutil",
"aws/client",
"aws/client/metadata",
"aws/corehandlers",
"aws/credentials",
"aws/credentials/ec2rolecreds",
"aws/credentials/endpointcreds",
"aws/credentials/stscreds",
"aws/defaults",
"aws/ec2metadata",
"aws/endpoints",
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/shareddefaults",
"private/protocol",
"private/protocol/query",
"private/protocol/query/queryutil",
"private/protocol/rest",
"private/protocol/restxml",
"private/protocol/xml/xmlutil",
"service/s3",
"service/s3/s3iface",
"service/s3/s3manager",
"service/sts"
]
revision = "2fe57096de348e6cff4031af99254613f8ef73ea"
[[projects]] [[projects]]
name = "github.com/billziss-gh/cgofuse" name = "github.com/billziss-gh/cgofuse"
packages = ["fuse"] packages = ["fuse"]
revision = "3a24389863c5bf906de391226ee8c4ec2c925bfe" revision = "487e2baa5611bab252a906d7f9b869f944607305"
version = "v1.0.3" version = "v1.0.4"
[[projects]] [[projects]]
branch = "master"
name = "github.com/coreos/bbolt" name = "github.com/coreos/bbolt"
packages = ["."] packages = ["."]
revision = "a148de800f91fe6cbd8b2a472bbfdc09c4b6568f" revision = "48ea1b39c25fc1bab3506fbc712ecbaa842c4d2d"
version = "v1.3.1-coreos.6"
[[projects]] [[projects]]
name = "github.com/cpuguy83/go-md2man" name = "github.com/cpuguy83/go-md2man"
@@ -76,8 +113,8 @@
[[projects]] [[projects]]
name = "github.com/dgrijalva/jwt-go" name = "github.com/dgrijalva/jwt-go"
packages = ["."] packages = ["."]
revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c" revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29"
version = "v3.0.0" version = "v3.1.0"
[[projects]] [[projects]]
name = "github.com/djherbis/times" name = "github.com/djherbis/times"
@@ -88,20 +125,25 @@
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/dropbox/dropbox-sdk-go-unofficial" name = "github.com/dropbox/dropbox-sdk-go-unofficial"
packages = ["dropbox","dropbox/async","dropbox/file_properties","dropbox/files"] packages = [
revision = "9befe8c89b6e17667716dedd2f62327bae374bf2" "dropbox",
"dropbox/async",
"dropbox/file_properties",
"dropbox/files"
]
revision = "9c27e83ceccc8f8bbc9afdc17c50798529d608b1"
[[projects]] [[projects]]
name = "github.com/go-ini/ini" name = "github.com/go-ini/ini"
packages = ["."] packages = ["."]
revision = "20b96f641a5ea98f2f8619ff4f3e061cff4833bd" revision = "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a"
version = "v1.28.2" version = "v1.32.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/golang/protobuf" name = "github.com/golang/protobuf"
packages = ["proto"] packages = ["proto"]
revision = "130e6b02ab059e7b717a096f397c5b60111cae74" revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
[[projects]] [[projects]]
branch = "master" branch = "master"
@@ -119,7 +161,7 @@
branch = "master" branch = "master"
name = "github.com/jlaffaye/ftp" name = "github.com/jlaffaye/ftp"
packages = ["."] packages = ["."]
revision = "299b7ff5b6096588cceca2edc1fc9f557002fb85" revision = "83891dbe0099af272b7f8d094427215a09b5fd0f"
[[projects]] [[projects]]
name = "github.com/jmespath/go-jmespath" name = "github.com/jmespath/go-jmespath"
@@ -132,6 +174,11 @@
packages = ["."] packages = ["."]
revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b" revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
[[projects]]
name = "github.com/marstr/guid"
packages = ["."]
revision = "8bdf7d1a087ccc975cf37dd6507da50698fd19ca"
[[projects]] [[projects]]
name = "github.com/mattn/go-runewidth" name = "github.com/mattn/go-runewidth"
packages = ["."] packages = ["."]
@@ -142,19 +189,19 @@
branch = "master" branch = "master"
name = "github.com/ncw/go-acd" name = "github.com/ncw/go-acd"
packages = ["."] packages = ["."]
revision = "96a49aad3fc3889629f2eceb004927386884bd92" revision = "887eb06ab6a255fbf5744b5812788e884078620a"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/ncw/swift" name = "github.com/ncw/swift"
packages = ["."] packages = ["."]
revision = "c95c6e5c2d1a3d37fc44c8c6dc9e231c7500667d" revision = "ae9f0ea1605b9aa6434ed5c731ca35d83ba67c55"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/nsf/termbox-go" name = "github.com/nsf/termbox-go"
packages = ["."] packages = ["."]
revision = "4ed959e0540971545eddb8c75514973d670cf739" revision = "8c5e0793e04afcda7fe23d0751791e7321df4265"
[[projects]] [[projects]]
branch = "master" branch = "master"
@@ -163,28 +210,34 @@
revision = "ed8ca104421a21947710335006107540e3ecb335" revision = "ed8ca104421a21947710335006107540e3ecb335"
[[projects]] [[projects]]
branch = "master"
name = "github.com/patrickmn/go-cache" name = "github.com/patrickmn/go-cache"
packages = ["."] packages = ["."]
revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0" revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
version = "v2.1.0"
[[projects]] [[projects]]
name = "github.com/pengsrc/go-shared" name = "github.com/pengsrc/go-shared"
packages = ["check","convert","json","yaml"] packages = [
revision = "454950d6a0782c34427d4f29b46c6bf447256f20" "buffer",
version = "v0.0.8" "check",
"convert",
"log",
"reopen"
]
revision = "b98065a377794d577e2a0e32869378b9ce4b8952"
version = "v0.1.1"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/pkg/errors" name = "github.com/pkg/errors"
packages = ["."] packages = ["."]
revision = "2b3a18b5f0fb6b4f9190549597d3f962c02bc5eb" revision = "e881fd58d78e04cf6d0de1217f8707c8cc2249bc"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/pkg/sftp" name = "github.com/pkg/sftp"
packages = ["."] packages = ["."]
revision = "7c1f7a370726a2457b33b29baefc2402b4965c65" revision = "72ec6e85598d2480c30f633c154b07b6c112eade"
[[projects]] [[projects]]
name = "github.com/pmezard/go-difflib" name = "github.com/pmezard/go-difflib"
@@ -196,7 +249,7 @@
branch = "master" branch = "master"
name = "github.com/rfjakob/eme" name = "github.com/rfjakob/eme"
packages = ["."] packages = ["."]
revision = "7c8316a9cb0a6af865265f899f5de6aadb31a24b" revision = "2222dbd4ba467ab3fc7e8af41562fcfe69c0d770"
[[projects]] [[projects]]
name = "github.com/russross/blackfriday" name = "github.com/russross/blackfriday"
@@ -205,16 +258,10 @@
version = "v1.5" version = "v1.5"
[[projects]] [[projects]]
name = "github.com/satori/uuid" name = "github.com/satori/go.uuid"
packages = ["."] packages = ["."]
revision = "879c5887cd475cd7864858769793b2ceb0d44feb" revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
version = "v1.1.0" version = "v1.2.0"
[[projects]]
name = "github.com/sirupsen/logrus"
packages = ["."]
revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
version = "v1.0.3"
[[projects]] [[projects]]
branch = "master" branch = "master"
@@ -225,20 +272,26 @@
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/spf13/cobra" name = "github.com/spf13/cobra"
packages = [".","doc"] packages = [
revision = "e5f66de850af3302fbe378c8acded2b0fa55472c" ".",
"doc"
]
revision = "0c34d16c3123764e413b9ed982ada58b1c3d53ea"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/spf13/pflag" name = "github.com/spf13/pflag"
packages = ["."] packages = ["."]
revision = "7aff26db30c1be810f9de5038ec5ef96ac41fd7c" revision = "4c012f6dcd9546820e378d0bdda4d8fc772cdfea"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/stretchr/testify" name = "github.com/stretchr/testify"
packages = ["assert","require"] packages = [
revision = "890a5c3458b43e6104ff5da8dfa139d013d77544" "assert",
"require"
]
revision = "87b1dfb5b2fa649f52695dd9eae19abe404a4308"
[[projects]] [[projects]]
branch = "master" branch = "master"
@@ -247,40 +300,87 @@
revision = "ba9c9e33906f58169366275e3450db66139a31a9" revision = "ba9c9e33906f58169366275e3450db66139a31a9"
[[projects]] [[projects]]
branch = "master"
name = "github.com/yunify/qingstor-sdk-go" name = "github.com/yunify/qingstor-sdk-go"
packages = [".","config","logger","request","request/builder","request/data","request/errors","request/signer","request/unpacker","service","utils"] packages = [
revision = "088fbd27bd49adf215d02a05c36c5ac2d243d1f1" ".",
"config",
"logger",
"request",
"request/builder",
"request/data",
"request/errors",
"request/signer",
"request/unpacker",
"service",
"utils"
]
revision = "51fa3b6bb3c24f4d646eefff251cd2e6ba716600"
version = "v2.2.9"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/crypto" name = "golang.org/x/crypto"
packages = ["curve25519","ed25519","ed25519/internal/edwards25519","nacl/secretbox","pbkdf2","poly1305","salsa20/salsa","scrypt","ssh","ssh/agent","ssh/terminal"] packages = [
revision = "76eec36fa14229c4b25bb894c2d0e591527af429" "curve25519",
"ed25519",
"ed25519/internal/edwards25519",
"nacl/secretbox",
"pbkdf2",
"poly1305",
"salsa20/salsa",
"scrypt",
"ssh",
"ssh/agent",
"ssh/terminal"
]
revision = "13931e22f9e72ea58bb73048bc752b48c6d4d4ac"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/net" name = "golang.org/x/net"
packages = ["context","context/ctxhttp","html","html/atom","webdav","webdav/internal/xml"] packages = [
revision = "0a9397675ba34b2845f758fe3cd68828369c6517" "context",
"context/ctxhttp",
"html",
"html/atom",
"webdav",
"webdav/internal/xml"
]
revision = "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/oauth2" name = "golang.org/x/oauth2"
packages = [".","google","internal","jws","jwt"] packages = [
revision = "bb50c06baba3d0c76f9d125c0719093e315b5b44" ".",
"google",
"internal",
"jws",
"jwt"
]
revision = "30785a2c434e431ef7c507b54617d6a951d5f2b4"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/sys" name = "golang.org/x/sys"
packages = ["unix","windows"] packages = [
revision = "314a259e304ff91bd6985da2a7149bbf91237993" "unix",
"windows"
]
revision = "fff93fa7cd278d84afc205751523809c464168ab"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/text" name = "golang.org/x/text"
packages = ["internal/gen","internal/triegen","internal/ucd","transform","unicode/cldr","unicode/norm"] packages = [
revision = "1cbadb444a806fd9430d14ad08967ed91da4fa0a" "internal/gen",
"internal/triegen",
"internal/ucd",
"transform",
"unicode/cldr",
"unicode/norm"
]
revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3"
[[projects]] [[projects]]
branch = "master" branch = "master"
@@ -291,12 +391,30 @@
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "google.golang.org/api" name = "google.golang.org/api"
packages = ["drive/v2","gensupport","googleapi","googleapi/internal/uritemplates","storage/v1"] packages = [
revision = "906273f42cdebd65de3a53f30dd9e23de1b55ba9" "drive/v2",
"gensupport",
"googleapi",
"googleapi/internal/uritemplates",
"storage/v1"
]
revision = "de3aa2cfa7f1c18dcb7f91738099bad280117b8e"
[[projects]] [[projects]]
name = "google.golang.org/appengine" name = "google.golang.org/appengine"
packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","log","urlfetch"] packages = [
".",
"internal",
"internal/app_identity",
"internal/base",
"internal/datastore",
"internal/log",
"internal/modules",
"internal/remote_api",
"internal/urlfetch",
"log",
"urlfetch"
]
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a" revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
version = "v1.0.0" version = "v1.0.0"
@@ -304,7 +422,7 @@
branch = "v2" branch = "v2"
name = "gopkg.in/yaml.v2" name = "gopkg.in/yaml.v2"
packages = ["."] packages = ["."]
revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f" revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4"
[solve-meta] [solve-meta]
analyzer-name = "dep" analyzer-name = "dep"

View File

@@ -1,14 +1,19 @@
sudo: false sudo: false
language: go language: go
go: go:
- 1.6 - 1.6.x
- 1.7 - 1.7.x
- 1.8 - 1.8.x
- 1.9.x
install: install:
- go get -v cloud.google.com/go/... - go get -v cloud.google.com/go/...
script: script:
- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in key.json.enc -out key.json -d - openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in keys.tar.enc -out keys.tar -d
- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json" - tar xvf keys.tar
- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762"
GCLOUD_TESTS_GOLANG_KEY="$(pwd)/dulcet-port-762-key.json"
GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID="gcloud-golang-firestore-tests"
GCLOUD_TESTS_GOLANG_FIRESTORE_KEY="$(pwd)/gcloud-golang-firestore-tests-key.json"
./run-tests.sh $TRAVIS_COMMIT ./run-tests.sh $TRAVIS_COMMIT
env: env:
matrix: matrix:

View File

@@ -31,9 +31,12 @@ To run the integrations tests, creating and configuration of a project in the
Google Developers Console is required. Google Developers Console is required.
After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount). After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount).
Ensure the project-level **Owner** [IAM role](console.cloud.google.com/iam-admin/iam/project) Ensure the project-level **Owner**
(or **Editor** and **Logs Configuration Writer** roles) are added to the [IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to the
service account. service account. Alternatively, the account can be granted all of the following roles:
- **Editor**
- **Logs Configuration Writer**
- **PubSub Admin**
Once you create a project, set the following environment variables to be able to Once you create a project, set the following environment variables to be able to
run the against the actual APIs. run the against the actual APIs.
@@ -42,6 +45,12 @@ run the against the actual APIs.
- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file. - **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
- **GCLOUD_TESTS_API_KEY**: Your API key. - **GCLOUD_TESTS_API_KEY**: Your API key.
Firestore requires a different project and key:
- **GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID**: Developers Console project's ID
supporting Firestore
- **GCLOUD_TESTS_GOLANG_FIRESTORE_KEY**: The path to the JSON key file.
Install the [gcloud command-line tool][gcloudcli] to your machine and use it Install the [gcloud command-line tool][gcloudcli] to your machine and use it
to create some resources used in integration tests. to create some resources used in integration tests.
@@ -63,10 +72,15 @@ $ gcloud preview datastore create-indexes datastore/testdata/index.yaml
$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID $ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID $ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
# Create a PubSub topic for integration tests of storage notifications.
$ gcloud beta pubsub topics create go-storage-notification-test
# Create a Spanner instance for the spanner integration tests. # Create a Spanner instance for the spanner integration tests.
$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test' $ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test'
# NOTE: Spanner instances are priced by the node-hour, so you may want to delete # NOTE: Spanner instances are priced by the node-hour, so you may want to delete
# the instance after testing with 'gcloud beta spanner instances delete'. # the instance after testing with 'gcloud beta spanner instances delete'.
``` ```
Once you've set the environment variables, you can run the integration tests by Once you've set the environment variables, you can run the integration tests by
@@ -82,9 +96,9 @@ Before we can accept your pull requests you'll need to sign a Contributor
License Agreement (CLA): License Agreement (CLA):
- **If you are an individual writing original source code** and **you own the - **If you are an individual writing original source code** and **you own the
- intellectual property**, then you'll need to sign an [individual CLA][indvcla]. intellectual property**, then you'll need to sign an [individual CLA][indvcla].
- **If you work for a company that wants to allow you to contribute your work**, - **If you work for a company that wants to allow you to contribute your
then you'll need to sign a [corporate CLA][corpcla]. work**, then you'll need to sign a [corporate CLA][corpcla].
You can sign these electronically (just scroll to the bottom). After that, You can sign these electronically (just scroll to the bottom). After that,
we'll be able to accept your pull requests. we'll be able to accept your pull requests.

View File

@@ -24,6 +24,7 @@ Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com> Ingo Oeser <nightlyone@googlemail.com>
Johan Euphrosine <proppy@google.com> Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com> Jonathan Amsterdam <jba@google.com>
Kunpei Sakai <namusyaka@gmail.com>
Luna Duclos <luna.duclos@palmstonegames.com> Luna Duclos <luna.duclos@palmstonegames.com>
Magnus Hiie <magnus.hiie@gmail.com> Magnus Hiie <magnus.hiie@gmail.com>
Michael McGreevy <mcgreevy@golang.org> Michael McGreevy <mcgreevy@golang.org>

157
vendor/cloud.google.com/go/README.md generated vendored
View File

@@ -33,110 +33,75 @@ make backwards-incompatible changes.
## News ## News
_September 28, 2017_ _December 11, 2017_
*v0.14.0* *v0.17.0*
- bigquery BREAKING CHANGES: - firestore BREAKING CHANGES:
- Standard SQL is the default for queries and views. - Remove UpdateMap and UpdateStruct; rename UpdatePaths to Update.
- `Table.Create` takes `TableMetadata` as a second argument, instead of Change
options. `docref.UpdateMap(ctx, map[string]interface{}{"a.b", 1})`
- `Dataset.Create` takes `DatasetMetadata` as a second argument. to
- `DatasetMetadata` field `ID` renamed to `FullID` `docref.Update(ctx, []firestore.Update{{Path: "a.b", Value: 1}})`
- `TableMetadata` field `ID` renamed to `FullID`
Change
`docref.UpdateStruct(ctx, []string{"Field"}, aStruct)`
to
`docref.Update(ctx, []firestore.Update{{Path: "Field", Value: aStruct.Field}})`
- Rename MergePaths to Merge; require args to be FieldPaths
- A value stored as an integer can be read into a floating-point field, and vice versa.
- bigtable/cmd/cbt:
- Support deleting a column.
- Add regex option for row read.
- spanner: Mark stable.
- storage:
- Add Reader.ContentEncoding method.
- Fix handling of SignedURL headers.
- bigquery:
- If Uploader.Put is called with no rows, it returns nil without making a
call.
- Schema inference supports the "nullable" option in struct tags for
non-required fields.
- TimePartitioning supports "Field".
_October 30, 2017_
*v0.16.0*
- Other bigquery changes: - Other bigquery changes:
- The client will append a random suffix to a provided job ID if you set - `JobIterator.Next` returns `*Job`; removed `JobInfo` (BREAKING CHANGE).
`AddJobIDSuffix` to true in a job config. - UseStandardSQL is deprecated; set UseLegacySQL to true if you need
- Listing jobs is supported. Legacy SQL.
- Better retry logic. - Uploader.Put will generate a random insert ID if you do not provide one.
- Support time partitioning for load jobs.
- Support dry-run queries.
- A `Job` remembers its last retrieved status.
- Support retrieving job configuration.
- Support labels for jobs and tables.
- Support dataset access lists.
- Improve support for external data sources, including data from Bigtable and
Google Sheets, and tables with external data.
- Support updating a table's view configuration.
- Fix uploading civil times with nanoseconds.
- vision, language, speech: clients are now stable - storage:
- Support PubSub notifications.
- Support Requester Pays buckets.
- monitoring: client is now beta - profiler: Support goroutine and mutex profile types.
- profiler:
- Rename InstanceName to Instance, ZoneName to Zone
- Auto-detect service name and version on AppEngine.
_September 8, 2017_
*v0.13.0*
- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these
options to continue using Legacy SQL after the client switches its default
to Standard SQL.
- bigquery: Support for updating dataset labels.
- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other
than the client's. DatasetsInProject is no longer needed and is deprecated.
- bigtable: Fail ListInstances when any zones fail.
- spanner: support decoding of slices of basic types (e.g. []string, []int64,
etc.)
- logging/logadmin: UpdateSink no longer creates a sink if it is missing
(actually a change to the underlying service, not the client)
- profiler: Service and ServiceVersion replace Target in Config.
_August 22, 2017_
*v0.12.0*
- pubsub: Subscription.Receive now uses streaming pull.
- pubsub: add Client.TopicInProject to access topics in a different project
than the client.
- errors: renamed errorreporting. The errors package will be removed shortly.
- datastore: improved retry behavior.
- bigquery: support updates to dataset metadata, with etags.
- bigquery: add etag support to Table.Update (BREAKING: etag argument added).
- bigquery: generate all job IDs on the client.
- storage: support bucket lifecycle configurations.
_July 31, 2017_ _October 3, 2017_
*v0.11.0* *v0.15.0*
- Clients for spanner, pubsub and video are now in beta. - firestore: beta release. See the
[announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html).
- New client for DLP. - errorreporting: The existing package has been redesigned.
- spanner: performance and testing improvements. - errors: This package has been removed. Use errorreporting.
- storage: requester-pays buckets are supported.
- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements.
- pubsub: bug fixes and other minor improvements
_June 17, 2017_
*v0.10.0*
- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update.
- pubsub: Subscription.Receive now runs concurrently for higher throughput.
- vision: cloud.google.com/go/vision is deprecated. Use
cloud.google.com/go/vision/apiv1 instead.
- translation: now stable.
- trace: several changes to the surface. See the link below.
[Code changes required from v0.9.0.](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/MIGRATION.md)
[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md) [Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md)
@@ -146,6 +111,7 @@ cloud.google.com/go/vision/apiv1 instead.
Google API | Status | Package Google API | Status | Package
---------------------------------|--------------|----------------------------------------------------------- ---------------------------------|--------------|-----------------------------------------------------------
[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref] [Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
[Firestore][cloud-firestore] | beta | [`cloud.google.com/go/firestore`][cloud-firestore-ref]
[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref] [Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref]
[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref] [Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref] [BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
@@ -155,7 +121,7 @@ Google API | Status | Package
[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref] [Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
[Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref] [Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref] [Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
[Spanner][cloud-spanner] | beta | [`cloud.google.com/go/spanner`][cloud-spanner-ref] [Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref] [Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref]
[Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace`][cloud-trace-ref] [Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace`][cloud-trace-ref]
[Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref] [Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
@@ -480,6 +446,11 @@ for more information.
[cloud-datastore-docs]: https://cloud.google.com/datastore/docs [cloud-datastore-docs]: https://cloud.google.com/datastore/docs
[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate [cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate
[cloud-firestore]: https://cloud.google.com/firestore/
[cloud-firestore-ref]: https://godoc.org/cloud.google.com/go/firestore
[cloud-firestore-docs]: https://cloud.google.com/firestore/docs
[cloud-firestore-activation]: https://cloud.google.com/firestore/docs/activate
[cloud-pubsub]: https://cloud.google.com/pubsub/ [cloud-pubsub]: https://cloud.google.com/pubsub/
[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub [cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub
[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs [cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs

View File

@@ -0,0 +1,8 @@
# BigQuery Benchmark
This directory contains benchmarks for BigQuery client.
## Usage
`go run bench.go -- <your project id> queries.json`
BigQuery service caches requests so the benchmark should be run
at least twice, disregarding the first result.

View File

@@ -0,0 +1,85 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//+build ignore
package main
import (
"encoding/json"
"flag"
"io/ioutil"
"log"
"time"
"cloud.google.com/go/bigquery"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
)
func main() {
flag.Parse()
ctx := context.Background()
c, err := bigquery.NewClient(ctx, flag.Arg(0))
if err != nil {
log.Fatal(err)
}
queriesJSON, err := ioutil.ReadFile(flag.Arg(1))
if err != nil {
log.Fatal(err)
}
var queries []string
if err := json.Unmarshal(queriesJSON, &queries); err != nil {
log.Fatal(err)
}
for _, q := range queries {
doQuery(ctx, c, q)
}
}
func doQuery(ctx context.Context, c *bigquery.Client, qt string) {
startTime := time.Now()
q := c.Query(qt)
it, err := q.Read(ctx)
if err != nil {
log.Fatal(err)
}
numRows, numCols := 0, 0
var firstByte time.Duration
for {
var values []bigquery.Value
err := it.Next(&values)
if err == iterator.Done {
break
}
if err != nil {
log.Fatal(err)
}
if numRows == 0 {
numCols = len(values)
firstByte = time.Since(startTime)
} else if numCols != len(values) {
log.Fatalf("got %d columns, want %d", len(values), numCols)
}
numRows++
}
log.Printf("query %q: %d rows, %d cols, first byte %f sec, total %f sec",
qt, numRows, numCols, firstByte.Seconds(), time.Since(startTime).Seconds())
}

View File

@@ -0,0 +1,10 @@
[
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 10000",
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 100000",
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 1000000",
"SELECT title FROM `bigquery-public-data.samples.wikipedia` ORDER BY title LIMIT 1000",
"SELECT title, id, timestamp, contributor_ip FROM `bigquery-public-data.samples.wikipedia` WHERE title like 'Blo%' ORDER BY id",
"SELECT * FROM `bigquery-public-data.baseball.games_post_wide` ORDER BY gameId",
"SELECT * FROM `bigquery-public-data.samples.github_nested` WHERE repository.has_downloads ORDER BY repository.created_at LIMIT 10000",
"SELECT repo_name, path FROM `bigquery-public-data.github_repos.files` WHERE path LIKE '%.java' ORDER BY id LIMIT 1000000"
]

View File

@@ -14,11 +14,18 @@
package bigquery package bigquery
// TODO(mcgreevy): support dry-run mode when creating jobs.
import ( import (
"fmt" "fmt"
"io"
"net/http"
"time"
gax "github.com/googleapis/gax-go"
"cloud.google.com/go/internal"
"cloud.google.com/go/internal/version"
"google.golang.org/api/googleapi"
"google.golang.org/api/option" "google.golang.org/api/option"
htransport "google.golang.org/api/transport/http" htransport "google.golang.org/api/transport/http"
@@ -26,20 +33,22 @@ import (
bq "google.golang.org/api/bigquery/v2" bq "google.golang.org/api/bigquery/v2"
) )
const prodAddr = "https://www.googleapis.com/bigquery/v2/" const (
prodAddr = "https://www.googleapis.com/bigquery/v2/"
Scope = "https://www.googleapis.com/auth/bigquery"
userAgent = "gcloud-golang-bigquery/20160429"
)
// ExternalData is a table which is stored outside of BigQuery. It is implemented by GCSReference. var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)
type ExternalData interface {
externalDataConfig() bq.ExternalDataConfiguration func setClientHeader(headers http.Header) {
headers.Set("x-goog-api-client", xGoogHeader)
} }
const Scope = "https://www.googleapis.com/auth/bigquery"
const userAgent = "gcloud-golang-bigquery/20160429"
// Client may be used to perform BigQuery operations. // Client may be used to perform BigQuery operations.
type Client struct { type Client struct {
service service
projectID string projectID string
bqs *bq.Service
} }
// NewClient constructs a new Client which can perform BigQuery operations. // NewClient constructs a new Client which can perform BigQuery operations.
@@ -53,17 +62,16 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio
o = append(o, opts...) o = append(o, opts...)
httpClient, endpoint, err := htransport.NewClient(ctx, o...) httpClient, endpoint, err := htransport.NewClient(ctx, o...)
if err != nil { if err != nil {
return nil, fmt.Errorf("dialing: %v", err) return nil, fmt.Errorf("bigquery: dialing: %v", err)
} }
bqs, err := bq.New(httpClient)
s, err := newBigqueryService(httpClient, endpoint)
if err != nil { if err != nil {
return nil, fmt.Errorf("constructing bigquery client: %v", err) return nil, fmt.Errorf("bigquery: constructing client: %v", err)
} }
bqs.BasePath = endpoint
c := &Client{ c := &Client{
service: s,
projectID: projectID, projectID: projectID,
bqs: bqs,
} }
return c, nil return c, nil
} }
@@ -75,11 +83,74 @@ func (c *Client) Close() error {
return nil return nil
} }
func (c *Client) insertJob(ctx context.Context, conf *insertJobConf) (*Job, error) { // Calls the Jobs.Insert RPC and returns a Job.
job, err := c.service.insertJob(ctx, c.projectID, conf) func (c *Client) insertJob(ctx context.Context, job *bq.Job, media io.Reader) (*Job, error) {
call := c.bqs.Jobs.Insert(c.projectID, job).Context(ctx)
setClientHeader(call.Header())
if media != nil {
call.Media(media)
}
var res *bq.Job
var err error
invoke := func() error {
res, err = call.Do()
return err
}
// A job with a client-generated ID can be retried; the presence of the
// ID makes the insert operation idempotent.
// We don't retry if there is media, because it is an io.Reader. We'd
// have to read the contents and keep it in memory, and that could be expensive.
// TODO(jba): Look into retrying if media != nil.
if job.JobReference != nil && media == nil {
err = runWithRetry(ctx, invoke)
} else {
err = invoke()
}
if err != nil { if err != nil {
return nil, err return nil, err
} }
job.c = c return bqToJob(res, c)
return job, nil }
// Convert a number of milliseconds since the Unix epoch to a time.Time.
// Treat an input of zero specially: convert it to the zero time,
// rather than the start of the epoch.
func unixMillisToTime(m int64) time.Time {
if m == 0 {
return time.Time{}
}
return time.Unix(0, m*1e6)
}
// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
// See the similar function in ../storage/invoke.go. The main difference is the
// reason for retrying.
func runWithRetry(ctx context.Context, call func() error) error {
// These parameters match the suggestions in https://cloud.google.com/bigquery/sla.
backoff := gax.Backoff{
Initial: 1 * time.Second,
Max: 32 * time.Second,
Multiplier: 2,
}
return internal.Retry(ctx, backoff, func() (stop bool, err error) {
err = call()
if err == nil {
return true, nil
}
return !retryableError(err), err
})
}
// This is the correct definition of retryable according to the BigQuery team.
func retryableError(err error) bool {
e, ok := err.(*googleapi.Error)
if !ok {
return false
}
var reason string
if len(e.Errors) > 0 {
reason = e.Errors[0].Reason
}
return reason == "backendError" || reason == "rateLimitExceeded"
} }

View File

@@ -21,12 +21,6 @@ import (
// CopyConfig holds the configuration for a copy job. // CopyConfig holds the configuration for a copy job.
type CopyConfig struct { type CopyConfig struct {
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string
// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool
// Srcs are the tables from which data will be copied. // Srcs are the tables from which data will be copied.
Srcs []*Table Srcs []*Table
@@ -38,18 +32,51 @@ type CopyConfig struct {
CreateDisposition TableCreateDisposition CreateDisposition TableCreateDisposition
// WriteDisposition specifies how existing data in the destination table is treated. // WriteDisposition specifies how existing data in the destination table is treated.
// The default is WriteAppend. // The default is WriteEmpty.
WriteDisposition TableWriteDisposition WriteDisposition TableWriteDisposition
// The labels associated with this job.
Labels map[string]string
}
func (c *CopyConfig) toBQ() *bq.JobConfiguration {
var ts []*bq.TableReference
for _, t := range c.Srcs {
ts = append(ts, t.toBQ())
}
return &bq.JobConfiguration{
Labels: c.Labels,
Copy: &bq.JobConfigurationTableCopy{
CreateDisposition: string(c.CreateDisposition),
WriteDisposition: string(c.WriteDisposition),
DestinationTable: c.Dst.toBQ(),
SourceTables: ts,
},
}
}
func bqToCopyConfig(q *bq.JobConfiguration, c *Client) *CopyConfig {
cc := &CopyConfig{
Labels: q.Labels,
CreateDisposition: TableCreateDisposition(q.Copy.CreateDisposition),
WriteDisposition: TableWriteDisposition(q.Copy.WriteDisposition),
Dst: bqToTable(q.Copy.DestinationTable, c),
}
for _, t := range q.Copy.SourceTables {
cc.Srcs = append(cc.Srcs, bqToTable(t, c))
}
return cc
} }
// A Copier copies data into a BigQuery table from one or more BigQuery tables. // A Copier copies data into a BigQuery table from one or more BigQuery tables.
type Copier struct { type Copier struct {
JobIDConfig
CopyConfig CopyConfig
c *Client c *Client
} }
// CopierFrom returns a Copier which can be used to copy data into a // CopierFrom returns a Copier which can be used to copy data into a
// BigQuery table from one or more BigQuery tables. // BigQuery table from one or more BigQuery tables.
// The returned Copier may optionally be further configured before its Run method is called. // The returned Copier may optionally be further configured before its Run method is called.
func (t *Table) CopierFrom(srcs ...*Table) *Copier { func (t *Table) CopierFrom(srcs ...*Table) *Copier {
return &Copier{ return &Copier{
@@ -63,17 +90,12 @@ func (t *Table) CopierFrom(srcs ...*Table) *Copier {
// Run initiates a copy job. // Run initiates a copy job.
func (c *Copier) Run(ctx context.Context) (*Job, error) { func (c *Copier) Run(ctx context.Context) (*Job, error) {
conf := &bq.JobConfigurationTableCopy{ return c.c.insertJob(ctx, c.newJob(), nil)
CreateDisposition: string(c.CreateDisposition), }
WriteDisposition: string(c.WriteDisposition),
DestinationTable: c.Dst.tableRefProto(), func (c *Copier) newJob() *bq.Job {
} return &bq.Job{
for _, t := range c.Srcs { JobReference: c.JobIDConfig.createJobRef(c.c.projectID),
conf.SourceTables = append(conf.SourceTables, t.tableRefProto()) Configuration: c.CopyConfig.toBQ(),
} }
job := &bq.Job{
JobReference: createJobRef(c.JobID, c.AddJobIDSuffix, c.c.projectID),
Configuration: &bq.JobConfiguration{Copy: conf},
}
return c.c.insertJob(ctx, &insertJobConf{job: job})
} }

View File

@@ -17,7 +17,10 @@ package bigquery
import ( import (
"testing" "testing"
"golang.org/x/net/context" "github.com/google/go-cmp/cmp/cmpopts"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2" bq "google.golang.org/api/bigquery/v2"
) )
@@ -44,10 +47,11 @@ func defaultCopyJob() *bq.Job {
} }
func TestCopy(t *testing.T) { func TestCopy(t *testing.T) {
defer fixRandomJobID("RANDOM")() defer fixRandomID("RANDOM")()
testCases := []struct { testCases := []struct {
dst *Table dst *Table
srcs []*Table srcs []*Table
jobID string
config CopyConfig config CopyConfig
want *bq.Job want *bq.Job
}{ }{
@@ -82,9 +86,11 @@ func TestCopy(t *testing.T) {
config: CopyConfig{ config: CopyConfig{
CreateDisposition: CreateNever, CreateDisposition: CreateNever,
WriteDisposition: WriteTruncate, WriteDisposition: WriteTruncate,
Labels: map[string]string{"a": "b"},
}, },
want: func() *bq.Job { want: func() *bq.Job {
j := defaultCopyJob() j := defaultCopyJob()
j.Configuration.Labels = map[string]string{"a": "b"}
j.Configuration.Copy.CreateDisposition = "CREATE_NEVER" j.Configuration.Copy.CreateDisposition = "CREATE_NEVER"
j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE" j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE"
return j return j
@@ -103,7 +109,7 @@ func TestCopy(t *testing.T) {
TableID: "s-table-id", TableID: "s-table-id",
}, },
}, },
config: CopyConfig{JobID: "job-id"}, jobID: "job-id",
want: func() *bq.Job { want: func() *bq.Job {
j := defaultCopyJob() j := defaultCopyJob()
j.JobReference.JobId = "job-id" j.JobReference.JobId = "job-id"
@@ -111,22 +117,25 @@ func TestCopy(t *testing.T) {
}(), }(),
}, },
} }
c := &Client{projectID: "client-project-id"}
for i, tc := range testCases { for i, tc := range testCases {
s := &testService{}
c := &Client{
service: s,
projectID: "client-project-id",
}
tc.dst.c = c tc.dst.c = c
copier := tc.dst.CopierFrom(tc.srcs...) copier := tc.dst.CopierFrom(tc.srcs...)
copier.JobID = tc.jobID
tc.config.Srcs = tc.srcs tc.config.Srcs = tc.srcs
tc.config.Dst = tc.dst tc.config.Dst = tc.dst
copier.CopyConfig = tc.config copier.CopyConfig = tc.config
if _, err := copier.Run(context.Background()); err != nil { got := copier.newJob()
t.Errorf("#%d: err calling Run: %v", i, err) checkJob(t, i, got, tc.want)
continue
jc, err := bqToJobConfig(got.Configuration, c)
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
diff := testutil.Diff(jc.(*CopyConfig), &copier.CopyConfig,
cmpopts.IgnoreUnexported(Table{}))
if diff != "" {
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
} }
checkJob(t, i, s.Job, tc.want)
} }
} }

View File

@@ -15,11 +15,14 @@
package bigquery package bigquery
import ( import (
"errors"
"fmt"
"time" "time"
"cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/optional"
"golang.org/x/net/context" "golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/iterator" "google.golang.org/api/iterator"
) )
@@ -38,6 +41,7 @@ type DatasetMetadata struct {
Location string // The geo location of the dataset. Location string // The geo location of the dataset.
DefaultTableExpiration time.Duration // The default expiration time for new tables. DefaultTableExpiration time.Duration // The default expiration time for new tables.
Labels map[string]string // User-provided labels. Labels map[string]string // User-provided labels.
Access []*AccessEntry // Access permissions.
// These fields are read-only. // These fields are read-only.
CreationTime time.Time CreationTime time.Time
@@ -47,7 +51,6 @@ type DatasetMetadata struct {
// ETag is the ETag obtained when reading metadata. Pass it to Dataset.Update to // ETag is the ETag obtained when reading metadata. Pass it to Dataset.Update to
// ensure that the metadata hasn't changed since it was read. // ensure that the metadata hasn't changed since it was read.
ETag string ETag string
// TODO(jba): access rules
} }
// DatasetMetadataToUpdate is used when updating a dataset's metadata. // DatasetMetadataToUpdate is used when updating a dataset's metadata.
@@ -55,30 +58,15 @@ type DatasetMetadata struct {
type DatasetMetadataToUpdate struct { type DatasetMetadataToUpdate struct {
Description optional.String // The user-friendly description of this table. Description optional.String // The user-friendly description of this table.
Name optional.String // The user-friendly name for this dataset. Name optional.String // The user-friendly name for this dataset.
// DefaultTableExpiration is the the default expiration time for new tables. // DefaultTableExpiration is the the default expiration time for new tables.
// If set to time.Duration(0), new tables never expire. // If set to time.Duration(0), new tables never expire.
DefaultTableExpiration optional.Duration DefaultTableExpiration optional.Duration
setLabels map[string]string // The entire access list. It is not possible to replace individual entries.
deleteLabels map[string]bool Access []*AccessEntry
}
// SetLabel causes a label to be added or modified when dm is used labelUpdater
// in a call to Dataset.Update.
func (dm *DatasetMetadataToUpdate) SetLabel(name, value string) {
if dm.setLabels == nil {
dm.setLabels = map[string]string{}
}
dm.setLabels[name] = value
}
// DeleteLabel causes a label to be deleted when dm is used in a
// call to Dataset.Update.
func (dm *DatasetMetadataToUpdate) DeleteLabel(name string) {
if dm.deleteLabels == nil {
dm.deleteLabels = map[string]bool{}
}
dm.deleteLabels[name] = true
} }
// Dataset creates a handle to a BigQuery dataset in the client's project. // Dataset creates a handle to a BigQuery dataset in the client's project.
@@ -98,17 +86,100 @@ func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
// Create creates a dataset in the BigQuery service. An error will be returned if the // Create creates a dataset in the BigQuery service. An error will be returned if the
// dataset already exists. Pass in a DatasetMetadata value to configure the dataset. // dataset already exists. Pass in a DatasetMetadata value to configure the dataset.
func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) error { func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) error {
return d.c.service.insertDataset(ctx, d.DatasetID, d.ProjectID, md) ds, err := md.toBQ()
if err != nil {
return err
}
ds.DatasetReference = &bq.DatasetReference{DatasetId: d.DatasetID}
call := d.c.bqs.Datasets.Insert(d.ProjectID, ds).Context(ctx)
setClientHeader(call.Header())
_, err = call.Do()
return err
}
func (dm *DatasetMetadata) toBQ() (*bq.Dataset, error) {
ds := &bq.Dataset{}
if dm == nil {
return ds, nil
}
ds.FriendlyName = dm.Name
ds.Description = dm.Description
ds.Location = dm.Location
ds.DefaultTableExpirationMs = int64(dm.DefaultTableExpiration / time.Millisecond)
ds.Labels = dm.Labels
var err error
ds.Access, err = accessListToBQ(dm.Access)
if err != nil {
return nil, err
}
if !dm.CreationTime.IsZero() {
return nil, errors.New("bigquery: Dataset.CreationTime is not writable")
}
if !dm.LastModifiedTime.IsZero() {
return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable")
}
if dm.FullID != "" {
return nil, errors.New("bigquery: Dataset.FullID is not writable")
}
if dm.ETag != "" {
return nil, errors.New("bigquery: Dataset.ETag is not writable")
}
return ds, nil
}
func accessListToBQ(a []*AccessEntry) ([]*bq.DatasetAccess, error) {
var q []*bq.DatasetAccess
for _, e := range a {
a, err := e.toBQ()
if err != nil {
return nil, err
}
q = append(q, a)
}
return q, nil
} }
// Delete deletes the dataset. // Delete deletes the dataset.
func (d *Dataset) Delete(ctx context.Context) error { func (d *Dataset) Delete(ctx context.Context) error {
return d.c.service.deleteDataset(ctx, d.DatasetID, d.ProjectID) call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx)
setClientHeader(call.Header())
return call.Do()
} }
// Metadata fetches the metadata for the dataset. // Metadata fetches the metadata for the dataset.
func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) { func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) {
return d.c.service.getDatasetMetadata(ctx, d.ProjectID, d.DatasetID) call := d.c.bqs.Datasets.Get(d.ProjectID, d.DatasetID).Context(ctx)
setClientHeader(call.Header())
var ds *bq.Dataset
if err := runWithRetry(ctx, func() (err error) {
ds, err = call.Do()
return err
}); err != nil {
return nil, err
}
return bqToDatasetMetadata(ds)
}
func bqToDatasetMetadata(d *bq.Dataset) (*DatasetMetadata, error) {
dm := &DatasetMetadata{
CreationTime: unixMillisToTime(d.CreationTime),
LastModifiedTime: unixMillisToTime(d.LastModifiedTime),
DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
Description: d.Description,
Name: d.FriendlyName,
FullID: d.Id,
Location: d.Location,
Labels: d.Labels,
ETag: d.Etag,
}
for _, a := range d.Access {
e, err := bqToAccessEntry(a, nil)
if err != nil {
return nil, err
}
dm.Access = append(dm.Access, e)
}
return dm, nil
} }
// Update modifies specific Dataset metadata fields. // Update modifies specific Dataset metadata fields.
@@ -116,7 +187,63 @@ func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) {
// set the etag argument to the DatasetMetadata.ETag field from the read. // set the etag argument to the DatasetMetadata.ETag field from the read.
// Pass the empty string for etag for a "blind write" that will always succeed. // Pass the empty string for etag for a "blind write" that will always succeed.
func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error) { func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error) {
return d.c.service.patchDataset(ctx, d.ProjectID, d.DatasetID, &dm, etag) ds, err := dm.toBQ()
if err != nil {
return nil, err
}
call := d.c.bqs.Datasets.Patch(d.ProjectID, d.DatasetID, ds).Context(ctx)
setClientHeader(call.Header())
if etag != "" {
call.Header().Set("If-Match", etag)
}
var ds2 *bq.Dataset
if err := runWithRetry(ctx, func() (err error) {
ds2, err = call.Do()
return err
}); err != nil {
return nil, err
}
return bqToDatasetMetadata(ds2)
}
func (dm *DatasetMetadataToUpdate) toBQ() (*bq.Dataset, error) {
ds := &bq.Dataset{}
forceSend := func(field string) {
ds.ForceSendFields = append(ds.ForceSendFields, field)
}
if dm.Description != nil {
ds.Description = optional.ToString(dm.Description)
forceSend("Description")
}
if dm.Name != nil {
ds.FriendlyName = optional.ToString(dm.Name)
forceSend("FriendlyName")
}
if dm.DefaultTableExpiration != nil {
dur := optional.ToDuration(dm.DefaultTableExpiration)
if dur == 0 {
// Send a null to delete the field.
ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs")
} else {
ds.DefaultTableExpirationMs = int64(dur / time.Millisecond)
}
}
if dm.Access != nil {
var err error
ds.Access, err = accessListToBQ(dm.Access)
if err != nil {
return nil, err
}
if len(ds.Access) == 0 {
ds.NullFields = append(ds.NullFields, "Access")
}
}
labels, forces, nulls := dm.update()
ds.Labels = labels
ds.ForceSendFields = append(ds.ForceSendFields, forces...)
ds.NullFields = append(ds.NullFields, nulls...)
return ds, nil
} }
// Table creates a handle to a BigQuery table in the dataset. // Table creates a handle to a BigQuery table in the dataset.
@@ -163,16 +290,41 @@ func (it *TableIterator) Next() (*Table, error) {
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. // PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
// for testing
var listTables = func(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
call := it.dataset.c.bqs.Tables.List(it.dataset.ProjectID, it.dataset.DatasetID).
PageToken(pageToken).
Context(it.ctx)
setClientHeader(call.Header())
if pageSize > 0 {
call.MaxResults(int64(pageSize))
}
var res *bq.TableList
err := runWithRetry(it.ctx, func() (err error) {
res, err = call.Do()
return err
})
return res, err
}
func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) { func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) {
tables, tok, err := it.dataset.c.service.listTables(it.ctx, it.dataset.ProjectID, it.dataset.DatasetID, pageSize, pageToken) res, err := listTables(it, pageSize, pageToken)
if err != nil { if err != nil {
return "", err return "", err
} }
for _, t := range tables { for _, t := range res.Tables {
t.c = it.dataset.c it.tables = append(it.tables, bqToTable(t.TableReference, it.dataset.c))
it.tables = append(it.tables, t) }
return res.NextPageToken, nil
}
func bqToTable(tr *bq.TableReference, c *Client) *Table {
return &Table{
ProjectID: tr.ProjectId,
DatasetID: tr.DatasetId,
TableID: tr.TableId,
c: c,
} }
return tok, nil
} }
// Datasets returns an iterator over the datasets in a project. // Datasets returns an iterator over the datasets in a project.
@@ -232,15 +384,118 @@ func (it *DatasetIterator) Next() (*Dataset, error) {
return item, nil return item, nil
} }
// for testing
var listDatasets = func(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
call := it.c.bqs.Datasets.List(it.ProjectID).
Context(it.ctx).
PageToken(pageToken).
All(it.ListHidden)
setClientHeader(call.Header())
if pageSize > 0 {
call.MaxResults(int64(pageSize))
}
if it.Filter != "" {
call.Filter(it.Filter)
}
var res *bq.DatasetList
err := runWithRetry(it.ctx, func() (err error) {
res, err = call.Do()
return err
})
return res, err
}
func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) { func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) {
datasets, nextPageToken, err := it.c.service.listDatasets(it.ctx, it.ProjectID, res, err := listDatasets(it, pageSize, pageToken)
pageSize, pageToken, it.ListHidden, it.Filter)
if err != nil { if err != nil {
return "", err return "", err
} }
for _, d := range datasets { for _, d := range res.Datasets {
d.c = it.c it.items = append(it.items, &Dataset{
it.items = append(it.items, d) ProjectID: d.DatasetReference.ProjectId,
DatasetID: d.DatasetReference.DatasetId,
c: it.c,
})
} }
return nextPageToken, nil return res.NextPageToken, nil
}
// An AccessEntry describes the permissions that an entity has on a dataset.
type AccessEntry struct {
Role AccessRole // The role of the entity
EntityType EntityType // The type of entity
Entity string // The entity (individual or group) granted access
View *Table // The view granted access (EntityType must be ViewEntity)
}
// AccessRole is the level of access to grant to a dataset.
type AccessRole string
const (
OwnerRole AccessRole = "OWNER"
ReaderRole AccessRole = "READER"
WriterRole AccessRole = "WRITER"
)
// EntityType is the type of entity in an AccessEntry.
type EntityType int
const (
// A domain (e.g. "example.com")
DomainEntity EntityType = iota + 1
// Email address of a Google Group
GroupEmailEntity
// Email address of an individual user.
UserEmailEntity
// A special group: one of projectOwners, projectReaders, projectWriters or allAuthenticatedUsers.
SpecialGroupEntity
// A BigQuery view.
ViewEntity
)
func (e *AccessEntry) toBQ() (*bq.DatasetAccess, error) {
q := &bq.DatasetAccess{Role: string(e.Role)}
switch e.EntityType {
case DomainEntity:
q.Domain = e.Entity
case GroupEmailEntity:
q.GroupByEmail = e.Entity
case UserEmailEntity:
q.UserByEmail = e.Entity
case SpecialGroupEntity:
q.SpecialGroup = e.Entity
case ViewEntity:
q.View = e.View.toBQ()
default:
return nil, fmt.Errorf("bigquery: unknown entity type %d", e.EntityType)
}
return q, nil
}
func bqToAccessEntry(q *bq.DatasetAccess, c *Client) (*AccessEntry, error) {
e := &AccessEntry{Role: AccessRole(q.Role)}
switch {
case q.Domain != "":
e.Entity = q.Domain
e.EntityType = DomainEntity
case q.GroupByEmail != "":
e.Entity = q.GroupByEmail
e.EntityType = GroupEmailEntity
case q.UserByEmail != "":
e.Entity = q.UserByEmail
e.EntityType = UserEmailEntity
case q.SpecialGroup != "":
e.Entity = q.SpecialGroup
e.EntityType = SpecialGroupEntity
case q.View != nil:
e.View = c.DatasetInProject(q.View.ProjectId, q.View.DatasetId).Table(q.View.TableId)
e.EntityType = ViewEntity
default:
return nil, errors.New("bigquery: invalid access value")
}
return e, nil
} }

View File

@@ -18,24 +18,29 @@ import (
"errors" "errors"
"strconv" "strconv"
"testing" "testing"
"time"
"github.com/google/go-cmp/cmp"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context" "golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
itest "google.golang.org/api/iterator/testing" itest "google.golang.org/api/iterator/testing"
) )
// readServiceStub services read requests by returning data from an in-memory list of values. // readServiceStub services read requests by returning data from an in-memory list of values.
type listTablesServiceStub struct { type listTablesStub struct {
expectedProject, expectedDataset string expectedProject, expectedDataset string
tables []*Table tables []*bq.TableListTables
service
} }
func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) { func (s *listTablesStub) listTables(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
if projectID != s.expectedProject { if it.dataset.ProjectID != s.expectedProject {
return nil, "", errors.New("wrong project id") return nil, errors.New("wrong project id")
} }
if datasetID != s.expectedDataset { if it.dataset.DatasetID != s.expectedDataset {
return nil, "", errors.New("wrong dataset id") return nil, errors.New("wrong dataset id")
} }
const maxPageSize = 2 const maxPageSize = 2
if pageSize <= 0 || pageSize > maxPageSize { if pageSize <= 0 || pageSize > maxPageSize {
@@ -46,7 +51,7 @@ func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datas
var err error var err error
start, err = strconv.Atoi(pageToken) start, err = strconv.Atoi(pageToken)
if err != nil { if err != nil {
return nil, "", err return nil, err
} }
} }
end := start + pageSize end := start + pageSize
@@ -57,100 +62,267 @@ func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datas
if end < len(s.tables) { if end < len(s.tables) {
nextPageToken = strconv.Itoa(end) nextPageToken = strconv.Itoa(end)
} }
return s.tables[start:end], nextPageToken, nil return &bq.TableList{
Tables: s.tables[start:end],
NextPageToken: nextPageToken,
}, nil
} }
func TestTables(t *testing.T) { func TestTables(t *testing.T) {
t1 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t1"} c := &Client{projectID: "p1"}
t2 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t2"} inTables := []*bq.TableListTables{
t3 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t3"} {TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t1"}},
allTables := []*Table{t1, t2, t3} {TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t2"}},
c := &Client{ {TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t3"}},
service: &listTablesServiceStub{
expectedProject: "x",
expectedDataset: "y",
tables: allTables,
},
projectID: "x",
} }
msg, ok := itest.TestIterator(allTables, outTables := []*Table{
func() interface{} { return c.Dataset("y").Tables(context.Background()) }, {ProjectID: "p1", DatasetID: "d1", TableID: "t1", c: c},
{ProjectID: "p1", DatasetID: "d1", TableID: "t2", c: c},
{ProjectID: "p1", DatasetID: "d1", TableID: "t3", c: c},
}
lts := &listTablesStub{
expectedProject: "p1",
expectedDataset: "d1",
tables: inTables,
}
old := listTables
listTables = lts.listTables // cannot use t.Parallel with this test
defer func() { listTables = old }()
msg, ok := itest.TestIterator(outTables,
func() interface{} { return c.Dataset("d1").Tables(context.Background()) },
func(it interface{}) (interface{}, error) { return it.(*TableIterator).Next() }) func(it interface{}) (interface{}, error) { return it.(*TableIterator).Next() })
if !ok { if !ok {
t.Error(msg) t.Error(msg)
} }
} }
type listDatasetsFake struct { type listDatasetsStub struct {
service expectedProject string
datasets []*bq.DatasetListDatasets
projectID string hidden map[*bq.DatasetListDatasets]bool
datasets []*Dataset
hidden map[*Dataset]bool
} }
func (df *listDatasetsFake) listDatasets(_ context.Context, projectID string, pageSize int, pageToken string, listHidden bool, filter string) ([]*Dataset, string, error) { func (s *listDatasetsStub) listDatasets(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
const maxPageSize = 2 const maxPageSize = 2
if pageSize <= 0 || pageSize > maxPageSize { if pageSize <= 0 || pageSize > maxPageSize {
pageSize = maxPageSize pageSize = maxPageSize
} }
if filter != "" { if it.Filter != "" {
return nil, "", errors.New("filter not supported") return nil, errors.New("filter not supported")
} }
if projectID != df.projectID { if it.ProjectID != s.expectedProject {
return nil, "", errors.New("bad project ID") return nil, errors.New("bad project ID")
} }
start := 0 start := 0
if pageToken != "" { if pageToken != "" {
var err error var err error
start, err = strconv.Atoi(pageToken) start, err = strconv.Atoi(pageToken)
if err != nil { if err != nil {
return nil, "", err return nil, err
} }
} }
var ( var (
i int i int
result []*Dataset result []*bq.DatasetListDatasets
nextPageToken string nextPageToken string
) )
for i = start; len(result) < pageSize && i < len(df.datasets); i++ { for i = start; len(result) < pageSize && i < len(s.datasets); i++ {
if df.hidden[df.datasets[i]] && !listHidden { if s.hidden[s.datasets[i]] && !it.ListHidden {
continue continue
} }
result = append(result, df.datasets[i]) result = append(result, s.datasets[i])
} }
if i < len(df.datasets) { if i < len(s.datasets) {
nextPageToken = strconv.Itoa(i) nextPageToken = strconv.Itoa(i)
} }
return result, nextPageToken, nil return &bq.DatasetList{
Datasets: result,
NextPageToken: nextPageToken,
}, nil
} }
func TestDatasets(t *testing.T) { func TestDatasets(t *testing.T) {
service := &listDatasetsFake{projectID: "p"} client := &Client{projectID: "p"}
client := &Client{service: service} inDatasets := []*bq.DatasetListDatasets{
datasets := []*Dataset{ {DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "a"}},
{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "b"}},
{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "hidden"}},
{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "c"}},
}
outDatasets := []*Dataset{
{"p", "a", client}, {"p", "a", client},
{"p", "b", client}, {"p", "b", client},
{"p", "hidden", client}, {"p", "hidden", client},
{"p", "c", client}, {"p", "c", client},
} }
service.datasets = datasets lds := &listDatasetsStub{
service.hidden = map[*Dataset]bool{datasets[2]: true} expectedProject: "p",
c := &Client{ datasets: inDatasets,
projectID: "p", hidden: map[*bq.DatasetListDatasets]bool{inDatasets[2]: true},
service: service,
} }
msg, ok := itest.TestIterator(datasets, old := listDatasets
func() interface{} { it := c.Datasets(context.Background()); it.ListHidden = true; return it }, listDatasets = lds.listDatasets // cannot use t.Parallel with this test
defer func() { listDatasets = old }()
msg, ok := itest.TestIterator(outDatasets,
func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = true; return it },
func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() }) func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
if !ok { if !ok {
t.Fatalf("ListHidden=true: %s", msg) t.Fatalf("ListHidden=true: %s", msg)
} }
msg, ok = itest.TestIterator([]*Dataset{datasets[0], datasets[1], datasets[3]}, msg, ok = itest.TestIterator([]*Dataset{outDatasets[0], outDatasets[1], outDatasets[3]},
func() interface{} { it := c.Datasets(context.Background()); it.ListHidden = false; return it }, func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = false; return it },
func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() }) func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
if !ok { if !ok {
t.Fatalf("ListHidden=false: %s", msg) t.Fatalf("ListHidden=false: %s", msg)
} }
} }
// TestDatasetToBQ verifies conversion of *DatasetMetadata to the raw API
// representation (*bq.Dataset): nil handling, the hour-to-millisecond
// conversion of DefaultTableExpiration, access-entry translation, and
// rejection of metadata that sets output-only fields.
func TestDatasetToBQ(t *testing.T) {
	for _, test := range []struct {
		in   *DatasetMetadata
		want *bq.Dataset
	}{
		// nil metadata converts to the zero-value dataset.
		{nil, &bq.Dataset{}},
		{&DatasetMetadata{Name: "name"}, &bq.Dataset{FriendlyName: "name"}},
		{&DatasetMetadata{
			Name:                   "name",
			Description:            "desc",
			DefaultTableExpiration: time.Hour,
			Location:               "EU",
			Labels:                 map[string]string{"x": "y"},
			Access:                 []*AccessEntry{{Role: OwnerRole, Entity: "example.com", EntityType: DomainEntity}},
		}, &bq.Dataset{
			FriendlyName: "name",
			Description:  "desc",
			// One hour expressed in milliseconds, as the API requires.
			DefaultTableExpirationMs: 60 * 60 * 1000,
			Location:                 "EU",
			Labels:                   map[string]string{"x": "y"},
			Access:                   []*bq.DatasetAccess{{Role: "OWNER", Domain: "example.com"}},
		}},
	} {
		got, err := test.in.toBQ()
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.want) {
			t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want)
		}
	}

	// Check that non-writeable fields are unset.
	aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	for _, dm := range []*DatasetMetadata{
		{CreationTime: aTime},
		{LastModifiedTime: aTime},
		{FullID: "x"},
		{ETag: "e"},
	} {
		if _, err := dm.toBQ(); err == nil {
			t.Errorf("%+v: got nil, want error", dm)
		}
	}
}
// TestBQToDatasetMetadata verifies conversion from the raw API
// representation (*bq.Dataset) to *DatasetMetadata, including
// millisecond-to-time.Time conversion and access-entry translation.
func TestBQToDatasetMetadata(t *testing.T) {
	cTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	// The API carries timestamps as milliseconds since the Unix epoch.
	cMillis := cTime.UnixNano() / 1e6
	mTime := time.Date(2017, 10, 31, 0, 0, 0, 0, time.Local)
	mMillis := mTime.UnixNano() / 1e6
	q := &bq.Dataset{
		CreationTime:             cMillis,
		LastModifiedTime:         mMillis,
		FriendlyName:             "name",
		Description:              "desc",
		DefaultTableExpirationMs: 60 * 60 * 1000,
		Location:                 "EU",
		Labels:                   map[string]string{"x": "y"},
		Access: []*bq.DatasetAccess{
			{Role: "READER", UserByEmail: "joe@example.com"},
			{Role: "WRITER", GroupByEmail: "users@example.com"},
		},
		Etag: "etag",
	}
	want := &DatasetMetadata{
		CreationTime:           cTime,
		LastModifiedTime:       mTime,
		Name:                   "name",
		Description:            "desc",
		DefaultTableExpiration: time.Hour,
		Location:               "EU",
		Labels:                 map[string]string{"x": "y"},
		Access: []*AccessEntry{
			{Role: ReaderRole, Entity: "joe@example.com", EntityType: UserEmailEntity},
			{Role: WriterRole, Entity: "users@example.com", EntityType: GroupEmailEntity},
		},
		ETag: "etag",
	}
	got, err := bqToDatasetMetadata(q)
	if err != nil {
		t.Fatal(err)
	}
	if diff := testutil.Diff(got, want); diff != "" {
		t.Errorf("-got, +want:\n%s", diff)
	}
}
// TestDatasetMetadataToUpdateToBQ verifies that DatasetMetadataToUpdate
// produces the expected raw update request, including ForceSendFields for
// explicitly set string fields and NullFields for deleted labels.
func TestDatasetMetadataToUpdateToBQ(t *testing.T) {
	dm := DatasetMetadataToUpdate{
		Description:            "desc",
		Name:                   "name",
		DefaultTableExpiration: time.Hour,
	}
	dm.SetLabel("label", "value")
	dm.DeleteLabel("del")
	got, err := dm.toBQ()
	if err != nil {
		t.Fatal(err)
	}
	want := &bq.Dataset{
		Description:              "desc",
		FriendlyName:             "name",
		DefaultTableExpirationMs: 60 * 60 * 1000,
		Labels:                   map[string]string{"label": "value"},
		// ForceSendFields ensures the string fields are sent even when
		// empty; NullFields clears the deleted label server-side.
		ForceSendFields: []string{"Description", "FriendlyName"},
		NullFields:      []string{"Labels.del"},
	}
	if diff := testutil.Diff(got, want); diff != "" {
		t.Errorf("-got, +want:\n%s", diff)
	}
}
// TestConvertAccessEntry round-trips AccessEntry values through the raw API
// representation and back, then checks the error paths for entries that
// cannot be converted in either direction.
func TestConvertAccessEntry(t *testing.T) {
	c := &Client{projectID: "pid"}
	for _, e := range []*AccessEntry{
		{Role: ReaderRole, Entity: "e", EntityType: DomainEntity},
		{Role: WriterRole, Entity: "e", EntityType: GroupEmailEntity},
		{Role: OwnerRole, Entity: "e", EntityType: UserEmailEntity},
		{Role: ReaderRole, Entity: "e", EntityType: SpecialGroupEntity},
		{Role: ReaderRole, EntityType: ViewEntity,
			View: &Table{ProjectID: "p", DatasetID: "d", TableID: "t", c: c}},
	} {
		q, err := e.toBQ()
		if err != nil {
			t.Fatal(err)
		}
		got, err := bqToAccessEntry(q, c)
		if err != nil {
			t.Fatal(err)
		}
		if diff := testutil.Diff(got, e, cmp.AllowUnexported(Table{}, Client{})); diff != "" {
			t.Errorf("got=-, want=+:\n%s", diff)
		}
	}

	// An entry without an EntityType cannot be converted to the API form.
	e := &AccessEntry{Role: ReaderRole, Entity: "e"}
	if _, err := e.toBQ(); err == nil {
		t.Error("got nil, want error")
	}
	// A raw entry that names no entity cannot be converted back.
	if _, err := bqToAccessEntry(&bq.DatasetAccess{Role: "WRITER"}, nil); err == nil {
		t.Error("got nil, want error")
	}
}

View File

@@ -0,0 +1,689 @@
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package datatransfer
import (
"math"
"time"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// CallOptions contains the retry settings for each method of Client.
// Each field holds the gax call options (retry/backoff policy) applied to
// the Client method of the same name; a nil slice means no retry.
type CallOptions struct {
	GetDataSource        []gax.CallOption
	ListDataSources      []gax.CallOption
	CreateTransferConfig []gax.CallOption
	UpdateTransferConfig []gax.CallOption
	DeleteTransferConfig []gax.CallOption
	GetTransferConfig    []gax.CallOption
	ListTransferConfigs  []gax.CallOption
	ScheduleTransferRuns []gax.CallOption
	GetTransferRun       []gax.CallOption
	DeleteTransferRun    []gax.CallOption
	ListTransferRuns     []gax.CallOption
	ListTransferLogs     []gax.CallOption
	CheckValidCreds      []gax.CallOption
}
// defaultClientOptions returns the base dial options for the BigQuery Data
// Transfer API: the service endpoint and the default OAuth scopes.
func defaultClientOptions() []option.ClientOption {
	endpoint := option.WithEndpoint("bigquerydatatransfer.googleapis.com:443")
	scopes := option.WithScopes(DefaultAuthScopes()...)
	return []option.ClientOption{endpoint, scopes}
}
// defaultCallOptions returns the per-method call options used when the
// caller supplies none.
func defaultCallOptions() *CallOptions {
	// Retry policies keyed by {config name, idempotency class}. Only the
	// idempotent class is populated here; map lookups for "non_idempotent"
	// yield a nil slice, i.e. those methods are never retried.
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	return &CallOptions{
		GetDataSource:        retry[[2]string{"default", "idempotent"}],
		ListDataSources:      retry[[2]string{"default", "idempotent"}],
		CreateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
		UpdateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
		DeleteTransferConfig: retry[[2]string{"default", "idempotent"}],
		GetTransferConfig:    retry[[2]string{"default", "idempotent"}],
		ListTransferConfigs:  retry[[2]string{"default", "idempotent"}],
		ScheduleTransferRuns: retry[[2]string{"default", "non_idempotent"}],
		GetTransferRun:       retry[[2]string{"default", "idempotent"}],
		DeleteTransferRun:    retry[[2]string{"default", "idempotent"}],
		ListTransferRuns:     retry[[2]string{"default", "idempotent"}],
		ListTransferLogs:     retry[[2]string{"default", "idempotent"}],
		CheckValidCreds:      retry[[2]string{"default", "idempotent"}],
	}
}
// Client is a client for interacting with BigQuery Data Transfer API.
type Client struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	client datatransferpb.DataTransferServiceClient

	// The call options for this service.
	CallOptions *CallOptions

	// The x-goog-* metadata to be sent with each request.
	// Populated once by setGoogleClientInfo.
	xGoogMetadata metadata.MD
}
// NewClient creates a new data transfer service client.
//
// The Google BigQuery Data Transfer Service API enables BigQuery users to
// configure the transfer of their data from other Google Products into BigQuery.
// This service contains methods that are end user exposed. It backs up the
// frontend.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
	// Caller options are appended after the defaults so they take precedence.
	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &Client{
		conn:        conn,
		CallOptions: defaultCallOptions(),

		client: datatransferpb.NewDataTransferServiceClient(conn),
	}
	// Record library/runtime versions for the x-goog-api-client header.
	c.setGoogleClientInfo()
	return c, nil
}
// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
	return c.conn
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
	return c.conn.Close()
}

// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
	// Caller-supplied pairs go after the Go version and before the
	// library versions (gapic, gax, grpc).
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// ProjectPath returns the path for the project resource, in the form
// "projects/{project}".
func ProjectPath(project string) string {
	// Plain concatenation; the redundant ""+…+"" bookends of the generated
	// form are dropped.
	return "projects/" + project
}
// LocationPath returns the path for the location resource, in the form
// "projects/{project}/locations/{location}".
func LocationPath(project, location string) string {
	return "projects/" + project +
		"/locations/" + location
}
// LocationDataSourcePath returns the path for the location data source
// resource, in the form
// "projects/{project}/locations/{location}/dataSources/{dataSource}".
func LocationDataSourcePath(project, location, dataSource string) string {
	return "projects/" + project +
		"/locations/" + location +
		"/dataSources/" + dataSource
}
// LocationTransferConfigPath returns the path for the location transfer
// config resource, in the form
// "projects/{project}/locations/{location}/transferConfigs/{transferConfig}".
func LocationTransferConfigPath(project, location, transferConfig string) string {
	return "projects/" + project +
		"/locations/" + location +
		"/transferConfigs/" + transferConfig
}
// LocationRunPath returns the path for the location run resource, in the form
// "projects/{project}/locations/{location}/transferConfigs/{transferConfig}/runs/{run}".
func LocationRunPath(project, location, transferConfig, run string) string {
	return "projects/" + project +
		"/locations/" + location +
		"/transferConfigs/" + transferConfig +
		"/runs/" + run
}
// DataSourcePath returns the path for the data source resource, in the form
// "projects/{project}/dataSources/{dataSource}".
func DataSourcePath(project, dataSource string) string {
	return "projects/" + project +
		"/dataSources/" + dataSource
}
// TransferConfigPath returns the path for the transfer config resource, in
// the form "projects/{project}/transferConfigs/{transferConfig}".
func TransferConfigPath(project, transferConfig string) string {
	return "projects/" + project +
		"/transferConfigs/" + transferConfig
}
// RunPath returns the path for the run resource, in the form
// "projects/{project}/transferConfigs/{transferConfig}/runs/{run}".
func RunPath(project, transferConfig, run string) string {
	return "projects/" + project +
		"/transferConfigs/" + transferConfig +
		"/runs/" + run
}
// GetDataSource retrieves a supported data source and returns its settings,
// which can be used for UI rendering.
func (c *Client) GetDataSource(ctx context.Context, req *datatransferpb.GetDataSourceRequest, opts ...gax.CallOption) (*datatransferpb.DataSource, error) {
	// Attach the x-goog-api-client metadata to the outgoing context.
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// The full slice expression caps the defaults at their length so that
	// appending the caller's opts allocates a new backing array instead of
	// mutating the shared CallOptions slice.
	opts = append(c.CallOptions.GetDataSource[0:len(c.CallOptions.GetDataSource):len(c.CallOptions.GetDataSource)], opts...)
	var resp *datatransferpb.DataSource
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetDataSource(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ListDataSources lists supported data sources and returns their settings,
// which can be used for UI rendering.
func (c *Client) ListDataSources(ctx context.Context, req *datatransferpb.ListDataSourcesRequest, opts ...gax.CallOption) *DataSourceIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression: appending caller opts must not mutate the shared defaults.
	opts = append(c.CallOptions.ListDataSources[0:len(c.CallOptions.ListDataSources):len(c.CallOptions.ListDataSources)], opts...)
	it := &DataSourceIterator{}
	// InternalFetch issues a single RPC for one page of results.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.DataSource, string, error) {
		var resp *datatransferpb.ListDataSourcesResponse
		req.PageToken = pageToken
		// Clamp: the iterator API uses int but the proto field is int32.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListDataSources(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.DataSources, resp.NextPageToken, nil
	}
	// fetch adapts InternalFetch to the iterator.PageInfo contract by
	// buffering the fetched items on the iterator.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// CreateTransferConfig creates a new data transfer configuration.
func (c *Client) CreateTransferConfig(ctx context.Context, req *datatransferpb.CreateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression: appending caller opts must not mutate the shared defaults.
	opts = append(c.CallOptions.CreateTransferConfig[0:len(c.CallOptions.CreateTransferConfig):len(c.CallOptions.CreateTransferConfig)], opts...)
	var resp *datatransferpb.TransferConfig
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CreateTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// UpdateTransferConfig updates a data transfer configuration.
// All fields must be set, even if they are not updated.
func (c *Client) UpdateTransferConfig(ctx context.Context, req *datatransferpb.UpdateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Same copy-on-append pattern as above to protect the default options.
	opts = append(c.CallOptions.UpdateTransferConfig[0:len(c.CallOptions.UpdateTransferConfig):len(c.CallOptions.UpdateTransferConfig)], opts...)
	var resp *datatransferpb.TransferConfig
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.UpdateTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// DeleteTransferConfig deletes a data transfer configuration,
// including any associated transfer runs and logs.
func (c *Client) DeleteTransferConfig(ctx context.Context, req *datatransferpb.DeleteTransferConfigRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression: appending caller opts must not mutate the shared defaults.
	opts = append(c.CallOptions.DeleteTransferConfig[0:len(c.CallOptions.DeleteTransferConfig):len(c.CallOptions.DeleteTransferConfig)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		// Delete returns no payload; only the error is surfaced.
		_, err = c.client.DeleteTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}

// GetTransferConfig returns information about a data transfer config.
func (c *Client) GetTransferConfig(ctx context.Context, req *datatransferpb.GetTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.GetTransferConfig[0:len(c.CallOptions.GetTransferConfig):len(c.CallOptions.GetTransferConfig)], opts...)
	var resp *datatransferpb.TransferConfig
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ListTransferConfigs returns information about all data transfers in the project.
func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.ListTransferConfigsRequest, opts ...gax.CallOption) *TransferConfigIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression: appending caller opts must not mutate the shared defaults.
	opts = append(c.CallOptions.ListTransferConfigs[0:len(c.CallOptions.ListTransferConfigs):len(c.CallOptions.ListTransferConfigs)], opts...)
	it := &TransferConfigIterator{}
	// InternalFetch issues a single RPC for one page of results.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferConfig, string, error) {
		var resp *datatransferpb.ListTransferConfigsResponse
		req.PageToken = pageToken
		// Clamp: the iterator API uses int but the proto field is int32.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTransferConfigs(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.TransferConfigs, resp.NextPageToken, nil
	}
	// fetch buffers a page of items on the iterator for Next to drain.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// ScheduleTransferRuns creates transfer runs for a time range [range_start_time, range_end_time].
// For each date - or whatever granularity the data source supports - in the
// range, one transfer run is created.
// Note that runs are created per UTC time in the time range.
func (c *Client) ScheduleTransferRuns(ctx context.Context, req *datatransferpb.ScheduleTransferRunsRequest, opts ...gax.CallOption) (*datatransferpb.ScheduleTransferRunsResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression: appending caller opts must not mutate the shared defaults.
	opts = append(c.CallOptions.ScheduleTransferRuns[0:len(c.CallOptions.ScheduleTransferRuns):len(c.CallOptions.ScheduleTransferRuns)], opts...)
	var resp *datatransferpb.ScheduleTransferRunsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.ScheduleTransferRuns(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// GetTransferRun returns information about the particular transfer run.
func (c *Client) GetTransferRun(ctx context.Context, req *datatransferpb.GetTransferRunRequest, opts ...gax.CallOption) (*datatransferpb.TransferRun, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.GetTransferRun[0:len(c.CallOptions.GetTransferRun):len(c.CallOptions.GetTransferRun)], opts...)
	var resp *datatransferpb.TransferRun
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetTransferRun(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// DeleteTransferRun deletes the specified transfer run.
func (c *Client) DeleteTransferRun(ctx context.Context, req *datatransferpb.DeleteTransferRunRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.DeleteTransferRun[0:len(c.CallOptions.DeleteTransferRun):len(c.CallOptions.DeleteTransferRun)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		// Delete returns no payload; only the error is surfaced.
		_, err = c.client.DeleteTransferRun(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
// ListTransferRuns returns information about running and completed jobs.
func (c *Client) ListTransferRuns(ctx context.Context, req *datatransferpb.ListTransferRunsRequest, opts ...gax.CallOption) *TransferRunIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression: appending caller opts must not mutate the shared defaults.
	opts = append(c.CallOptions.ListTransferRuns[0:len(c.CallOptions.ListTransferRuns):len(c.CallOptions.ListTransferRuns)], opts...)
	it := &TransferRunIterator{}
	// InternalFetch issues a single RPC for one page of results.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferRun, string, error) {
		var resp *datatransferpb.ListTransferRunsResponse
		req.PageToken = pageToken
		// Clamp: the iterator API uses int but the proto field is int32.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTransferRuns(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.TransferRuns, resp.NextPageToken, nil
	}
	// fetch buffers a page of items on the iterator for Next to drain.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// ListTransferLogs returns user facing log messages for the data transfer run.
func (c *Client) ListTransferLogs(ctx context.Context, req *datatransferpb.ListTransferLogsRequest, opts ...gax.CallOption) *TransferMessageIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression: appending caller opts must not mutate the shared defaults.
	opts = append(c.CallOptions.ListTransferLogs[0:len(c.CallOptions.ListTransferLogs):len(c.CallOptions.ListTransferLogs)], opts...)
	it := &TransferMessageIterator{}
	// InternalFetch issues a single RPC for one page of results.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferMessage, string, error) {
		var resp *datatransferpb.ListTransferLogsResponse
		req.PageToken = pageToken
		// Clamp: the iterator API uses int but the proto field is int32.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTransferLogs(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.TransferMessages, resp.NextPageToken, nil
	}
	// fetch buffers a page of items on the iterator for Next to drain.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// CheckValidCreds returns true if valid credentials exist for the given data source and
// requesting user.
// Some data sources don't support service accounts, so we need to talk to
// them on behalf of the end user. This API just checks whether we have an OAuth
// token for the particular user, which is a pre-requisite before the user can
// create a transfer config.
func (c *Client) CheckValidCreds(ctx context.Context, req *datatransferpb.CheckValidCredsRequest, opts ...gax.CallOption) (*datatransferpb.CheckValidCredsResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression: appending caller opts must not mutate the shared defaults.
	opts = append(c.CallOptions.CheckValidCreds[0:len(c.CallOptions.CheckValidCreds):len(c.CallOptions.CheckValidCreds)], opts...)
	var resp *datatransferpb.CheckValidCredsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CheckValidCreds(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// DataSourceIterator manages a stream of *datatransferpb.DataSource.
type DataSourceIterator struct {
	items    []*datatransferpb.DataSource
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.DataSource, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DataSourceIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *DataSourceIterator) Next() (*datatransferpb.DataSource, error) {
	var item *datatransferpb.DataSource
	// nextFunc refills it.items from the next page when the buffer is empty.
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

// bufLen reports the number of buffered, not-yet-returned items.
func (it *DataSourceIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered items to the iterator machinery and resets the buffer.
func (it *DataSourceIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
// TransferConfigIterator manages a stream of *datatransferpb.TransferConfig.
type TransferConfigIterator struct {
	items    []*datatransferpb.TransferConfig
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferConfig, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferConfigIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferConfigIterator) Next() (*datatransferpb.TransferConfig, error) {
	var item *datatransferpb.TransferConfig
	// nextFunc refills it.items from the next page when the buffer is empty.
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

// bufLen reports the number of buffered, not-yet-returned items.
func (it *TransferConfigIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered items to the iterator machinery and resets the buffer.
func (it *TransferConfigIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
// TransferMessageIterator manages a stream of *datatransferpb.TransferMessage.
type TransferMessageIterator struct {
	items    []*datatransferpb.TransferMessage
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferMessage, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferMessageIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferMessageIterator) Next() (*datatransferpb.TransferMessage, error) {
	var item *datatransferpb.TransferMessage
	// nextFunc refills it.items from the next page when the buffer is empty.
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

// bufLen reports the number of buffered, not-yet-returned items.
func (it *TransferMessageIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered items to the iterator machinery and resets the buffer.
func (it *TransferMessageIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
// TransferRunIterator manages a stream of *datatransferpb.TransferRun.
type TransferRunIterator struct {
	items    []*datatransferpb.TransferRun
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferRun, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferRunIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferRunIterator) Next() (*datatransferpb.TransferRun, error) {
	var item *datatransferpb.TransferRun
	// nextFunc refills it.items from the next page when the buffer is empty.
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

// bufLen reports the number of buffered, not-yet-returned items.
func (it *TransferRunIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered items to the iterator machinery and resets the buffer.
func (it *TransferRunIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}

View File

@@ -0,0 +1,288 @@
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package datatransfer_test
import (
"cloud.google.com/go/bigquery/datatransfer/apiv1"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
)
// ExampleNewClient demonstrates constructing a data transfer client.
// Call Close on the client when it is no longer required.
func ExampleNewClient() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}

func ExampleClient_GetDataSource() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.GetDataSourceRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetDataSource(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_ListDataSources() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.ListDataSourcesRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListDataSources(ctx, req)
	for {
		resp, err := it.Next()
		// iterator.Done signals the end of the result stream.
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

func ExampleClient_CreateTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.CreateTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_UpdateTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.UpdateTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_DeleteTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.DeleteTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	// DeleteTransferConfig returns only an error; there is no response body.
	err = c.DeleteTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}
func ExampleClient_GetTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.GetTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_ListTransferConfigs() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.ListTransferConfigsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTransferConfigs(ctx, req)
	for {
		resp, err := it.Next()
		// iterator.Done signals the end of the result stream.
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
func ExampleClient_ScheduleTransferRuns() {
ctx := context.Background()
c, err := datatransfer.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &datatransferpb.ScheduleTransferRunsRequest{
// TODO: Fill request struct fields.
}
resp, err := c.ScheduleTransferRuns(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
// ExampleClient_GetTransferRun shows the skeleton for fetching a single
// transfer run via Client.GetTransferRun.
func ExampleClient_GetTransferRun() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.GetTransferRunRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetTransferRun(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_DeleteTransferRun shows the skeleton for deleting a transfer
// run. DeleteTransferRun returns only an error, so there is no response to use.
func ExampleClient_DeleteTransferRun() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.DeleteTransferRunRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteTransferRun(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}
// ExampleClient_ListTransferRuns shows the skeleton for listing transfer
// runs. ListTransferRuns returns an iterator; drain it with Next until it
// reports iterator.Done.
func ExampleClient_ListTransferRuns() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.ListTransferRunsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTransferRuns(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			// iterator.Done signals normal exhaustion, not a failure.
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
// ExampleClient_ListTransferLogs shows the skeleton for listing transfer
// logs. ListTransferLogs returns an iterator; drain it with Next until it
// reports iterator.Done.
func ExampleClient_ListTransferLogs() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.ListTransferLogsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTransferLogs(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			// iterator.Done signals normal exhaustion, not a failure.
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
// ExampleClient_CheckValidCreds shows the skeleton for validating credentials
// via Client.CheckValidCreds.
func ExampleClient_CheckValidCreds() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.CheckValidCredsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CheckValidCreds(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

View File

@@ -0,0 +1,49 @@
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
// Package datatransfer is an auto-generated package for the
// BigQuery Data Transfer API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// Transfers data from partner SaaS applications to Google BigQuery on a
// scheduled, managed basis.
package datatransfer // import "cloud.google.com/go/bigquery/datatransfer/apiv1"
import (
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
)
// insertMetadata returns a context whose outgoing gRPC metadata is the
// existing outgoing metadata of ctx merged with every MD in mds. Values for
// a key that appears in more than one MD are concatenated in argument order.
// The original context's metadata map is not mutated (it is copied first).
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	merged, _ := metadata.FromOutgoingContext(ctx)
	merged = merged.Copy()
	for _, extra := range mds {
		for key, vals := range extra {
			merged[key] = append(merged[key], vals...)
		}
	}
	return metadata.NewOutgoingContext(ctx, merged)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	const (
		bigqueryScope     = "https://www.googleapis.com/auth/bigquery"
		platformScope     = "https://www.googleapis.com/auth/cloud-platform"
		platformReadScope = "https://www.googleapis.com/auth/cloud-platform.read-only"
	)
	return []string{bigqueryScope, platformScope, platformReadScope}
}

File diff suppressed because it is too large Load Diff

View File

@@ -124,7 +124,7 @@ These methods create references to datasets, not the datasets themselves. You ca
a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to
create a dataset from a reference: create a dataset from a reference:
if err := myDataset.Create(ctx); err != nil { if err := myDataset.Create(ctx, nil); err != nil {
// TODO: Handle error. // TODO: Handle error.
} }
@@ -134,9 +134,10 @@ to an object in BigQuery that may or may not exist.
table := myDataset.Table("my_table") table := myDataset.Table("my_table")
You can create, delete and update the metadata of tables with methods on Table. You can create, delete and update the metadata of tables with methods on Table.
Table.Create supports a few options. For instance, you could create a temporary table with: For instance, you could create a temporary table with:
err = myDataset.Table("temp").Create(ctx, bigquery.TableExpiration(time.Now().Add(1*time.Hour))) err = myDataset.Table("temp").Create(ctx, &bigquery.TableMetadata{
ExpirationTime: time.Now().Add(1*time.Hour)})
if err != nil { if err != nil {
// TODO: Handle error. // TODO: Handle error.
} }
@@ -166,22 +167,23 @@ Or you can infer the schema from a struct:
// schema1 and schema2 are identical. // schema1 and schema2 are identical.
Struct inference supports tags like those of the encoding/json package, Struct inference supports tags like those of the encoding/json package,
so you can change names or ignore fields: so you can change names, ignore fields, or mark a field as nullable (non-required):
type student2 struct { type student2 struct {
Name string `bigquery:"full_name"` Name string `bigquery:"full_name"`
Grades []int Grades []int
Secret string `bigquery:"-"` Secret string `bigquery:"-"`
Optional int `bigquery:",nullable"
} }
schema3, err := bigquery.InferSchema(student2{}) schema3, err := bigquery.InferSchema(student2{})
if err != nil { if err != nil {
// TODO: Handle error. // TODO: Handle error.
} }
// schema3 has fields "full_name" and "Grade". // schema3 has required fields "full_name", "Grade" and nullable field "Optional".
Having constructed a schema, you can pass it to Table.Create as an option: Having constructed a schema, you can create a table with it like so:
if err := table.Create(ctx, schema1); err != nil { if err := table.Create(ctx, &bigquery.TableMetadata{Schema: schema1}); err != nil {
// TODO: Handle error. // TODO: Handle error.
} }

View File

@@ -30,7 +30,7 @@ func (e Error) Error() string {
return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason) return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason)
} }
func errorFromErrorProto(ep *bq.ErrorProto) *Error { func bqToError(ep *bq.ErrorProto) *Error {
if ep == nil { if ep == nil {
return nil return nil
} }

View File

@@ -95,7 +95,7 @@ func TestErrorFromErrorProto(t *testing.T) {
want: &Error{Location: "L", Message: "M", Reason: "R"}, want: &Error{Location: "L", Message: "M", Reason: "R"},
}, },
} { } {
if got := errorFromErrorProto(test.in); !testutil.Equal(got, test.want) { if got := bqToError(test.in); !testutil.Equal(got, test.want) {
t.Errorf("%v: got %v, want %v", test.in, got, test.want) t.Errorf("%v: got %v, want %v", test.in, got, test.want)
} }
} }

View File

@@ -86,7 +86,18 @@ func ExampleClient_JobFromID() {
if err != nil { if err != nil {
// TODO: Handle error. // TODO: Handle error.
} }
fmt.Println(job) fmt.Println(job.LastStatus()) // Display the job's status.
}
func ExampleClient_Jobs() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Jobs(ctx)
it.State = bigquery.Running // list only running jobs.
_ = it // TODO: iterate using Next or iterator.Pager.
} }
func ExampleNewGCSReference() { func ExampleNewGCSReference() {
@@ -228,6 +239,25 @@ func ExampleJob_Wait() {
} }
} }
func ExampleJob_Config() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx)
if err != nil {
// TODO: Handle error.
}
jc, err := job.Config()
if err != nil {
// TODO: Handle error.
}
copyConfig := jc.(*bigquery.CopyConfig)
fmt.Println(copyConfig.Dst, copyConfig.CreateDisposition)
}
func ExampleDataset_Create() { func ExampleDataset_Create() {
ctx := context.Background() ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id") client, err := bigquery.NewClient(ctx, "project-id")
@@ -365,10 +395,11 @@ func ExampleInferSchema() {
func ExampleInferSchema_tags() { func ExampleInferSchema_tags() {
type Item struct { type Item struct {
Name string Name string
Size float64 Size float64
Count int `bigquery:"number"` Count int `bigquery:"number"`
Secret []byte `bigquery:"-"` Secret []byte `bigquery:"-"`
Optional bool `bigquery:",nullable"`
} }
schema, err := bigquery.InferSchema(Item{}) schema, err := bigquery.InferSchema(Item{})
if err != nil { if err != nil {
@@ -376,12 +407,13 @@ func ExampleInferSchema_tags() {
// TODO: Handle error. // TODO: Handle error.
} }
for _, fs := range schema { for _, fs := range schema {
fmt.Println(fs.Name, fs.Type) fmt.Println(fs.Name, fs.Type, fs.Required)
} }
// Output: // Output:
// Name STRING // Name STRING true
// Size FLOAT // Size FLOAT true
// number INTEGER // number INTEGER true
// Optional BOOLEAN false
} }
func ExampleTable_Create() { func ExampleTable_Create() {

398
vendor/cloud.google.com/go/bigquery/external.go generated vendored Normal file
View File

@@ -0,0 +1,398 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"encoding/base64"
"unicode/utf8"
bq "google.golang.org/api/bigquery/v2"
)
// DataFormat describes the format of BigQuery table data.
type DataFormat string
// Constants describing the format of BigQuery table data.
const (
CSV DataFormat = "CSV"
Avro DataFormat = "AVRO"
JSON DataFormat = "NEWLINE_DELIMITED_JSON"
DatastoreBackup DataFormat = "DATASTORE_BACKUP"
GoogleSheets DataFormat = "GOOGLE_SHEETS"
Bigtable DataFormat = "BIGTABLE"
)
// ExternalData is a table which is stored outside of BigQuery. It is implemented by
// *ExternalDataConfig.
// GCSReference also implements it, for backwards compatibility.
type ExternalData interface {
toBQ() bq.ExternalDataConfiguration
}
// ExternalDataConfig describes data external to BigQuery that can be used
// in queries and to create external tables.
type ExternalDataConfig struct {
// The format of the data. Required.
SourceFormat DataFormat
// The fully-qualified URIs that point to your
// data in Google Cloud. Required.
//
// For Google Cloud Storage URIs, each URI can contain one '*' wildcard character
// and it must come after the 'bucket' name. Size limits related to load jobs
// apply to external data sources.
//
// For Google Cloud Bigtable URIs, exactly one URI can be specified and it has be
// a fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
//
// For Google Cloud Datastore backups, exactly one URI can be specified. Also,
// the '*' wildcard character is not allowed.
SourceURIs []string
// The schema of the data. Required for CSV and JSON; disallowed for the
// other formats.
Schema Schema
// Try to detect schema and format options automatically.
// Any option specified explicitly will be honored.
AutoDetect bool
// The compression type of the data.
Compression Compression
// IgnoreUnknownValues causes values not matching the schema to be
// tolerated. Unknown values are ignored. For CSV this ignores extra values
// at the end of a line. For JSON this ignores named values that do not
// match any column name. If this field is not set, records containing
// unknown values are treated as bad records. The MaxBadRecords field can
// be used to customize how bad records are handled.
IgnoreUnknownValues bool
// MaxBadRecords is the maximum number of bad records that will be ignored
// when reading data.
MaxBadRecords int64
// Additional options for CSV, GoogleSheets and Bigtable formats.
Options ExternalDataConfigOptions
}
// toBQ converts the ExternalDataConfig to its BigQuery API (bq) form.
// Schema and Options are applied only when present, since both are optional.
func (e *ExternalDataConfig) toBQ() bq.ExternalDataConfiguration {
	q := bq.ExternalDataConfiguration{
		SourceFormat:        string(e.SourceFormat),
		SourceUris:          e.SourceURIs,
		Autodetect:          e.AutoDetect,
		Compression:         string(e.Compression),
		IgnoreUnknownValues: e.IgnoreUnknownValues,
		MaxBadRecords:       e.MaxBadRecords,
	}
	if e.Schema != nil {
		q.Schema = e.Schema.toBQ()
	}
	if e.Options != nil {
		// Options knows which of the format-specific sub-structs to fill in.
		e.Options.populateExternalDataConfig(&q)
	}
	return q
}
// bqToExternalDataConfig converts a BigQuery API ExternalDataConfiguration
// into the package's ExternalDataConfig. At most one of the format-specific
// option sub-structs (CSV, GoogleSheets, Bigtable) is expected to be set;
// the first non-nil one in the switch wins. Only the Bigtable conversion can
// fail (its qualifier decoding may error).
func bqToExternalDataConfig(q *bq.ExternalDataConfiguration) (*ExternalDataConfig, error) {
	e := &ExternalDataConfig{
		SourceFormat:        DataFormat(q.SourceFormat),
		SourceURIs:          q.SourceUris,
		AutoDetect:          q.Autodetect,
		Compression:         Compression(q.Compression),
		IgnoreUnknownValues: q.IgnoreUnknownValues,
		MaxBadRecords:       q.MaxBadRecords,
		Schema:              bqToSchema(q.Schema),
	}
	switch {
	case q.CsvOptions != nil:
		e.Options = bqToCSVOptions(q.CsvOptions)
	case q.GoogleSheetsOptions != nil:
		e.Options = bqToGoogleSheetsOptions(q.GoogleSheetsOptions)
	case q.BigtableOptions != nil:
		var err error
		e.Options, err = bqToBigtableOptions(q.BigtableOptions)
		if err != nil {
			return nil, err
		}
	}
	return e, nil
}
// ExternalDataConfigOptions are additional options for external data configurations.
// This interface is implemented by CSVOptions, GoogleSheetsOptions and BigtableOptions.
type ExternalDataConfigOptions interface {
populateExternalDataConfig(*bq.ExternalDataConfiguration)
}
// CSVOptions are additional options for CSV external data sources.
type CSVOptions struct {
// AllowJaggedRows causes missing trailing optional columns to be tolerated
// when reading CSV data. Missing values are treated as nulls.
AllowJaggedRows bool
// AllowQuotedNewlines sets whether quoted data sections containing
// newlines are allowed when reading CSV data.
AllowQuotedNewlines bool
// Encoding is the character encoding of data to be read.
Encoding Encoding
// FieldDelimiter is the separator for fields in a CSV file, used when
// reading or exporting data. The default is ",".
FieldDelimiter string
// Quote is the value used to quote data sections in a CSV file. The
// default quotation character is the double quote ("), which is used if
// both Quote and ForceZeroQuote are unset.
// To specify that no character should be interpreted as a quotation
// character, set ForceZeroQuote to true.
// Only used when reading data.
Quote string
ForceZeroQuote bool
// The number of rows at the top of a CSV file that BigQuery will skip when
// reading data.
SkipLeadingRows int64
}
// populateExternalDataConfig fills in the CSV-specific portion of a BigQuery
// API ExternalDataConfiguration from o, implementing
// ExternalDataConfigOptions.
func (o *CSVOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
	c.CsvOptions = &bq.CsvOptions{
		AllowJaggedRows:     o.AllowJaggedRows,
		AllowQuotedNewlines: o.AllowQuotedNewlines,
		Encoding:            string(o.Encoding),
		FieldDelimiter:      o.FieldDelimiter,
		Quote:               o.quote(), // nil when no quote setting should be sent
		SkipLeadingRows:     o.SkipLeadingRows,
	}
}
// quote returns the CSV quote character, or nil if unset.
func (o *CSVOptions) quote() *string {
if o.ForceZeroQuote {
quote := ""
return &quote
}
if o.Quote == "" {
return nil
}
return &o.Quote
}
// setQuote records a service-side quote value on o. A nil pointer leaves o
// untouched; a non-nil empty string means "no quote character" and is
// represented by setting ForceZeroQuote.
func (o *CSVOptions) setQuote(ps *string) {
	if ps == nil {
		return
	}
	o.Quote = *ps
	if *ps == "" {
		o.ForceZeroQuote = true
	}
}
// bqToCSVOptions converts BigQuery API CsvOptions into the package's
// CSVOptions, translating the service's *string quote representation via
// setQuote.
func bqToCSVOptions(q *bq.CsvOptions) *CSVOptions {
	o := &CSVOptions{
		AllowJaggedRows:     q.AllowJaggedRows,
		AllowQuotedNewlines: q.AllowQuotedNewlines,
		Encoding:            Encoding(q.Encoding),
		FieldDelimiter:      q.FieldDelimiter,
		SkipLeadingRows:     q.SkipLeadingRows,
	}
	o.setQuote(q.Quote)
	return o
}
// GoogleSheetsOptions are additional options for GoogleSheets external data sources.
type GoogleSheetsOptions struct {
// The number of rows at the top of a sheet that BigQuery will skip when
// reading data.
SkipLeadingRows int64
}
// populateExternalDataConfig fills in the GoogleSheets-specific portion of a
// BigQuery API ExternalDataConfiguration from o, implementing
// ExternalDataConfigOptions.
func (o *GoogleSheetsOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
	c.GoogleSheetsOptions = &bq.GoogleSheetsOptions{
		SkipLeadingRows: o.SkipLeadingRows,
	}
}
// bqToGoogleSheetsOptions converts BigQuery API GoogleSheetsOptions into the
// package's GoogleSheetsOptions.
func bqToGoogleSheetsOptions(q *bq.GoogleSheetsOptions) *GoogleSheetsOptions {
	return &GoogleSheetsOptions{
		SkipLeadingRows: q.SkipLeadingRows,
	}
}
// BigtableOptions are additional options for Bigtable external data sources.
type BigtableOptions struct {
// A list of column families to expose in the table schema along with their
// types. If omitted, all column families are present in the table schema and
// their values are read as BYTES.
ColumnFamilies []*BigtableColumnFamily
// If true, then the column families that are not specified in columnFamilies
// list are not exposed in the table schema. Otherwise, they are read with BYTES
// type values. The default is false.
IgnoreUnspecifiedColumnFamilies bool
// If true, then the rowkey column families will be read and converted to string.
// Otherwise they are read with BYTES type values and users need to manually cast
// them with CAST if necessary. The default is false.
ReadRowkeyAsString bool
}
// populateExternalDataConfig fills in the Bigtable-specific portion of a
// BigQuery API ExternalDataConfiguration from o, implementing
// ExternalDataConfigOptions. Each column family is converted via its toBQ.
func (o *BigtableOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
	q := &bq.BigtableOptions{
		IgnoreUnspecifiedColumnFamilies: o.IgnoreUnspecifiedColumnFamilies,
		ReadRowkeyAsString:              o.ReadRowkeyAsString,
	}
	for _, f := range o.ColumnFamilies {
		q.ColumnFamilies = append(q.ColumnFamilies, f.toBQ())
	}
	c.BigtableOptions = q
}
// bqToBigtableOptions converts BigQuery API BigtableOptions into the
// package's BigtableOptions. It fails if any column family fails to convert
// (e.g. a column qualifier that cannot be base64-decoded).
func bqToBigtableOptions(q *bq.BigtableOptions) (*BigtableOptions, error) {
	b := &BigtableOptions{
		IgnoreUnspecifiedColumnFamilies: q.IgnoreUnspecifiedColumnFamilies,
		ReadRowkeyAsString:              q.ReadRowkeyAsString,
	}
	for _, f := range q.ColumnFamilies {
		f2, err := bqToBigtableColumnFamily(f)
		if err != nil {
			return nil, err
		}
		b.ColumnFamilies = append(b.ColumnFamilies, f2)
	}
	return b, nil
}
// BigtableColumnFamily describes how BigQuery should access a Bigtable column family.
type BigtableColumnFamily struct {
// Identifier of the column family.
FamilyID string
// Lists of columns that should be exposed as individual fields as opposed to a
// list of (column name, value) pairs. All columns whose qualifier matches a
// qualifier in this list can be accessed as .. Other columns can be accessed as
// a list through .Column field.
Columns []*BigtableColumn
// The encoding of the values when the type is not STRING. Acceptable encoding values are:
// - TEXT - indicates values are alphanumeric text strings.
// - BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions.
// This can be overridden for a specific column by listing that column in 'columns' and
// specifying an encoding for it.
Encoding string
// If true, only the latest version of values are exposed for all columns in this
// column family. This can be overridden for a specific column by listing that
// column in 'columns' and specifying a different setting for that column.
OnlyReadLatest bool
// The type to convert the value in cells of this
// column family. The values are expected to be encoded using HBase
// Bytes.toBytes function when using the BINARY encoding value.
// Following BigQuery types are allowed (case-sensitive):
// BYTES STRING INTEGER FLOAT BOOLEAN.
// The default type is BYTES. This can be overridden for a specific column by
// listing that column in 'columns' and specifying a type for it.
Type string
}
// toBQ converts the BigtableColumnFamily to its BigQuery API form, including
// all of its columns.
func (b *BigtableColumnFamily) toBQ() *bq.BigtableColumnFamily {
	q := &bq.BigtableColumnFamily{
		FamilyId:       b.FamilyID,
		Encoding:       b.Encoding,
		OnlyReadLatest: b.OnlyReadLatest,
		Type:           b.Type,
	}
	for _, col := range b.Columns {
		q.Columns = append(q.Columns, col.toBQ())
	}
	return q
}
// bqToBigtableColumnFamily converts a BigQuery API BigtableColumnFamily into
// the package's BigtableColumnFamily. It fails if any contained column fails
// to convert (e.g. a qualifier that cannot be base64-decoded).
func bqToBigtableColumnFamily(q *bq.BigtableColumnFamily) (*BigtableColumnFamily, error) {
	b := &BigtableColumnFamily{
		FamilyID:       q.FamilyId,
		Encoding:       q.Encoding,
		OnlyReadLatest: q.OnlyReadLatest,
		Type:           q.Type,
	}
	for _, col := range q.Columns {
		c, err := bqToBigtableColumn(col)
		if err != nil {
			return nil, err
		}
		b.Columns = append(b.Columns, c)
	}
	return b, nil
}
// BigtableColumn describes how BigQuery should access a Bigtable column.
type BigtableColumn struct {
// Qualifier of the column. Columns in the parent column family that have this
// exact qualifier are exposed as . field. The column field name is the
// same as the column qualifier.
Qualifier string
// If the qualifier is not a valid BigQuery field identifier i.e. does not match
// [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field
// name and is used as field name in queries.
FieldName string
// If true, only the latest version of values are exposed for this column.
// See BigtableColumnFamily.OnlyReadLatest.
OnlyReadLatest bool
// The encoding of the values when the type is not STRING.
// See BigtableColumnFamily.Encoding
Encoding string
// The type to convert the value in cells of this column.
// See BigtableColumnFamily.Type
Type string
}
// toBQ converts the BigtableColumn to its BigQuery API form. The qualifier
// is sent as a plain string when it is valid UTF-8; otherwise it is sent
// base64-encoded (raw std encoding, no padding) in QualifierEncoded.
func (b *BigtableColumn) toBQ() *bq.BigtableColumn {
	q := &bq.BigtableColumn{
		FieldName:      b.FieldName,
		OnlyReadLatest: b.OnlyReadLatest,
		Encoding:       b.Encoding,
		Type:           b.Type,
	}
	if utf8.ValidString(b.Qualifier) {
		q.QualifierString = b.Qualifier
	} else {
		q.QualifierEncoded = base64.RawStdEncoding.EncodeToString([]byte(b.Qualifier))
	}
	return q
}
// bqToBigtableColumn converts a BigQuery API BigtableColumn into the
// package's BigtableColumn. The qualifier is taken from QualifierString when
// non-empty; otherwise QualifierEncoded is base64-decoded (raw std encoding,
// matching toBQ). It returns an error only when that decoding fails.
func bqToBigtableColumn(q *bq.BigtableColumn) (*BigtableColumn, error) {
	b := &BigtableColumn{
		FieldName:      q.FieldName,
		OnlyReadLatest: q.OnlyReadLatest,
		Encoding:       q.Encoding,
		Type:           q.Type,
	}
	if q.QualifierString != "" {
		b.Qualifier = q.QualifierString
		return b, nil
	}
	// Renamed from "bytes": the original local shadowed the standard
	// library's bytes package name within this function.
	decoded, err := base64.RawStdEncoding.DecodeString(q.QualifierEncoded)
	if err != nil {
		return nil, err
	}
	b.Qualifier = string(decoded)
	return b, nil
}

143
vendor/cloud.google.com/go/bigquery/external_test.go generated vendored Normal file
View File

@@ -0,0 +1,143 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"testing"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
)
// TestExternalDataConfig round-trips several ExternalDataConfig values
// through toBQ and bqToExternalDataConfig and checks that the result equals
// the original. The table covers each Options variant: CSV, GoogleSheets,
// and Bigtable.
func TestExternalDataConfig(t *testing.T) {
	// Round-trip of ExternalDataConfig to underlying representation.
	for i, want := range []*ExternalDataConfig{
		{
			SourceFormat:        CSV,
			SourceURIs:          []string{"uri"},
			Schema:              Schema{{Name: "n", Type: IntegerFieldType}},
			AutoDetect:          true,
			Compression:         Gzip,
			IgnoreUnknownValues: true,
			MaxBadRecords:       17,
			Options: &CSVOptions{
				AllowJaggedRows:     true,
				AllowQuotedNewlines: true,
				Encoding:            UTF_8,
				FieldDelimiter:      "f",
				Quote:               "q",
				SkipLeadingRows:     3,
			},
		},
		{
			SourceFormat: GoogleSheets,
			Options:      &GoogleSheetsOptions{SkipLeadingRows: 4},
		},
		{
			SourceFormat: Bigtable,
			Options: &BigtableOptions{
				IgnoreUnspecifiedColumnFamilies: true,
				ReadRowkeyAsString:              true,
				ColumnFamilies: []*BigtableColumnFamily{
					{
						FamilyID:       "f1",
						Encoding:       "TEXT",
						OnlyReadLatest: true,
						Type:           "FLOAT",
						Columns: []*BigtableColumn{
							{
								// Valid UTF-8, so it round-trips through QualifierString.
								Qualifier:      "valid-utf-8",
								FieldName:      "fn",
								OnlyReadLatest: true,
								Encoding:       "BINARY",
								Type:           "STRING",
							},
						},
					},
				},
			},
		},
	} {
		q := want.toBQ()
		got, err := bqToExternalDataConfig(&q)
		if err != nil {
			t.Fatal(err)
		}
		if diff := testutil.Diff(got, want); diff != "" {
			t.Errorf("#%d: got=-, want=+:\n%s", i, diff)
		}
	}
}
// TestQuote checks CSVOptions.quote for every combination of Quote and
// ForceZeroQuote: unset yields nil, ForceZeroQuote yields a pointer to the
// empty string (overriding Quote), and otherwise a pointer to Quote.
func TestQuote(t *testing.T) {
	ptr := func(s string) *string { return &s }
	for _, test := range []struct {
		quote string
		force bool
		want  *string
	}{
		{"", false, nil},
		{"", true, ptr("")},
		{"-", false, ptr("-")},
		{"-", true, ptr("")},
	} {
		o := CSVOptions{
			Quote:          test.quote,
			ForceZeroQuote: test.force,
		}
		got := o.quote()
		// First compare nil-ness, then (when both non-nil) the pointed-to values.
		if (got == nil) != (test.want == nil) {
			t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want))
		}
		if got != nil && test.want != nil && *got != *test.want {
			t.Errorf("%+v: got %q, want %q", test, *got, *test.want)
		}
	}
}
// TestQualifier checks that BigtableColumn.toBQ encodes a valid-UTF-8
// qualifier as QualifierString and an invalid-UTF-8 qualifier as a base64
// QualifierEncoded, and that bqToBigtableColumn round-trips both forms back
// to the original qualifier.
func TestQualifier(t *testing.T) {
	// Valid UTF-8: should pass through QualifierString untouched.
	b := BigtableColumn{Qualifier: "a"}
	q := b.toBQ()
	if q.QualifierString != b.Qualifier || q.QualifierEncoded != "" {
		t.Errorf("got (%q, %q), want (%q, %q)",
			q.QualifierString, q.QualifierEncoded, b.Qualifier, "")
	}
	b2, err := bqToBigtableColumn(q)
	if err != nil {
		t.Fatal(err)
	}
	if got, want := b2.Qualifier, b.Qualifier; got != want {
		t.Errorf("got %q, want %q", got, want)
	}
	const (
		// invalidEncoded is the raw-std base64 encoding of invalidUTF8.
		invalidUTF8    = "\xDF\xFF"
		invalidEncoded = "3/8"
	)
	// Invalid UTF-8: should go through QualifierEncoded instead.
	b = BigtableColumn{Qualifier: invalidUTF8}
	q = b.toBQ()
	if q.QualifierString != "" || q.QualifierEncoded != invalidEncoded {
		t.Errorf("got (%q, %q), want (%q, %q)",
			q.QualifierString, "", b.Qualifier, invalidEncoded)
	}
	b2, err = bqToBigtableColumn(q)
	if err != nil {
		t.Fatal(err)
	}
	if got, want := b2.Qualifier, b.Qualifier; got != want {
		t.Errorf("got %q, want %q", got, want)
	}
}

View File

@@ -21,12 +21,6 @@ import (
// ExtractConfig holds the configuration for an extract job. // ExtractConfig holds the configuration for an extract job.
type ExtractConfig struct { type ExtractConfig struct {
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string
// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool
// Src is the table from which data will be extracted. // Src is the table from which data will be extracted.
Src *Table Src *Table
@@ -35,10 +29,52 @@ type ExtractConfig struct {
// DisableHeader disables the printing of a header row in exported data. // DisableHeader disables the printing of a header row in exported data.
DisableHeader bool DisableHeader bool
// The labels associated with this job.
Labels map[string]string
}
func (e *ExtractConfig) toBQ() *bq.JobConfiguration {
var printHeader *bool
if e.DisableHeader {
f := false
printHeader = &f
}
return &bq.JobConfiguration{
Labels: e.Labels,
Extract: &bq.JobConfigurationExtract{
DestinationUris: append([]string{}, e.Dst.URIs...),
Compression: string(e.Dst.Compression),
DestinationFormat: string(e.Dst.DestinationFormat),
FieldDelimiter: e.Dst.FieldDelimiter,
SourceTable: e.Src.toBQ(),
PrintHeader: printHeader,
},
}
}
func bqToExtractConfig(q *bq.JobConfiguration, c *Client) *ExtractConfig {
qe := q.Extract
return &ExtractConfig{
Labels: q.Labels,
Dst: &GCSReference{
URIs: qe.DestinationUris,
Compression: Compression(qe.Compression),
DestinationFormat: DataFormat(qe.DestinationFormat),
FileConfig: FileConfig{
CSVOptions: CSVOptions{
FieldDelimiter: qe.FieldDelimiter,
},
},
},
DisableHeader: qe.PrintHeader != nil && !*qe.PrintHeader,
Src: bqToTable(qe.SourceTable, c),
}
} }
// An Extractor extracts data from a BigQuery table into Google Cloud Storage. // An Extractor extracts data from a BigQuery table into Google Cloud Storage.
type Extractor struct { type Extractor struct {
JobIDConfig
ExtractConfig ExtractConfig
c *Client c *Client
} }
@@ -58,23 +94,12 @@ func (t *Table) ExtractorTo(dst *GCSReference) *Extractor {
// Run initiates an extract job. // Run initiates an extract job.
func (e *Extractor) Run(ctx context.Context) (*Job, error) { func (e *Extractor) Run(ctx context.Context) (*Job, error) {
var printHeader *bool return e.c.insertJob(ctx, e.newJob(), nil)
if e.DisableHeader { }
f := false
printHeader = &f func (e *Extractor) newJob() *bq.Job {
} return &bq.Job{
job := &bq.Job{ JobReference: e.JobIDConfig.createJobRef(e.c.projectID),
JobReference: createJobRef(e.JobID, e.AddJobIDSuffix, e.c.projectID), Configuration: e.ExtractConfig.toBQ(),
Configuration: &bq.JobConfiguration{ }
Extract: &bq.JobConfigurationExtract{
DestinationUris: append([]string{}, e.Dst.uris...),
Compression: string(e.Dst.Compression),
DestinationFormat: string(e.Dst.DestinationFormat),
FieldDelimiter: e.Dst.FieldDelimiter,
SourceTable: e.Src.tableRefProto(),
PrintHeader: printHeader,
},
},
}
return e.c.insertJob(ctx, &insertJobConf{job: job})
} }

View File

@@ -17,7 +17,9 @@ package bigquery
import ( import (
"testing" "testing"
"golang.org/x/net/context" "github.com/google/go-cmp/cmp"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2" bq "google.golang.org/api/bigquery/v2"
) )
@@ -38,11 +40,15 @@ func defaultExtractJob() *bq.Job {
} }
} }
func defaultGCS() *GCSReference {
return &GCSReference{
URIs: []string{"uri"},
}
}
func TestExtract(t *testing.T) { func TestExtract(t *testing.T) {
defer fixRandomJobID("RANDOM")() defer fixRandomID("RANDOM")()
s := &testService{}
c := &Client{ c := &Client{
service: s,
projectID: "client-project-id", projectID: "client-project-id",
} }
@@ -58,11 +64,15 @@ func TestExtract(t *testing.T) {
want: defaultExtractJob(), want: defaultExtractJob(),
}, },
{ {
dst: defaultGCS(), dst: defaultGCS(),
src: c.Dataset("dataset-id").Table("table-id"), src: c.Dataset("dataset-id").Table("table-id"),
config: ExtractConfig{DisableHeader: true}, config: ExtractConfig{
DisableHeader: true,
Labels: map[string]string{"a": "b"},
},
want: func() *bq.Job { want: func() *bq.Job {
j := defaultExtractJob() j := defaultExtractJob()
j.Configuration.Labels = map[string]string{"a": "b"}
f := false f := false
j.Configuration.Extract.PrintHeader = &f j.Configuration.Extract.PrintHeader = &f
return j return j
@@ -92,10 +102,17 @@ func TestExtract(t *testing.T) {
tc.config.Src = ext.Src tc.config.Src = ext.Src
tc.config.Dst = ext.Dst tc.config.Dst = ext.Dst
ext.ExtractConfig = tc.config ext.ExtractConfig = tc.config
if _, err := ext.Run(context.Background()); err != nil { got := ext.newJob()
t.Errorf("#%d: err calling extract: %v", i, err) checkJob(t, i, got, tc.want)
continue
jc, err := bqToJobConfig(got.Configuration, c)
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
diff := testutil.Diff(jc, &ext.ExtractConfig,
cmp.AllowUnexported(Table{}, Client{}))
if diff != "" {
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
} }
checkJob(t, i, s.Job, tc.want)
} }
} }

View File

@@ -22,6 +22,10 @@ import (
// A ReaderSource is a source for a load operation that gets // A ReaderSource is a source for a load operation that gets
// data from an io.Reader. // data from an io.Reader.
//
// When a ReaderSource is part of a LoadConfig obtained via Job.Config,
// its internal io.Reader will be nil, so it cannot be used for a
// subsequent load operation.
type ReaderSource struct { type ReaderSource struct {
r io.Reader r io.Reader
FileConfig FileConfig
@@ -34,9 +38,9 @@ func NewReaderSource(r io.Reader) *ReaderSource {
return &ReaderSource{r: r} return &ReaderSource{r: r}
} }
func (r *ReaderSource) populateInsertJobConfForLoad(conf *insertJobConf) { func (r *ReaderSource) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
conf.media = r.r r.FileConfig.populateLoadConfig(lc)
r.FileConfig.populateLoadConfig(conf.job.Configuration.Load) return r.r
} }
// FileConfig contains configuration options that pertain to files, typically // FileConfig contains configuration options that pertain to files, typically
@@ -48,29 +52,10 @@ type FileConfig struct {
// Allowed values are: CSV, Avro, JSON, DatastoreBackup. The default is CSV. // Allowed values are: CSV, Avro, JSON, DatastoreBackup. The default is CSV.
SourceFormat DataFormat SourceFormat DataFormat
// FieldDelimiter is the separator for fields in a CSV file, used when
// reading or exporting data. The default is ",".
FieldDelimiter string
// The number of rows at the top of a CSV file that BigQuery will skip when
// reading data.
SkipLeadingRows int64
// AllowJaggedRows causes missing trailing optional columns to be tolerated
// when reading CSV data. Missing values are treated as nulls.
AllowJaggedRows bool
// AllowQuotedNewlines sets whether quoted data sections containing
// newlines are allowed when reading CSV data.
AllowQuotedNewlines bool
// Indicates if we should automatically infer the options and // Indicates if we should automatically infer the options and
// schema for CSV and JSON sources. // schema for CSV and JSON sources.
AutoDetect bool AutoDetect bool
// Encoding is the character encoding of data to be read.
Encoding Encoding
// MaxBadRecords is the maximum number of bad records that will be ignored // MaxBadRecords is the maximum number of bad records that will be ignored
// when reading data. // when reading data.
MaxBadRecords int64 MaxBadRecords int64
@@ -87,26 +72,8 @@ type FileConfig struct {
// unless the data is being loaded into a table that already exists. // unless the data is being loaded into a table that already exists.
Schema Schema Schema Schema
// Quote is the value used to quote data sections in a CSV file. The // Additional options for CSV files.
// default quotation character is the double quote ("), which is used if CSVOptions
// both Quote and ForceZeroQuote are unset.
// To specify that no character should be interpreted as a quotation
// character, set ForceZeroQuote to true.
// Only used when reading data.
Quote string
ForceZeroQuote bool
}
// quote returns the CSV quote character, or nil if unset.
func (fc *FileConfig) quote() *string {
if fc.ForceZeroQuote {
quote := ""
return &quote
}
if fc.Quote == "" {
return nil
}
return &fc.Quote
} }
func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) { func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {
@@ -120,47 +87,43 @@ func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {
conf.IgnoreUnknownValues = fc.IgnoreUnknownValues conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
conf.MaxBadRecords = fc.MaxBadRecords conf.MaxBadRecords = fc.MaxBadRecords
if fc.Schema != nil { if fc.Schema != nil {
conf.Schema = fc.Schema.asTableSchema() conf.Schema = fc.Schema.toBQ()
} }
conf.Quote = fc.quote() conf.Quote = fc.quote()
} }
func bqPopulateFileConfig(conf *bq.JobConfigurationLoad, fc *FileConfig) {
fc.SourceFormat = DataFormat(conf.SourceFormat)
fc.AutoDetect = conf.Autodetect
fc.MaxBadRecords = conf.MaxBadRecords
fc.IgnoreUnknownValues = conf.IgnoreUnknownValues
fc.Schema = bqToSchema(conf.Schema)
fc.SkipLeadingRows = conf.SkipLeadingRows
fc.AllowJaggedRows = conf.AllowJaggedRows
fc.AllowQuotedNewlines = conf.AllowQuotedNewlines
fc.Encoding = Encoding(conf.Encoding)
fc.FieldDelimiter = conf.FieldDelimiter
fc.CSVOptions.setQuote(conf.Quote)
}
func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) { func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) {
format := fc.SourceFormat format := fc.SourceFormat
if format == "" { if format == "" {
// Format must be explicitly set for external data sources. // Format must be explicitly set for external data sources.
format = CSV format = CSV
} }
// TODO(jba): support AutoDetect. conf.Autodetect = fc.AutoDetect
conf.IgnoreUnknownValues = fc.IgnoreUnknownValues conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
conf.MaxBadRecords = fc.MaxBadRecords conf.MaxBadRecords = fc.MaxBadRecords
conf.SourceFormat = string(format) conf.SourceFormat = string(format)
if fc.Schema != nil { if fc.Schema != nil {
conf.Schema = fc.Schema.asTableSchema() conf.Schema = fc.Schema.toBQ()
} }
if format == CSV { if format == CSV {
conf.CsvOptions = &bq.CsvOptions{ fc.CSVOptions.populateExternalDataConfig(conf)
AllowJaggedRows: fc.AllowJaggedRows,
AllowQuotedNewlines: fc.AllowQuotedNewlines,
Encoding: string(fc.Encoding),
FieldDelimiter: fc.FieldDelimiter,
SkipLeadingRows: fc.SkipLeadingRows,
Quote: fc.quote(),
}
} }
} }
// DataFormat describes the format of BigQuery table data.
type DataFormat string
// Constants describing the format of BigQuery table data.
const (
CSV DataFormat = "CSV"
Avro DataFormat = "AVRO"
JSON DataFormat = "NEWLINE_DELIMITED_JSON"
DatastoreBackup DataFormat = "DATASTORE_BACKUP"
)
// Encoding specifies the character encoding of data to be loaded into BigQuery. // Encoding specifies the character encoding of data to be loaded into BigQuery.
// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding // See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
// for more details about how this is used. // for more details about how this is used.

View File

@@ -22,56 +22,36 @@ import (
bq "google.golang.org/api/bigquery/v2" bq "google.golang.org/api/bigquery/v2"
) )
func TestQuote(t *testing.T) { var (
ptr := func(s string) *string { return &s } hyphen = "-"
fc = FileConfig{
for _, test := range []struct {
quote string
force bool
want *string
}{
{"", false, nil},
{"", true, ptr("")},
{"-", false, ptr("-")},
{"-", true, ptr("")},
} {
fc := FileConfig{
Quote: test.quote,
ForceZeroQuote: test.force,
}
got := fc.quote()
if (got == nil) != (test.want == nil) {
t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want))
}
if got != nil && test.want != nil && *got != *test.want {
t.Errorf("%+v: got %q, want %q", test, *got, *test.want)
}
}
}
func TestPopulateLoadConfig(t *testing.T) {
hyphen := "-"
fc := FileConfig{
SourceFormat: CSV, SourceFormat: CSV,
FieldDelimiter: "\t", AutoDetect: true,
SkipLeadingRows: 8,
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Encoding: UTF_8,
MaxBadRecords: 7, MaxBadRecords: 7,
IgnoreUnknownValues: true, IgnoreUnknownValues: true,
Schema: Schema{ Schema: Schema{
stringFieldSchema(), stringFieldSchema(),
nestedFieldSchema(), nestedFieldSchema(),
}, },
Quote: hyphen, CSVOptions: CSVOptions{
Quote: hyphen,
FieldDelimiter: "\t",
SkipLeadingRows: 8,
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Encoding: UTF_8,
},
} }
)
func TestFileConfigPopulateLoadConfig(t *testing.T) {
want := &bq.JobConfigurationLoad{ want := &bq.JobConfigurationLoad{
SourceFormat: "CSV", SourceFormat: "CSV",
FieldDelimiter: "\t", FieldDelimiter: "\t",
SkipLeadingRows: 8, SkipLeadingRows: 8,
AllowJaggedRows: true, AllowJaggedRows: true,
AllowQuotedNewlines: true, AllowQuotedNewlines: true,
Autodetect: true,
Encoding: "UTF-8", Encoding: "UTF-8",
MaxBadRecords: 7, MaxBadRecords: 7,
IgnoreUnknownValues: true, IgnoreUnknownValues: true,
@@ -88,3 +68,31 @@ func TestPopulateLoadConfig(t *testing.T) {
t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want)) t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want))
} }
} }
func TestFileConfigPopulateExternalDataConfig(t *testing.T) {
got := &bq.ExternalDataConfiguration{}
fc.populateExternalDataConfig(got)
want := &bq.ExternalDataConfiguration{
SourceFormat: "CSV",
Autodetect: true,
MaxBadRecords: 7,
IgnoreUnknownValues: true,
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqStringFieldSchema(),
bqNestedFieldSchema(),
}},
CsvOptions: &bq.CsvOptions{
AllowJaggedRows: true,
AllowQuotedNewlines: true,
Encoding: "UTF-8",
FieldDelimiter: "\t",
Quote: &hyphen,
SkipLeadingRows: 8,
},
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("got=-, want=+:\n%s", diff)
}
}

View File

@@ -14,13 +14,17 @@
package bigquery package bigquery
import bq "google.golang.org/api/bigquery/v2" import (
"io"
bq "google.golang.org/api/bigquery/v2"
)
// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute // GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
// an input or output to a BigQuery operation. // an input or output to a BigQuery operation.
type GCSReference struct { type GCSReference struct {
// TODO(jba): Export so that GCSReference can be used to hold data from a Job.get api call and expose it to the user. // URIs refer to Google Cloud Storage objects.
uris []string URIs []string
FileConfig FileConfig
@@ -42,7 +46,7 @@ type GCSReference struct {
// For more information about the treatment of wildcards and multiple URIs, // For more information about the treatment of wildcards and multiple URIs,
// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple // see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
func NewGCSReference(uri ...string) *GCSReference { func NewGCSReference(uri ...string) *GCSReference {
return &GCSReference{uris: uri} return &GCSReference{URIs: uri}
} }
// Compression is the type of compression to apply when writing data to Google Cloud Storage. // Compression is the type of compression to apply when writing data to Google Cloud Storage.
@@ -53,15 +57,16 @@ const (
Gzip Compression = "GZIP" Gzip Compression = "GZIP"
) )
func (gcs *GCSReference) populateInsertJobConfForLoad(conf *insertJobConf) { func (gcs *GCSReference) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
conf.job.Configuration.Load.SourceUris = gcs.uris lc.SourceUris = gcs.URIs
gcs.FileConfig.populateLoadConfig(conf.job.Configuration.Load) gcs.FileConfig.populateLoadConfig(lc)
return nil
} }
func (gcs *GCSReference) externalDataConfig() bq.ExternalDataConfiguration { func (gcs *GCSReference) toBQ() bq.ExternalDataConfiguration {
conf := bq.ExternalDataConfiguration{ conf := bq.ExternalDataConfiguration{
Compression: string(gcs.Compression), Compression: string(gcs.Compression),
SourceUris: append([]string{}, gcs.uris...), SourceUris: append([]string{}, gcs.URIs...),
} }
gcs.FileConfig.populateExternalDataConfig(&conf) gcs.FileConfig.populateExternalDataConfig(&conf)
return conf return conf

View File

@@ -26,12 +26,15 @@ import (
"testing" "testing"
"time" "time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
gax "github.com/googleapis/gax-go" gax "github.com/googleapis/gax-go"
"cloud.google.com/go/civil" "cloud.google.com/go/civil"
"cloud.google.com/go/internal" "cloud.google.com/go/internal"
"cloud.google.com/go/internal/pretty" "cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil" "cloud.google.com/go/internal/testutil"
"cloud.google.com/go/storage"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/api/googleapi" "google.golang.org/api/googleapi"
"google.golang.org/api/iterator" "google.golang.org/api/iterator"
@@ -39,9 +42,10 @@ import (
) )
var ( var (
client *Client client *Client
dataset *Dataset storageClient *storage.Client
schema = Schema{ dataset *Dataset
schema = Schema{
{Name: "name", Type: StringFieldType}, {Name: "name", Type: StringFieldType},
{Name: "nums", Type: IntegerFieldType, Repeated: true}, {Name: "nums", Type: IntegerFieldType, Repeated: true},
{Name: "rec", Type: RecordFieldType, Schema: Schema{ {Name: "rec", Type: RecordFieldType, Schema: Schema{
@@ -49,12 +53,17 @@ var (
}}, }},
} }
testTableExpiration time.Time testTableExpiration time.Time
datasetIDs = testutil.NewUIDSpace("dataset") // BigQuery does not accept hyphens in dataset or table IDs, so we create IDs
// with underscores.
datasetIDs = testutil.NewUIDSpaceSep("dataset", '_')
tableIDs = testutil.NewUIDSpaceSep("table", '_')
) )
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
initIntegrationTest() cleanup := initIntegrationTest()
os.Exit(m.Run()) r := m.Run()
cleanup()
os.Exit(r)
} }
func getClient(t *testing.T) *Client { func getClient(t *testing.T) *Client {
@@ -65,16 +74,16 @@ func getClient(t *testing.T) *Client {
} }
// If integration tests will be run, create a unique bucket for them. // If integration tests will be run, create a unique bucket for them.
func initIntegrationTest() { func initIntegrationTest() func() {
flag.Parse() // needed for testing.Short() flag.Parse() // needed for testing.Short()
if testing.Short() { if testing.Short() {
return return func() {}
} }
ctx := context.Background() ctx := context.Background()
ts := testutil.TokenSource(ctx, Scope) ts := testutil.TokenSource(ctx, Scope)
if ts == nil { if ts == nil {
log.Println("Integration tests skipped. See CONTRIBUTING.md for details") log.Println("Integration tests skipped. See CONTRIBUTING.md for details")
return return func() {}
} }
projID := testutil.ProjID() projID := testutil.ProjID()
var err error var err error
@@ -82,13 +91,39 @@ func initIntegrationTest() {
if err != nil { if err != nil {
log.Fatalf("NewClient: %v", err) log.Fatalf("NewClient: %v", err)
} }
dataset = client.Dataset("bigquery_integration_test") storageClient, err = storage.NewClient(ctx,
if err := dataset.Create(ctx, nil); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409 option.WithTokenSource(testutil.TokenSource(ctx, storage.ScopeFullControl)))
log.Fatalf("creating dataset: %v", err) if err != nil {
log.Fatalf("storage.NewClient: %v", err)
}
dataset = client.Dataset(datasetIDs.New())
if err := dataset.Create(ctx, nil); err != nil {
log.Fatalf("creating dataset %s: %v", dataset.DatasetID, err)
} }
testTableExpiration = time.Now().Add(10 * time.Minute).Round(time.Second) testTableExpiration = time.Now().Add(10 * time.Minute).Round(time.Second)
return func() {
if err := deleteDataset(ctx, dataset); err != nil {
log.Printf("could not delete %s", dataset.DatasetID)
}
}
} }
func deleteDataset(ctx context.Context, ds *Dataset) error {
it := ds.Tables(ctx)
for {
tbl, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
return err
}
if err := tbl.Delete(ctx); err != nil {
return err
}
}
return ds.Delete(ctx)
}
func TestIntegration_TableCreate(t *testing.T) { func TestIntegration_TableCreate(t *testing.T) {
// Check that creating a record field with an empty schema is an error. // Check that creating a record field with an empty schema is an error.
if client == nil { if client == nil {
@@ -162,16 +197,28 @@ func TestIntegration_TableMetadata(t *testing.T) {
// Create tables that have time partitioning // Create tables that have time partitioning
partitionCases := []struct { partitionCases := []struct {
timePartitioning TimePartitioning timePartitioning TimePartitioning
expectedExpiration time.Duration wantExpiration time.Duration
wantField string
}{ }{
{TimePartitioning{}, time.Duration(0)}, {TimePartitioning{}, time.Duration(0), ""},
{TimePartitioning{time.Second}, time.Second}, {TimePartitioning{Expiration: time.Second}, time.Second, ""},
{
TimePartitioning{
Expiration: time.Second,
Field: "date",
}, time.Second, "date"},
} }
schema2 := Schema{
{Name: "name", Type: StringFieldType},
{Name: "date", Type: DateFieldType},
}
for i, c := range partitionCases { for i, c := range partitionCases {
table := dataset.Table(fmt.Sprintf("t_metadata_partition_%v", i)) table := dataset.Table(fmt.Sprintf("t_metadata_partition_%v", i))
err = table.Create(context.Background(), &TableMetadata{ err = table.Create(context.Background(), &TableMetadata{
Schema: schema, Schema: schema2,
TimePartitioning: &c.timePartitioning, TimePartitioning: &c.timePartitioning,
ExpirationTime: time.Now().Add(5 * time.Minute), ExpirationTime: time.Now().Add(5 * time.Minute),
}) })
@@ -185,7 +232,10 @@ func TestIntegration_TableMetadata(t *testing.T) {
} }
got := md.TimePartitioning got := md.TimePartitioning
want := &TimePartitioning{c.expectedExpiration} want := &TimePartitioning{
Expiration: c.wantExpiration,
Field: c.wantField,
}
if !testutil.Equal(got, want) { if !testutil.Equal(got, want) {
t.Errorf("metadata.TimePartitioning: got %v, want %v", got, want) t.Errorf("metadata.TimePartitioning: got %v, want %v", got, want)
} }
@@ -197,8 +247,7 @@ func TestIntegration_DatasetCreate(t *testing.T) {
t.Skip("Integration tests skipped") t.Skip("Integration tests skipped")
} }
ctx := context.Background() ctx := context.Background()
uid := strings.Replace(datasetIDs.New(), "-", "_", -1) ds := client.Dataset(datasetIDs.New())
ds := client.Dataset(uid)
wmd := &DatasetMetadata{Name: "name", Location: "EU"} wmd := &DatasetMetadata{Name: "name", Location: "EU"}
err := ds.Create(ctx, wmd) err := ds.Create(ctx, wmd)
if err != nil { if err != nil {
@@ -215,7 +264,7 @@ func TestIntegration_DatasetCreate(t *testing.T) {
t.Errorf("location: got %q, want %q", got, want) t.Errorf("location: got %q, want %q", got, want)
} }
if err := ds.Delete(ctx); err != nil { if err := ds.Delete(ctx); err != nil {
t.Fatalf("deleting dataset %s: %v", ds, err) t.Fatalf("deleting dataset %v: %v", ds, err)
} }
} }
@@ -251,12 +300,12 @@ func TestIntegration_DatasetDelete(t *testing.T) {
t.Skip("Integration tests skipped") t.Skip("Integration tests skipped")
} }
ctx := context.Background() ctx := context.Background()
ds := client.Dataset("delete_test") ds := client.Dataset(datasetIDs.New())
if err := ds.Create(ctx, nil); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409 if err := ds.Create(ctx, nil); err != nil {
t.Fatalf("creating dataset %s: %v", ds, err) t.Fatalf("creating dataset %s: %v", ds.DatasetID, err)
} }
if err := ds.Delete(ctx); err != nil { if err := ds.Delete(ctx); err != nil {
t.Fatalf("deleting dataset %s: %v", ds, err) t.Fatalf("deleting dataset %s: %v", ds.DatasetID, err)
} }
} }
@@ -340,6 +389,38 @@ func TestIntegration_DatasetUpdateDefaultExpiration(t *testing.T) {
} }
} }
func TestIntegration_DatasetUpdateAccess(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
md, err := dataset.Metadata(ctx)
if err != nil {
t.Fatal(err)
}
origAccess := append([]*AccessEntry(nil), md.Access...)
newEntry := &AccessEntry{
Role: ReaderRole,
Entity: "Joe@example.com",
EntityType: UserEmailEntity,
}
newAccess := append(md.Access, newEntry)
dm := DatasetMetadataToUpdate{Access: newAccess}
md, err = dataset.Update(ctx, dm, md.ETag)
if err != nil {
t.Fatal(err)
}
defer func() {
_, err := dataset.Update(ctx, DatasetMetadataToUpdate{Access: origAccess}, md.ETag)
if err != nil {
t.Log("could not restore dataset access list")
}
}()
if diff := testutil.Diff(md.Access, newAccess); diff != "" {
t.Fatalf("got=-, want=+:\n%s", diff)
}
}
func TestIntegration_DatasetUpdateLabels(t *testing.T) { func TestIntegration_DatasetUpdateLabels(t *testing.T) {
if client == nil { if client == nil {
t.Skip("Integration tests skipped") t.Skip("Integration tests skipped")
@@ -349,8 +430,6 @@ func TestIntegration_DatasetUpdateLabels(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
// TODO(jba): use a separate dataset for each test run so
// tests don't interfere with each other.
var dm DatasetMetadataToUpdate var dm DatasetMetadataToUpdate
dm.SetLabel("label", "value") dm.SetLabel("label", "value")
md, err = dataset.Update(ctx, dm, "") md, err = dataset.Update(ctx, dm, "")
@@ -371,6 +450,34 @@ func TestIntegration_DatasetUpdateLabels(t *testing.T) {
} }
} }
func TestIntegration_TableUpdateLabels(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
table := newTable(t, schema)
defer table.Delete(ctx)
var tm TableMetadataToUpdate
tm.SetLabel("label", "value")
md, err := table.Update(ctx, tm, "")
if err != nil {
t.Fatal(err)
}
if got, want := md.Labels["label"], "value"; got != want {
t.Errorf("got %q, want %q", got, want)
}
tm = TableMetadataToUpdate{}
tm.DeleteLabel("label")
md, err = table.Update(ctx, tm, "")
if err != nil {
t.Fatal(err)
}
if _, ok := md.Labels["label"]; ok {
t.Error("label still present after deletion")
}
}
func TestIntegration_Tables(t *testing.T) { func TestIntegration_Tables(t *testing.T) {
if client == nil { if client == nil {
t.Skip("Integration tests skipped") t.Skip("Integration tests skipped")
@@ -450,7 +557,6 @@ func TestIntegration_UploadAndRead(t *testing.T) {
// Query the table. // Query the table.
q := client.Query(fmt.Sprintf("select name, nums, rec from %s", table.TableID)) q := client.Query(fmt.Sprintf("select name, nums, rec from %s", table.TableID))
q.UseStandardSQL = true
q.DefaultProjectID = dataset.ProjectID q.DefaultProjectID = dataset.ProjectID
q.DefaultDatasetID = dataset.DatasetID q.DefaultDatasetID = dataset.DatasetID
@@ -465,11 +571,16 @@ func TestIntegration_UploadAndRead(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if job1.LastStatus() == nil {
t.Error("no LastStatus")
}
job2, err := client.JobFromID(ctx, job1.ID()) job2, err := client.JobFromID(ctx, job1.ID())
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if job2.LastStatus() == nil {
t.Error("no LastStatus")
}
rit, err = job2.Read(ctx) rit, err = job2.Read(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@@ -574,10 +685,9 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) {
defer table.Delete(ctx) defer table.Delete(ctx)
d := civil.Date{2016, 3, 20} d := civil.Date{2016, 3, 20}
tm := civil.Time{15, 4, 5, 0} tm := civil.Time{15, 4, 5, 6000}
ts := time.Date(2016, 3, 20, 15, 4, 5, 0, time.UTC) ts := time.Date(2016, 3, 20, 15, 4, 5, 6000, time.UTC)
dtm := civil.DateTime{d, tm} dtm := civil.DateTime{d, tm}
d2 := civil.Date{1994, 5, 15} d2 := civil.Date{1994, 5, 15}
tm2 := civil.Time{1, 2, 4, 0} tm2 := civil.Time{1, 2, 4, 0}
ts2 := time.Date(1994, 5, 15, 1, 2, 4, 0, time.UTC) ts2 := time.Date(1994, 5, 15, 1, 2, 4, 0, time.UTC)
@@ -660,12 +770,15 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) {
} }
sort.Sort(byName(got)) sort.Sort(byName(got))
// Round times to the microsecond.
roundToMicros := cmp.Transformer("RoundToMicros",
func(t time.Time) time.Time { return t.Round(time.Microsecond) })
// BigQuery does not elide nils. It reports an error for nil fields. // BigQuery does not elide nils. It reports an error for nil fields.
for i, g := range got { for i, g := range got {
if i >= len(want) { if i >= len(want) {
t.Errorf("%d: got %v, past end of want", i, pretty.Value(g)) t.Errorf("%d: got %v, past end of want", i, pretty.Value(g))
} else if w := want[i]; !testutil.Equal(g, w) { } else if diff := testutil.Diff(g, want[i], roundToMicros); diff != "" {
t.Errorf("%d: got %v, want %v", i, pretty.Value(g), pretty.Value(w)) t.Errorf("%d: got=-, want=+:\n%s", i, diff)
} }
} }
} }
@@ -814,14 +927,35 @@ func TestIntegration_Load(t *testing.T) {
rs := NewReaderSource(r) rs := NewReaderSource(r)
loader := table.LoaderFrom(rs) loader := table.LoaderFrom(rs)
loader.WriteDisposition = WriteTruncate loader.WriteDisposition = WriteTruncate
loader.Labels = map[string]string{"test": "go"}
job, err := loader.Run(ctx) job, err := loader.Run(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if job.LastStatus() == nil {
t.Error("no LastStatus")
}
conf, err := job.Config()
if err != nil {
t.Fatal(err)
}
config, ok := conf.(*LoadConfig)
if !ok {
t.Fatalf("got %T, want LoadConfig", conf)
}
diff := testutil.Diff(config, &loader.LoadConfig,
cmp.AllowUnexported(Table{}),
cmpopts.IgnoreUnexported(Client{}, ReaderSource{}),
// returned schema is at top level, not in the config
cmpopts.IgnoreFields(FileConfig{}, "Schema"))
if diff != "" {
t.Errorf("got=-, want=+:\n%s", diff)
}
if err := wait(ctx, job); err != nil { if err := wait(ctx, job); err != nil {
t.Fatal(err) t.Fatal(err)
} }
checkRead(t, "reader load", table.Read(ctx), wantRows) checkRead(t, "reader load", table.Read(ctx), wantRows)
} }
func TestIntegration_DML(t *testing.T) { func TestIntegration_DML(t *testing.T) {
@@ -829,22 +963,30 @@ func TestIntegration_DML(t *testing.T) {
t.Skip("Integration tests skipped") t.Skip("Integration tests skipped")
} }
ctx := context.Background() ctx := context.Background()
// Retry insert; sometimes it fails with INTERNAL. table := newTable(t, schema)
err := internal.Retry(ctx, gax.Backoff{}, func() (bool, error) { defer table.Delete(ctx)
table := newTable(t, schema)
defer table.Delete(ctx)
sql := fmt.Sprintf(`INSERT %s.%s (name, nums, rec)
VALUES ('a', [0], STRUCT<BOOL>(TRUE)),
('b', [1], STRUCT<BOOL>(FALSE)),
('c', [2], STRUCT<BOOL>(TRUE))`,
table.DatasetID, table.TableID)
if err := dmlInsert(ctx, sql); err != nil {
t.Fatal(err)
}
wantRows := [][]Value{
[]Value{"a", []Value{int64(0)}, []Value{true}},
[]Value{"b", []Value{int64(1)}, []Value{false}},
[]Value{"c", []Value{int64(2)}, []Value{true}},
}
checkRead(t, "DML", table.Read(ctx), wantRows)
}
func dmlInsert(ctx context.Context, sql string) error {
// Retry insert; sometimes it fails with INTERNAL.
return internal.Retry(ctx, gax.Backoff{}, func() (bool, error) {
// Use DML to insert. // Use DML to insert.
wantRows := [][]Value{ q := client.Query(sql)
[]Value{"a", []Value{int64(0)}, []Value{true}},
[]Value{"b", []Value{int64(1)}, []Value{false}},
[]Value{"c", []Value{int64(2)}, []Value{true}},
}
query := fmt.Sprintf("INSERT bigquery_integration_test.%s (name, nums, rec) "+
"VALUES ('a', [0], STRUCT<BOOL>(TRUE)), ('b', [1], STRUCT<BOOL>(FALSE)), ('c', [2], STRUCT<BOOL>(TRUE))",
table.TableID)
q := client.Query(query)
q.UseStandardSQL = true // necessary for DML
job, err := q.Run(ctx) job, err := q.Run(ctx)
if err != nil { if err != nil {
if e, ok := err.(*googleapi.Error); ok && e.Code < 500 { if e, ok := err.(*googleapi.Error); ok && e.Code < 500 {
@@ -853,18 +995,13 @@ func TestIntegration_DML(t *testing.T) {
return false, err return false, err
} }
if err := wait(ctx, job); err != nil { if err := wait(ctx, job); err != nil {
fmt.Printf("wait: %v\n", err) if e, ok := err.(*googleapi.Error); ok && e.Code < 500 {
return true, err // fail on 4xx
}
return false, err return false, err
} }
if msg, ok := compareRead(table.Read(ctx), wantRows); !ok {
// Stop on read error, because that has never been flaky.
return true, errors.New(msg)
}
return true, nil return true, nil
}) })
if err != nil {
t.Fatal(err)
}
} }
func TestIntegration_TimeTypes(t *testing.T) { func TestIntegration_TimeTypes(t *testing.T) {
@@ -882,10 +1019,11 @@ func TestIntegration_TimeTypes(t *testing.T) {
defer table.Delete(ctx) defer table.Delete(ctx)
d := civil.Date{2016, 3, 20} d := civil.Date{2016, 3, 20}
tm := civil.Time{12, 30, 0, 0} tm := civil.Time{12, 30, 0, 6000}
dtm := civil.DateTime{d, tm}
ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
wantRows := [][]Value{ wantRows := [][]Value{
[]Value{d, tm, civil.DateTime{d, tm}, ts}, []Value{d, tm, dtm, ts},
} }
upl := table.Uploader() upl := table.Uploader()
if err := upl.Put(ctx, []*ValuesSaver{ if err := upl.Put(ctx, []*ValuesSaver{
@@ -899,16 +1037,11 @@ func TestIntegration_TimeTypes(t *testing.T) {
// SQL wants DATETIMEs with a space between date and time, but the service // SQL wants DATETIMEs with a space between date and time, but the service
// returns them in RFC3339 form, with a "T" between. // returns them in RFC3339 form, with a "T" between.
query := fmt.Sprintf("INSERT bigquery_integration_test.%s (d, t, dt, ts) "+ query := fmt.Sprintf("INSERT %s.%s (d, t, dt, ts) "+
"VALUES ('%s', '%s', '%s %s', '%s')", "VALUES ('%s', '%s', '%s', '%s')",
table.TableID, d, tm, d, tm, ts.Format("2006-01-02 15:04:05")) table.DatasetID, table.TableID,
q := client.Query(query) d, CivilTimeString(tm), CivilDateTimeString(dtm), ts.Format("2006-01-02 15:04:05"))
q.UseStandardSQL = true // necessary for DML if err := dmlInsert(ctx, query); err != nil {
job, err := q.Run(ctx)
if err != nil {
t.Fatal(err)
}
if err := wait(ctx, job); err != nil {
t.Fatal(err) t.Fatal(err)
} }
wantRows = append(wantRows, wantRows[0]) wantRows = append(wantRows, wantRows[0])
@@ -959,7 +1092,6 @@ func TestIntegration_StandardQuery(t *testing.T) {
} }
for _, c := range testCases { for _, c := range testCases {
q := client.Query(c.query) q := client.Query(c.query)
q.UseStandardSQL = true
it, err := q.Read(ctx) it, err := q.Read(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@@ -1045,7 +1177,14 @@ func TestIntegration_QueryParameters(t *testing.T) {
for _, c := range testCases { for _, c := range testCases {
q := client.Query(c.query) q := client.Query(c.query)
q.Parameters = c.parameters q.Parameters = c.parameters
it, err := q.Read(ctx) job, err := q.Run(ctx)
if err != nil {
t.Fatal(err)
}
if job.LastStatus() == nil {
t.Error("no LastStatus")
}
it, err := job.Read(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -1053,6 +1192,131 @@ func TestIntegration_QueryParameters(t *testing.T) {
} }
} }
func TestIntegration_QueryDryRun(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
q := client.Query("SELECT word from " + stdName + " LIMIT 10")
q.DryRun = true
job, err := q.Run(ctx)
if err != nil {
t.Fatal(err)
}
s := job.LastStatus()
if s.State != Done {
t.Errorf("state is %v, expected Done", s.State)
}
if s.Statistics == nil {
t.Fatal("no statistics")
}
if s.Statistics.Details.(*QueryStatistics).Schema == nil {
t.Fatal("no schema")
}
}
func TestIntegration_ExtractExternal(t *testing.T) {
// Create a table, extract it to GCS, then query it externally.
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
schema := Schema{
{Name: "name", Type: StringFieldType},
{Name: "num", Type: IntegerFieldType},
}
table := newTable(t, schema)
defer table.Delete(ctx)
// Insert table data.
sql := fmt.Sprintf(`INSERT %s.%s (name, num)
VALUES ('a', 1), ('b', 2), ('c', 3)`,
table.DatasetID, table.TableID)
if err := dmlInsert(ctx, sql); err != nil {
t.Fatal(err)
}
// Extract to a GCS object as CSV.
bucketName := testutil.ProjID()
objectName := fmt.Sprintf("bq-test-%s.csv", table.TableID)
uri := fmt.Sprintf("gs://%s/%s", bucketName, objectName)
defer storageClient.Bucket(bucketName).Object(objectName).Delete(ctx)
gr := NewGCSReference(uri)
gr.DestinationFormat = CSV
e := table.ExtractorTo(gr)
job, err := e.Run(ctx)
if err != nil {
t.Fatal(err)
}
conf, err := job.Config()
if err != nil {
t.Fatal(err)
}
config, ok := conf.(*ExtractConfig)
if !ok {
t.Fatalf("got %T, want ExtractConfig", conf)
}
diff := testutil.Diff(config, &e.ExtractConfig,
cmp.AllowUnexported(Table{}),
cmpopts.IgnoreUnexported(Client{}))
if diff != "" {
t.Errorf("got=-, want=+:\n%s", diff)
}
if err := wait(ctx, job); err != nil {
t.Fatal(err)
}
edc := &ExternalDataConfig{
SourceFormat: CSV,
SourceURIs: []string{uri},
Schema: schema,
Options: &CSVOptions{SkipLeadingRows: 1},
}
// Query that CSV file directly.
q := client.Query("SELECT * FROM csv")
q.TableDefinitions = map[string]ExternalData{"csv": edc}
wantRows := [][]Value{
[]Value{"a", int64(1)},
[]Value{"b", int64(2)},
[]Value{"c", int64(3)},
}
iter, err := q.Read(ctx)
if err != nil {
t.Fatal(err)
}
checkRead(t, "external query", iter, wantRows)
// Make a table pointing to the file, and query it.
// BigQuery does not allow a Table.Read on an external table.
table = dataset.Table(tableIDs.New())
err = table.Create(context.Background(), &TableMetadata{
Schema: schema,
ExpirationTime: testTableExpiration,
ExternalDataConfig: edc,
})
if err != nil {
t.Fatal(err)
}
q = client.Query(fmt.Sprintf("SELECT * FROM %s.%s", table.DatasetID, table.TableID))
iter, err = q.Read(ctx)
if err != nil {
t.Fatal(err)
}
checkRead(t, "external table", iter, wantRows)
// While we're here, check that the table metadata is correct.
md, err := table.Metadata(ctx)
if err != nil {
t.Fatal(err)
}
// One difference: since BigQuery returns the schema as part of the ordinary
// table metadata, it does not populate ExternalDataConfig.Schema.
md.ExternalDataConfig.Schema = md.Schema
if diff := testutil.Diff(md.ExternalDataConfig, edc); diff != "" {
t.Errorf("got=-, want=+\n%s", diff)
}
}
func TestIntegration_ReadNullIntoStruct(t *testing.T) { func TestIntegration_ReadNullIntoStruct(t *testing.T) {
// Reading a null into a struct field should return an error (not panic). // Reading a null into a struct field should return an error (not panic).
if client == nil { if client == nil {
@@ -1167,31 +1431,30 @@ func TestIntegration_ListJobs(t *testing.T) {
// About all we can do is list a few jobs. // About all we can do is list a few jobs.
const max = 20 const max = 20
var jis []JobInfo var jobs []*Job
it := client.Jobs(ctx) it := client.Jobs(ctx)
for { for {
ji, err := it.Next() job, err := it.Next()
if err == iterator.Done { if err == iterator.Done {
break break
} }
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
jis = append(jis, ji) jobs = append(jobs, job)
if len(jis) >= max { if len(jobs) >= max {
break break
} }
} }
// We expect that there is at least one job in the last few months. // We expect that there is at least one job in the last few months.
if len(jis) == 0 { if len(jobs) == 0 {
t.Fatal("did not get any jobs") t.Fatal("did not get any jobs")
} }
} }
// Creates a new, temporary table with a unique name and the given schema. // Creates a new, temporary table with a unique name and the given schema.
func newTable(t *testing.T, s Schema) *Table { func newTable(t *testing.T, s Schema) *Table {
name := fmt.Sprintf("t%d", time.Now().UnixNano()) table := dataset.Table(tableIDs.New())
table := dataset.Table(name)
err := table.Create(context.Background(), &TableMetadata{ err := table.Create(context.Background(), &TableMetadata{
Schema: s, Schema: s,
ExpirationTime: testTableExpiration, ExpirationTime: testTableExpiration,
@@ -1268,7 +1531,7 @@ func hasStatusCode(err error, code int) bool {
func wait(ctx context.Context, job *Job) error { func wait(ctx context.Context, job *Job) error {
status, err := job.Wait(ctx) status, err := job.Wait(ctx)
if err != nil { if err != nil {
return fmt.Errorf("getting job status: %v", err) return err
} }
if status.Err() != nil { if status.Err() != nil {
return fmt.Errorf("job status error: %#v", status.Err()) return fmt.Errorf("job status error: %#v", status.Err())

View File

@@ -19,20 +19,15 @@ import (
"reflect" "reflect"
"golang.org/x/net/context" "golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/iterator" "google.golang.org/api/iterator"
) )
// A pageFetcher returns a page of rows, starting from the row specified by token. func newRowIterator(ctx context.Context, t *Table, pf pageFetcher) *RowIterator {
type pageFetcher interface {
fetch(ctx context.Context, s service, token string) (*readDataResult, error)
setPaging(*pagingConf)
}
func newRowIterator(ctx context.Context, s service, pf pageFetcher) *RowIterator {
it := &RowIterator{ it := &RowIterator{
ctx: ctx, ctx: ctx,
service: s, table: t,
pf: pf, pf: pf,
} }
it.pageInfo, it.nextFunc = iterator.NewPageInfo( it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch, it.fetch,
@@ -44,7 +39,7 @@ func newRowIterator(ctx context.Context, s service, pf pageFetcher) *RowIterator
// A RowIterator provides access to the result of a BigQuery lookup. // A RowIterator provides access to the result of a BigQuery lookup.
type RowIterator struct { type RowIterator struct {
ctx context.Context ctx context.Context
service service table *Table
pf pageFetcher pf pageFetcher
pageInfo *iterator.PageInfo pageInfo *iterator.PageInfo
nextFunc func() error nextFunc func() error
@@ -135,16 +130,7 @@ func isStructPtr(x interface{}) bool {
func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) { func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
pc := &pagingConf{} res, err := it.pf(it.ctx, it.table, it.schema, it.StartIndex, int64(pageSize), pageToken)
if pageSize > 0 {
pc.recordsPerRequest = int64(pageSize)
pc.setRecordsPerRequest = true
}
if pageToken == "" {
pc.startIndex = it.StartIndex
}
it.pf.setPaging(pc)
res, err := it.pf.fetch(it.ctx, it.service, pageToken)
if err != nil { if err != nil {
return "", err return "", err
} }
@@ -152,3 +138,69 @@ func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
it.schema = res.schema it.schema = res.schema
return res.pageToken, nil return res.pageToken, nil
} }
// A pageFetcher returns a page of rows from a destination table.
type pageFetcher func(ctx context.Context, _ *Table, _ Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error)
type fetchPageResult struct {
pageToken string
rows [][]Value
totalRows uint64
schema Schema
}
// fetchPage gets a page of rows from t.
func fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
// Fetch the table schema in the background, if necessary.
errc := make(chan error, 1)
if schema != nil {
errc <- nil
} else {
go func() {
var bqt *bq.Table
err := runWithRetry(ctx, func() (err error) {
bqt, err = t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).
Fields("schema").
Context(ctx).
Do()
return err
})
if err == nil && bqt.Schema != nil {
schema = bqToSchema(bqt.Schema)
}
errc <- err
}()
}
call := t.c.bqs.Tabledata.List(t.ProjectID, t.DatasetID, t.TableID)
setClientHeader(call.Header())
if pageToken != "" {
call.PageToken(pageToken)
} else {
call.StartIndex(startIndex)
}
if pageSize > 0 {
call.MaxResults(pageSize)
}
var res *bq.TableDataList
err := runWithRetry(ctx, func() (err error) {
res, err = call.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
err = <-errc
if err != nil {
return nil, err
}
rows, err := convertRows(res.Rows, schema)
if err != nil {
return nil, err
}
return &fetchPageResult{
pageToken: res.PageToken,
rows: rows,
totalRows: uint64(res.TotalRows),
schema: schema,
}, nil
}

View File

@@ -26,27 +26,24 @@ import (
) )
type fetchResponse struct { type fetchResponse struct {
result *readDataResult // The result to return. result *fetchPageResult // The result to return.
err error // The error to return. err error // The error to return.
} }
// pageFetcherStub services fetch requests by returning data from an in-memory list of values. // pageFetcherStub services fetch requests by returning data from an in-memory list of values.
type pageFetcherStub struct { type pageFetcherStub struct {
fetchResponses map[string]fetchResponse fetchResponses map[string]fetchResponse
err error
err error
} }
func (pf *pageFetcherStub) fetch(ctx context.Context, s service, token string) (*readDataResult, error) { func (pf *pageFetcherStub) fetchPage(ctx context.Context, _ *Table, _ Schema, _ uint64, _ int64, pageToken string) (*fetchPageResult, error) {
call, ok := pf.fetchResponses[token] call, ok := pf.fetchResponses[pageToken]
if !ok { if !ok {
pf.err = fmt.Errorf("Unexpected page token: %q", token) pf.err = fmt.Errorf("Unexpected page token: %q", pageToken)
} }
return call.result, call.err return call.result, call.err
} }
func (pf *pageFetcherStub) setPaging(pc *pagingConf) {}
func TestIterator(t *testing.T) { func TestIterator(t *testing.T) {
var ( var (
iiSchema = Schema{ iiSchema = Schema{
@@ -72,7 +69,7 @@ func TestIterator(t *testing.T) {
desc: "Iteration over single empty page", desc: "Iteration over single empty page",
fetchResponses: map[string]fetchResponse{ fetchResponses: map[string]fetchResponse{
"": { "": {
result: &readDataResult{ result: &fetchPageResult{
pageToken: "", pageToken: "",
rows: [][]Value{}, rows: [][]Value{},
schema: Schema{}, schema: Schema{},
@@ -86,7 +83,7 @@ func TestIterator(t *testing.T) {
desc: "Iteration over single page", desc: "Iteration over single page",
fetchResponses: map[string]fetchResponse{ fetchResponses: map[string]fetchResponse{
"": { "": {
result: &readDataResult{ result: &fetchPageResult{
pageToken: "", pageToken: "",
rows: [][]Value{{1, 2}, {11, 12}}, rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema, schema: iiSchema,
@@ -100,7 +97,7 @@ func TestIterator(t *testing.T) {
desc: "Iteration over single page with different schema", desc: "Iteration over single page with different schema",
fetchResponses: map[string]fetchResponse{ fetchResponses: map[string]fetchResponse{
"": { "": {
result: &readDataResult{ result: &fetchPageResult{
pageToken: "", pageToken: "",
rows: [][]Value{{"1", 2}, {"11", 12}}, rows: [][]Value{{"1", 2}, {"11", 12}},
schema: siSchema, schema: siSchema,
@@ -114,14 +111,14 @@ func TestIterator(t *testing.T) {
desc: "Iteration over two pages", desc: "Iteration over two pages",
fetchResponses: map[string]fetchResponse{ fetchResponses: map[string]fetchResponse{
"": { "": {
result: &readDataResult{ result: &fetchPageResult{
pageToken: "a", pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}}, rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema, schema: iiSchema,
}, },
}, },
"a": { "a": {
result: &readDataResult{ result: &fetchPageResult{
pageToken: "", pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}}, rows: [][]Value{{101, 102}, {111, 112}},
schema: iiSchema, schema: iiSchema,
@@ -135,21 +132,21 @@ func TestIterator(t *testing.T) {
desc: "Server response includes empty page", desc: "Server response includes empty page",
fetchResponses: map[string]fetchResponse{ fetchResponses: map[string]fetchResponse{
"": { "": {
result: &readDataResult{ result: &fetchPageResult{
pageToken: "a", pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}}, rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema, schema: iiSchema,
}, },
}, },
"a": { "a": {
result: &readDataResult{ result: &fetchPageResult{
pageToken: "b", pageToken: "b",
rows: [][]Value{}, rows: [][]Value{},
schema: iiSchema, schema: iiSchema,
}, },
}, },
"b": { "b": {
result: &readDataResult{ result: &fetchPageResult{
pageToken: "", pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}}, rows: [][]Value{{101, 102}, {111, 112}},
schema: iiSchema, schema: iiSchema,
@@ -163,7 +160,7 @@ func TestIterator(t *testing.T) {
desc: "Fetch error", desc: "Fetch error",
fetchResponses: map[string]fetchResponse{ fetchResponses: map[string]fetchResponse{
"": { "": {
result: &readDataResult{ result: &fetchPageResult{
pageToken: "a", pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}}, rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema, schema: iiSchema,
@@ -173,7 +170,7 @@ func TestIterator(t *testing.T) {
// We returns some data from this fetch, but also an error. // We returns some data from this fetch, but also an error.
// So the end result should include only data from the previous fetch. // So the end result should include only data from the previous fetch.
err: fetchFailure, err: fetchFailure,
result: &readDataResult{ result: &fetchPageResult{
pageToken: "b", pageToken: "b",
rows: [][]Value{{101, 102}, {111, 112}}, rows: [][]Value{{101, 102}, {111, 112}},
schema: iiSchema, schema: iiSchema,
@@ -190,14 +187,14 @@ func TestIterator(t *testing.T) {
pageToken: "a", pageToken: "a",
fetchResponses: map[string]fetchResponse{ fetchResponses: map[string]fetchResponse{
"": { "": {
result: &readDataResult{ result: &fetchPageResult{
pageToken: "a", pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}}, rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema, schema: iiSchema,
}, },
}, },
"a": { "a": {
result: &readDataResult{ result: &fetchPageResult{
pageToken: "", pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}}, rows: [][]Value{{101, 102}, {111, 112}},
schema: iiSchema, schema: iiSchema,
@@ -213,21 +210,21 @@ func TestIterator(t *testing.T) {
pageToken: "b", pageToken: "b",
fetchResponses: map[string]fetchResponse{ fetchResponses: map[string]fetchResponse{
"": { "": {
result: &readDataResult{ result: &fetchPageResult{
pageToken: "a", pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}}, rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema, schema: iiSchema,
}, },
}, },
"a": { "a": {
result: &readDataResult{ result: &fetchPageResult{
pageToken: "b", pageToken: "b",
rows: [][]Value{{101, 102}, {111, 112}}, rows: [][]Value{{101, 102}, {111, 112}},
schema: iiSchema, schema: iiSchema,
}, },
}, },
"b": { "b": {
result: &readDataResult{}, result: &fetchPageResult{},
}, },
}, },
// In this test case, Next will return false on its first call, // In this test case, Next will return false on its first call,
@@ -241,7 +238,7 @@ func TestIterator(t *testing.T) {
pf := &pageFetcherStub{ pf := &pageFetcherStub{
fetchResponses: tc.fetchResponses, fetchResponses: tc.fetchResponses,
} }
it := newRowIterator(context.Background(), nil, pf) it := newRowIterator(context.Background(), nil, pf.fetchPage)
it.PageInfo().Token = tc.pageToken it.PageInfo().Token = tc.pageToken
values, schema, err := consumeRowIterator(it) values, schema, err := consumeRowIterator(it)
if err != tc.wantErr { if err != tc.wantErr {
@@ -291,7 +288,7 @@ func TestNextDuringErrorState(t *testing.T) {
"": {err: errors.New("bang")}, "": {err: errors.New("bang")},
}, },
} }
it := newRowIterator(context.Background(), nil, pf) it := newRowIterator(context.Background(), nil, pf.fetchPage)
var vals []Value var vals []Value
if err := it.Next(&vals); err == nil { if err := it.Next(&vals); err == nil {
t.Errorf("Expected error after calling Next") t.Errorf("Expected error after calling Next")
@@ -309,7 +306,7 @@ func TestNextAfterFinished(t *testing.T) {
{ {
fetchResponses: map[string]fetchResponse{ fetchResponses: map[string]fetchResponse{
"": { "": {
result: &readDataResult{ result: &fetchPageResult{
pageToken: "", pageToken: "",
rows: [][]Value{{1, 2}, {11, 12}}, rows: [][]Value{{1, 2}, {11, 12}},
}, },
@@ -320,7 +317,7 @@ func TestNextAfterFinished(t *testing.T) {
{ {
fetchResponses: map[string]fetchResponse{ fetchResponses: map[string]fetchResponse{
"": { "": {
result: &readDataResult{ result: &fetchPageResult{
pageToken: "", pageToken: "",
rows: [][]Value{}, rows: [][]Value{},
}, },
@@ -334,7 +331,7 @@ func TestNextAfterFinished(t *testing.T) {
pf := &pageFetcherStub{ pf := &pageFetcherStub{
fetchResponses: tc.fetchResponses, fetchResponses: tc.fetchResponses,
} }
it := newRowIterator(context.Background(), nil, pf) it := newRowIterator(context.Background(), nil, pf.fetchPage)
values, _, err := consumeRowIterator(it) values, _, err := consumeRowIterator(it)
if err != nil { if err != nil {
@@ -358,7 +355,7 @@ func TestIteratorNextTypes(t *testing.T) {
struct{}{}, struct{}{},
} { } {
if err := it.Next(v); err == nil { if err := it.Next(v); err == nil {
t.Error("%v: want error, got nil", v) t.Errorf("%v: want error, got nil", v)
} }
} }
} }

View File

@@ -26,6 +26,7 @@ import (
gax "github.com/googleapis/gax-go" gax "github.com/googleapis/gax-go"
"golang.org/x/net/context" "golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2" bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator" "google.golang.org/api/iterator"
) )
@@ -35,20 +36,19 @@ type Job struct {
projectID string projectID string
jobID string jobID string
isQuery bool config *bq.JobConfiguration
destinationTable *bq.TableReference // table to read query results from lastStatus *JobStatus
} }
// JobFromID creates a Job which refers to an existing BigQuery job. The job // JobFromID creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package. For example, the job may have // need not have been created by this package. For example, the job may have
// been created in the BigQuery console. // been created in the BigQuery console.
func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) { func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
job, err := c.service.getJob(ctx, c.projectID, id) bqjob, err := c.getJobInternal(ctx, id, "configuration", "jobReference", "status", "statistics")
if err != nil { if err != nil {
return nil, err return nil, err
} }
job.c = c return bqToJob(bqjob, c)
return job, nil
} }
// ID returns the job's ID. // ID returns the job's ID.
@@ -80,20 +80,62 @@ type JobStatus struct {
Statistics *JobStatistics Statistics *JobStatistics
} }
// JobConfig contains configuration information for a job. It is implemented by
// *CopyConfig, *ExtractConfig, *LoadConfig and *QueryConfig.
type JobConfig interface {
isJobConfig()
}
func (*CopyConfig) isJobConfig() {}
func (*ExtractConfig) isJobConfig() {}
func (*LoadConfig) isJobConfig() {}
func (*QueryConfig) isJobConfig() {}
// Config returns the configuration information for j.
func (j *Job) Config() (JobConfig, error) {
return bqToJobConfig(j.config, j.c)
}
func bqToJobConfig(q *bq.JobConfiguration, c *Client) (JobConfig, error) {
switch {
case q == nil:
return nil, nil
case q.Copy != nil:
return bqToCopyConfig(q, c), nil
case q.Extract != nil:
return bqToExtractConfig(q, c), nil
case q.Load != nil:
return bqToLoadConfig(q, c), nil
case q.Query != nil:
return bqToQueryConfig(q, c)
default:
return nil, nil
}
}
// JobIDConfig describes how to create an ID for a job.
type JobIDConfig struct {
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string
// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool
}
// createJobRef creates a JobReference. // createJobRef creates a JobReference.
// projectID must be non-empty. // projectID must be non-empty.
func createJobRef(jobID string, addJobIDSuffix bool, projectID string) *bq.JobReference { func (j *JobIDConfig) createJobRef(projectID string) *bq.JobReference {
if jobID == "" {
jobID = randomJobIDFn()
} else if addJobIDSuffix {
jobID += "-" + randomJobIDFn()
}
// We don't check whether projectID is empty; the server will return an // We don't check whether projectID is empty; the server will return an
// error when it encounters the resulting JobReference. // error when it encounters the resulting JobReference.
return &bq.JobReference{ jr := &bq.JobReference{ProjectId: projectID}
JobId: jobID, if j.JobID == "" {
ProjectId: projectID, jr.JobId = randomIDFn()
} else if j.AddJobIDSuffix {
jr.JobId = j.JobID + "-" + randomIDFn()
} else {
jr.JobId = j.JobID
} }
return jr
} }
const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
@@ -104,11 +146,15 @@ var (
) )
// For testing. // For testing.
var randomJobIDFn = randomJobID var randomIDFn = randomID
func randomJobID() string { // As of August 2017, the BigQuery service uses 27 alphanumeric characters for
// As of August 2017, the BigQuery service uses 27 alphanumeric characters for suffixes. // suffixes.
var b [27]byte const randomIDLen = 27
func randomID() string {
// This is used for both job IDs and insert IDs.
var b [randomIDLen]byte
rngMu.Lock() rngMu.Lock()
for i := 0; i < len(b); i++ { for i := 0; i < len(b); i++ {
b[i] = alphanum[rng.Intn(len(alphanum))] b[i] = alphanum[rng.Intn(len(alphanum))]
@@ -128,33 +174,43 @@ func (s *JobStatus) Err() error {
return s.err return s.err
} }
// Fill in the client field of Tables in the statistics. // Status retrieves the current status of the job from BigQuery. It fails if the Status could not be determined.
func (s *JobStatus) setClient(c *Client) {
if s.Statistics == nil {
return
}
if qs, ok := s.Statistics.Details.(*QueryStatistics); ok {
for _, t := range qs.ReferencedTables {
t.c = c
}
}
}
// Status returns the current status of the job. It fails if the Status could not be determined.
func (j *Job) Status(ctx context.Context) (*JobStatus, error) { func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
js, err := j.c.service.jobStatus(ctx, j.projectID, j.jobID) bqjob, err := j.c.getJobInternal(ctx, j.jobID, "status", "statistics")
if err != nil { if err != nil {
return nil, err return nil, err
} }
js.setClient(j.c) if err := j.setStatus(bqjob.Status); err != nil {
return js, nil return nil, err
}
j.setStatistics(bqjob.Statistics, j.c)
return j.lastStatus, nil
}
// LastStatus returns the most recently retrieved status of the job. The status is
// retrieved when a new job is created, or when JobFromID or Job.Status is called.
// Call Job.Status to get the most up-to-date information about a job.
func (j *Job) LastStatus() *JobStatus {
return j.lastStatus
} }
// Cancel requests that a job be cancelled. This method returns without waiting for // Cancel requests that a job be cancelled. This method returns without waiting for
// cancellation to take effect. To check whether the job has terminated, use Job.Status. // cancellation to take effect. To check whether the job has terminated, use Job.Status.
// Cancelled jobs may still incur costs. // Cancelled jobs may still incur costs.
func (j *Job) Cancel(ctx context.Context) error { func (j *Job) Cancel(ctx context.Context) error {
return j.c.service.jobCancel(ctx, j.projectID, j.jobID) // Jobs.Cancel returns a job entity, but the only relevant piece of
// data it may contain (the status of the job) is unreliable. From the
// docs: "This call will return immediately, and the client will need
// to poll for the job status to see if the cancel completed
// successfully". So it would be misleading to return a status.
call := j.c.bqs.Jobs.Cancel(j.projectID, j.jobID).
Fields(). // We don't need any of the response data.
Context(ctx)
setClientHeader(call.Header())
return runWithRetry(ctx, func() error {
_, err := call.Do()
return err
})
} }
// Wait blocks until the job or the context is done. It returns the final status // Wait blocks until the job or the context is done. It returns the final status
@@ -163,9 +219,9 @@ func (j *Job) Cancel(ctx context.Context) error {
// Wait returns nil if the status was retrieved successfully, even if // Wait returns nil if the status was retrieved successfully, even if
// status.Err() != nil. So callers must check both errors. See the example. // status.Err() != nil. So callers must check both errors. See the example.
func (j *Job) Wait(ctx context.Context) (*JobStatus, error) { func (j *Job) Wait(ctx context.Context) (*JobStatus, error) {
if j.isQuery { if j.isQuery() {
// We can avoid polling for query jobs. // We can avoid polling for query jobs.
if _, err := j.c.service.waitForQuery(ctx, j.projectID, j.jobID); err != nil { if _, err := j.waitForQuery(ctx, j.projectID); err != nil {
return nil, err return nil, err
} }
// Note: extra RPC even if you just want to wait for the query to finish. // Note: extra RPC even if you just want to wait for the query to finish.
@@ -196,30 +252,54 @@ func (j *Job) Wait(ctx context.Context) (*JobStatus, error) {
// Read fetches the results of a query job. // Read fetches the results of a query job.
// If j is not a query job, Read returns an error. // If j is not a query job, Read returns an error.
func (j *Job) Read(ctx context.Context) (*RowIterator, error) { func (j *Job) Read(ctx context.Context) (*RowIterator, error) {
if !j.isQuery { return j.read(ctx, j.waitForQuery, fetchPage)
}
func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, string) (Schema, error), pf pageFetcher) (*RowIterator, error) {
if !j.isQuery() {
return nil, errors.New("bigquery: cannot read from a non-query job") return nil, errors.New("bigquery: cannot read from a non-query job")
} }
var projectID string destTable := j.config.Query.DestinationTable
if j.destinationTable != nil { // The destination table should only be nil if there was a query error.
projectID = j.destinationTable.ProjectId if destTable == nil {
} else { return nil, errors.New("bigquery: query job missing destination table")
projectID = j.c.projectID
} }
projectID := destTable.ProjectId
schema, err := j.c.service.waitForQuery(ctx, projectID, j.jobID) schema, err := waitForQuery(ctx, projectID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// The destination table should only be nil if there was a query error. dt := bqToTable(destTable, j.c)
if j.destinationTable == nil { it := newRowIterator(ctx, dt, pf)
return nil, errors.New("bigquery: query job missing destination table") it.schema = schema
return it, nil
}
// waitForQuery waits for the query job to complete and returns its schema.
func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, error) {
// Use GetQueryResults only to wait for completion, not to read results.
call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Context(ctx).MaxResults(0)
setClientHeader(call.Header())
backoff := gax.Backoff{
Initial: 1 * time.Second,
Multiplier: 2,
Max: 60 * time.Second,
} }
return newRowIterator(ctx, j.c.service, &readTableConf{ var res *bq.GetQueryResultsResponse
projectID: j.destinationTable.ProjectId, err := internal.Retry(ctx, backoff, func() (stop bool, err error) {
datasetID: j.destinationTable.DatasetId, res, err = call.Do()
tableID: j.destinationTable.TableId, if err != nil {
schema: schema, return !retryableError(err), err
}), nil }
if !res.JobComplete { // GetQueryResults may return early without error; retry.
return false, nil
}
return true, nil
})
if err != nil {
return nil, err
}
return bqToSchema(res.Schema), nil
} }
// JobStatistics contains statistics about a job. // JobStatistics contains statistics about a job.
@@ -373,12 +453,6 @@ func (c *Client) Jobs(ctx context.Context) *JobIterator {
return it return it
} }
// A JobInfo consists of a Job and a JobStatus.
type JobInfo struct {
Job *Job
Status *JobStatus
}
// JobIterator iterates over jobs in a project. // JobIterator iterates over jobs in a project.
type JobIterator struct { type JobIterator struct {
ProjectID string // Project ID of the jobs to list. Default is the client's project. ProjectID string // Project ID of the jobs to list. Default is the client's project.
@@ -389,14 +463,14 @@ type JobIterator struct {
c *Client c *Client
pageInfo *iterator.PageInfo pageInfo *iterator.PageInfo
nextFunc func() error nextFunc func() error
items []JobInfo items []*Job
} }
func (it *JobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } func (it *JobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
func (it *JobIterator) Next() (JobInfo, error) { func (it *JobIterator) Next() (*Job, error) {
if err := it.nextFunc(); err != nil { if err := it.nextFunc(); err != nil {
return JobInfo{}, err return nil, err
} }
item := it.items[0] item := it.items[0]
it.items = it.items[1:] it.items = it.items[1:]
@@ -417,14 +491,179 @@ func (it *JobIterator) fetch(pageSize int, pageToken string) (string, error) {
default: default:
return "", fmt.Errorf("bigquery: invalid value for JobIterator.State: %d", it.State) return "", fmt.Errorf("bigquery: invalid value for JobIterator.State: %d", it.State)
} }
jobInfos, nextPageToken, err := it.c.service.listJobs(it.ctx, it.ProjectID, pageSize, pageToken, it.AllUsers, st)
req := it.c.bqs.Jobs.List(it.ProjectID).
Context(it.ctx).
PageToken(pageToken).
Projection("full").
AllUsers(it.AllUsers)
if st != "" {
req.StateFilter(st)
}
setClientHeader(req.Header())
if pageSize > 0 {
req.MaxResults(int64(pageSize))
}
res, err := req.Do()
if err != nil { if err != nil {
return "", err return "", err
} }
for _, ji := range jobInfos { for _, j := range res.Jobs {
ji.Job.c = it.c job, err := convertListedJob(j, it.c)
ji.Status.setClient(it.c) if err != nil {
it.items = append(it.items, ji) return "", err
}
it.items = append(it.items, job)
} }
return nextPageToken, nil return res.NextPageToken, nil
}
func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) {
return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, c)
}
func (c *Client) getJobInternal(ctx context.Context, jobID string, fields ...googleapi.Field) (*bq.Job, error) {
var job *bq.Job
call := c.bqs.Jobs.Get(c.projectID, jobID).Context(ctx)
if len(fields) > 0 {
call = call.Fields(fields...)
}
setClientHeader(call.Header())
err := runWithRetry(ctx, func() (err error) {
job, err = call.Do()
return err
})
if err != nil {
return nil, err
}
return job, nil
}
func bqToJob(q *bq.Job, c *Client) (*Job, error) {
return bqToJob2(q.JobReference, q.Configuration, q.Status, q.Statistics, c)
}
func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt *bq.JobStatistics, c *Client) (*Job, error) {
j := &Job{
projectID: qr.ProjectId,
jobID: qr.JobId,
c: c,
}
j.setConfig(qc)
if err := j.setStatus(qs); err != nil {
return nil, err
}
j.setStatistics(qt, c)
return j, nil
}
func (j *Job) setConfig(config *bq.JobConfiguration) {
if config == nil {
return
}
j.config = config
}
func (j *Job) isQuery() bool {
return j.config != nil && j.config.Query != nil
}
var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}
func (j *Job) setStatus(qs *bq.JobStatus) error {
if qs == nil {
return nil
}
state, ok := stateMap[qs.State]
if !ok {
return fmt.Errorf("unexpected job state: %v", qs.State)
}
j.lastStatus = &JobStatus{
State: state,
err: nil,
}
if err := bqToError(qs.ErrorResult); state == Done && err != nil {
j.lastStatus.err = err
}
for _, ep := range qs.Errors {
j.lastStatus.Errors = append(j.lastStatus.Errors, bqToError(ep))
}
return nil
}
func (j *Job) setStatistics(s *bq.JobStatistics, c *Client) {
if s == nil || j.lastStatus == nil {
return
}
js := &JobStatistics{
CreationTime: unixMillisToTime(s.CreationTime),
StartTime: unixMillisToTime(s.StartTime),
EndTime: unixMillisToTime(s.EndTime),
TotalBytesProcessed: s.TotalBytesProcessed,
}
switch {
case s.Extract != nil:
js.Details = &ExtractStatistics{
DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts),
}
case s.Load != nil:
js.Details = &LoadStatistics{
InputFileBytes: s.Load.InputFileBytes,
InputFiles: s.Load.InputFiles,
OutputBytes: s.Load.OutputBytes,
OutputRows: s.Load.OutputRows,
}
case s.Query != nil:
var names []string
for _, qp := range s.Query.UndeclaredQueryParameters {
names = append(names, qp.Name)
}
var tables []*Table
for _, tr := range s.Query.ReferencedTables {
tables = append(tables, bqToTable(tr, c))
}
js.Details = &QueryStatistics{
BillingTier: s.Query.BillingTier,
CacheHit: s.Query.CacheHit,
StatementType: s.Query.StatementType,
TotalBytesBilled: s.Query.TotalBytesBilled,
TotalBytesProcessed: s.Query.TotalBytesProcessed,
NumDMLAffectedRows: s.Query.NumDmlAffectedRows,
QueryPlan: queryPlanFromProto(s.Query.QueryPlan),
Schema: bqToSchema(s.Query.Schema),
ReferencedTables: tables,
UndeclaredQueryParameterNames: names,
}
}
j.lastStatus.Statistics = js
}
func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage {
var res []*ExplainQueryStage
for _, s := range stages {
var steps []*ExplainQueryStep
for _, p := range s.Steps {
steps = append(steps, &ExplainQueryStep{
Kind: p.Kind,
Substeps: p.Substeps,
})
}
res = append(res, &ExplainQueryStage{
ComputeRatioAvg: s.ComputeRatioAvg,
ComputeRatioMax: s.ComputeRatioMax,
ID: s.Id,
Name: s.Name,
ReadRatioAvg: s.ReadRatioAvg,
ReadRatioMax: s.ReadRatioMax,
RecordsRead: s.RecordsRead,
RecordsWritten: s.RecordsWritten,
Status: s.Status,
Steps: steps,
WaitRatioAvg: s.WaitRatioAvg,
WaitRatioMax: s.WaitRatioMax,
WriteRatioAvg: s.WriteRatioAvg,
WriteRatioMax: s.WriteRatioMax,
})
}
return res
} }

View File

@@ -18,12 +18,11 @@ import (
"testing" "testing"
"cloud.google.com/go/internal/testutil" "cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2" bq "google.golang.org/api/bigquery/v2"
) )
func TestCreateJobRef(t *testing.T) { func TestCreateJobRef(t *testing.T) {
defer fixRandomJobID("RANDOM")() defer fixRandomID("RANDOM")()
for _, test := range []struct { for _, test := range []struct {
jobID string jobID string
addJobIDSuffix bool addJobIDSuffix bool
@@ -50,7 +49,8 @@ func TestCreateJobRef(t *testing.T) {
want: "foo-RANDOM", want: "foo-RANDOM",
}, },
} { } {
jr := createJobRef(test.jobID, test.addJobIDSuffix, "projectID") jc := JobIDConfig{JobID: test.jobID, AddJobIDSuffix: test.addJobIDSuffix}
jr := jc.createJobRef("projectID")
got := jr.JobId got := jr.JobId
if got != test.want { if got != test.want {
t.Errorf("%q, %t: got %q, want %q", test.jobID, test.addJobIDSuffix, got, test.want) t.Errorf("%q, %t: got %q, want %q", test.jobID, test.addJobIDSuffix, got, test.want)
@@ -58,10 +58,10 @@ func TestCreateJobRef(t *testing.T) {
} }
} }
func fixRandomJobID(s string) func() { func fixRandomID(s string) func() {
prev := randomJobIDFn prev := randomIDFn
randomJobIDFn = func() string { return s } randomIDFn = func() string { return s }
return func() { randomJobIDFn = prev } return func() { randomIDFn = prev }
} }
func checkJob(t *testing.T, i int, got, want *bq.Job) { func checkJob(t *testing.T, i int, got, want *bq.Job) {
@@ -78,18 +78,3 @@ func checkJob(t *testing.T, i int, got, want *bq.Job) {
t.Errorf("#%d: (got=-, want=+) %s", i, d) t.Errorf("#%d: (got=-, want=+) %s", i, d)
} }
} }
type testService struct {
*bq.Job
service
}
func (s *testService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
s.Job = conf.job
return &Job{}, nil
}
func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
return &JobStatus{State: Done}, nil
}

View File

@@ -15,18 +15,14 @@
package bigquery package bigquery
import ( import (
"io"
"golang.org/x/net/context" "golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2" bq "google.golang.org/api/bigquery/v2"
) )
// LoadConfig holds the configuration for a load job. // LoadConfig holds the configuration for a load job.
type LoadConfig struct { type LoadConfig struct {
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string
// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool
// Src is the source from which data will be loaded. // Src is the source from which data will be loaded.
Src LoadSource Src LoadSource
@@ -40,10 +36,53 @@ type LoadConfig struct {
// WriteDisposition specifies how existing data in the destination table is treated. // WriteDisposition specifies how existing data in the destination table is treated.
// The default is WriteAppend. // The default is WriteAppend.
WriteDisposition TableWriteDisposition WriteDisposition TableWriteDisposition
// The labels associated with this job.
Labels map[string]string
// If non-nil, the destination table is partitioned by time.
TimePartitioning *TimePartitioning
}
func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
config := &bq.JobConfiguration{
Labels: l.Labels,
Load: &bq.JobConfigurationLoad{
CreateDisposition: string(l.CreateDisposition),
WriteDisposition: string(l.WriteDisposition),
DestinationTable: l.Dst.toBQ(),
TimePartitioning: l.TimePartitioning.toBQ(),
},
}
media := l.Src.populateLoadConfig(config.Load)
return config, media
}
func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig {
lc := &LoadConfig{
Labels: q.Labels,
CreateDisposition: TableCreateDisposition(q.Load.CreateDisposition),
WriteDisposition: TableWriteDisposition(q.Load.WriteDisposition),
Dst: bqToTable(q.Load.DestinationTable, c),
TimePartitioning: bqToTimePartitioning(q.Load.TimePartitioning),
}
var fc *FileConfig
if len(q.Load.SourceUris) == 0 {
s := NewReaderSource(nil)
fc = &s.FileConfig
lc.Src = s
} else {
s := NewGCSReference(q.Load.SourceUris...)
fc = &s.FileConfig
lc.Src = s
}
bqPopulateFileConfig(q.Load, fc)
return lc
} }
// A Loader loads data from Google Cloud Storage into a BigQuery table. // A Loader loads data from Google Cloud Storage into a BigQuery table.
type Loader struct { type Loader struct {
JobIDConfig
LoadConfig LoadConfig
c *Client c *Client
} }
@@ -54,7 +93,8 @@ type Loader struct {
// This package defines two LoadSources: GCSReference, for Google Cloud Storage // This package defines two LoadSources: GCSReference, for Google Cloud Storage
// objects, and ReaderSource, for data read from an io.Reader. // objects, and ReaderSource, for data read from an io.Reader.
type LoadSource interface { type LoadSource interface {
populateInsertJobConfForLoad(conf *insertJobConf) // populates config, returns media
populateLoadConfig(*bq.JobConfigurationLoad) io.Reader
} }
// LoaderFrom returns a Loader which can be used to load data into a BigQuery table. // LoaderFrom returns a Loader which can be used to load data into a BigQuery table.
@@ -73,17 +113,14 @@ func (t *Table) LoaderFrom(src LoadSource) *Loader {
// Run initiates a load job. // Run initiates a load job.
func (l *Loader) Run(ctx context.Context) (*Job, error) { func (l *Loader) Run(ctx context.Context) (*Job, error) {
job := &bq.Job{ job, media := l.newJob()
JobReference: createJobRef(l.JobID, l.AddJobIDSuffix, l.c.projectID), return l.c.insertJob(ctx, job, media)
Configuration: &bq.JobConfiguration{ }
Load: &bq.JobConfigurationLoad{
CreateDisposition: string(l.CreateDisposition), func (l *Loader) newJob() (*bq.Job, io.Reader) {
WriteDisposition: string(l.WriteDisposition), config, media := l.LoadConfig.toBQ()
}, return &bq.Job{
}, JobReference: l.JobIDConfig.createJobRef(l.c.projectID),
} Configuration: config,
conf := &insertJobConf{job: job} }, media
l.Src.populateInsertJobConfForLoad(conf)
job.Configuration.Load.DestinationTable = l.Dst.tableRefProto()
return l.c.insertJob(ctx, conf)
} }

View File

@@ -17,8 +17,11 @@ package bigquery
import ( import (
"strings" "strings"
"testing" "testing"
"time"
"golang.org/x/net/context" "cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
bq "google.golang.org/api/bigquery/v2" bq "google.golang.org/api/bigquery/v2"
) )
@@ -67,12 +70,13 @@ func bqNestedFieldSchema() *bq.TableFieldSchema {
} }
func TestLoad(t *testing.T) { func TestLoad(t *testing.T) {
defer fixRandomJobID("RANDOM")() defer fixRandomID("RANDOM")()
c := &Client{projectID: "client-project-id"} c := &Client{projectID: "client-project-id"}
testCases := []struct { testCases := []struct {
dst *Table dst *Table
src LoadSource src LoadSource
jobID string
config LoadConfig config LoadConfig
want *bq.Job want *bq.Job
}{ }{
@@ -82,17 +86,24 @@ func TestLoad(t *testing.T) {
want: defaultLoadJob(), want: defaultLoadJob(),
}, },
{ {
dst: c.Dataset("dataset-id").Table("table-id"), dst: c.Dataset("dataset-id").Table("table-id"),
jobID: "ajob",
config: LoadConfig{ config: LoadConfig{
CreateDisposition: CreateNever, CreateDisposition: CreateNever,
WriteDisposition: WriteTruncate, WriteDisposition: WriteTruncate,
JobID: "ajob", Labels: map[string]string{"a": "b"},
TimePartitioning: &TimePartitioning{Expiration: 1234 * time.Millisecond},
}, },
src: NewGCSReference("uri"), src: NewGCSReference("uri"),
want: func() *bq.Job { want: func() *bq.Job {
j := defaultLoadJob() j := defaultLoadJob()
j.Configuration.Labels = map[string]string{"a": "b"}
j.Configuration.Load.CreateDisposition = "CREATE_NEVER" j.Configuration.Load.CreateDisposition = "CREATE_NEVER"
j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE" j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE"
j.Configuration.Load.TimePartitioning = &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: 1234,
}
j.JobReference = &bq.JobReference{ j.JobReference = &bq.JobReference{
JobId: "ajob", JobId: "ajob",
ProjectId: "client-project-id", ProjectId: "client-project-id",
@@ -211,16 +222,23 @@ func TestLoad(t *testing.T) {
} }
for i, tc := range testCases { for i, tc := range testCases {
s := &testService{}
c.service = s
loader := tc.dst.LoaderFrom(tc.src) loader := tc.dst.LoaderFrom(tc.src)
loader.JobID = tc.jobID
tc.config.Src = tc.src tc.config.Src = tc.src
tc.config.Dst = tc.dst tc.config.Dst = tc.dst
loader.LoadConfig = tc.config loader.LoadConfig = tc.config
if _, err := loader.Run(context.Background()); err != nil { got, _ := loader.newJob()
t.Errorf("#%d: err calling Loader.Run: %v", i, err) checkJob(t, i, got, tc.want)
continue
jc, err := bqToJobConfig(got.Configuration, c)
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
diff := testutil.Diff(jc.(*LoadConfig), &loader.LoadConfig,
cmp.AllowUnexported(Table{}, Client{}),
cmpopts.IgnoreUnexported(ReaderSource{}))
if diff != "" {
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
} }
checkJob(t, i, s.Job, tc.want)
} }
} }

View File

@@ -20,6 +20,7 @@ import (
"fmt" "fmt"
"reflect" "reflect"
"regexp" "regexp"
"strings"
"time" "time"
"cloud.google.com/go/civil" "cloud.google.com/go/civil"
@@ -36,17 +37,24 @@ var (
validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$") validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$")
) )
const nullableTagOption = "nullable"
func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
if s := t.Get("bigquery"); s != "" { name, keep, opts, err := fields.ParseStandardTag("bigquery", t)
if s == "-" { if err != nil {
return "", false, nil, nil return "", false, nil, err
}
if !validFieldName.MatchString(s) {
return "", false, nil, errInvalidFieldName
}
return s, true, nil, nil
} }
return "", true, nil, nil if name != "" && !validFieldName.MatchString(name) {
return "", false, nil, errInvalidFieldName
}
for _, opt := range opts {
if opt != nullableTagOption {
return "", false, nil, fmt.Errorf(
"bigquery: invalid tag option %q. The only valid option is %q",
opt, nullableTagOption)
}
}
return name, keep, opts, nil
} }
var fieldCache = fields.NewCache(bqTagParser, nil, nil) var fieldCache = fields.NewCache(bqTagParser, nil, nil)
@@ -77,8 +85,9 @@ type QueryParameter struct {
Name string Name string
// Value is the value of the parameter. // Value is the value of the parameter.
// The following Go types are supported, with their corresponding //
// Bigquery types: // When you create a QueryParameter to send to BigQuery, the following Go types
// are supported, with their corresponding Bigquery types:
// int, int8, int16, int32, int64, uint8, uint16, uint32: INT64 // int, int8, int16, int32, int64, uint8, uint16, uint32: INT64
// Note that uint, uint64 and uintptr are not supported, because // Note that uint, uint64 and uintptr are not supported, because
// they may contain values that cannot fit into a 64-bit signed integer. // they may contain values that cannot fit into a 64-bit signed integer.
@@ -89,10 +98,17 @@ type QueryParameter struct {
// time.Time: TIMESTAMP // time.Time: TIMESTAMP
// Arrays and slices of the above. // Arrays and slices of the above.
// Structs of the above. Only the exported fields are used. // Structs of the above. Only the exported fields are used.
//
// When a QueryParameter is returned inside a QueryConfig from a call to
// Job.Config:
// Integers are of type int64.
// Floating-point values are of type float64.
// Arrays are of type []interface{}, regardless of the array element type.
// Structs are of type map[string]interface{}.
Value interface{} Value interface{}
} }
func (p QueryParameter) toRaw() (*bq.QueryParameter, error) { func (p QueryParameter) toBQ() (*bq.QueryParameter, error) {
pv, err := paramValue(reflect.ValueOf(p.Value)) pv, err := paramValue(reflect.ValueOf(p.Value))
if err != nil { if err != nil {
return nil, err return nil, err
@@ -189,12 +205,11 @@ func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {
case typeOfTime: case typeOfTime:
// civil.Time has nanosecond resolution, but BigQuery TIME only microsecond. // civil.Time has nanosecond resolution, but BigQuery TIME only microsecond.
res.Value = civilTimeParamString(v.Interface().(civil.Time)) res.Value = CivilTimeString(v.Interface().(civil.Time))
return res, nil return res, nil
case typeOfDateTime: case typeOfDateTime:
dt := v.Interface().(civil.DateTime) res.Value = CivilDateTimeString(v.Interface().(civil.DateTime))
res.Value = dt.Date.String() + " " + civilTimeParamString(dt.Time)
return res, nil return res, nil
case typeOfGoTime: case typeOfGoTime:
@@ -254,12 +269,81 @@ func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {
return res, nil return res, nil
} }
func civilTimeParamString(t civil.Time) string { func bqToQueryParameter(q *bq.QueryParameter) (QueryParameter, error) {
if t.Nanosecond == 0 { p := QueryParameter{Name: q.Name}
return t.String() val, err := convertParamValue(q.ParameterValue, q.ParameterType)
} else { if err != nil {
micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond return QueryParameter{}, err
t.Nanosecond = 0 }
return t.String() + fmt.Sprintf(".%06d", micro) p.Value = val
return p, nil
}
var paramTypeToFieldType = map[string]FieldType{
int64ParamType.Type: IntegerFieldType,
float64ParamType.Type: FloatFieldType,
boolParamType.Type: BooleanFieldType,
stringParamType.Type: StringFieldType,
bytesParamType.Type: BytesFieldType,
dateParamType.Type: DateFieldType,
timeParamType.Type: TimeFieldType,
}
// Convert a parameter value from the service to a Go value. This is similar to, but
// not quite the same as, converting data values.
func convertParamValue(qval *bq.QueryParameterValue, qtype *bq.QueryParameterType) (interface{}, error) {
switch qtype.Type {
case "ARRAY":
if qval == nil {
return []interface{}(nil), nil
}
return convertParamArray(qval.ArrayValues, qtype.ArrayType)
case "STRUCT":
if qval == nil {
return map[string]interface{}(nil), nil
}
return convertParamStruct(qval.StructValues, qtype.StructTypes)
case "TIMESTAMP":
return time.Parse(timestampFormat, qval.Value)
case "DATETIME":
parts := strings.Fields(qval.Value)
if len(parts) != 2 {
return nil, fmt.Errorf("bigquery: bad DATETIME value %q", qval.Value)
}
return civil.ParseDateTime(parts[0] + "T" + parts[1])
default:
return convertBasicType(qval.Value, paramTypeToFieldType[qtype.Type])
} }
} }
// convertParamArray converts a query parameter array value to a Go value. It
// always returns a []interface{}.
func convertParamArray(elVals []*bq.QueryParameterValue, elType *bq.QueryParameterType) ([]interface{}, error) {
var vals []interface{}
for _, el := range elVals {
val, err := convertParamValue(el, elType)
if err != nil {
return nil, err
}
vals = append(vals, val)
}
return vals, nil
}
// convertParamValue converts a query parameter struct value into a Go value. It
// always returns a map[string]interface{}.
func convertParamStruct(sVals map[string]bq.QueryParameterValue, sTypes []*bq.QueryParameterTypeStructTypes) (map[string]interface{}, error) {
vals := map[string]interface{}{}
for _, st := range sTypes {
if sv, ok := sVals[st.Name]; ok {
val, err := convertParamValue(&sv, st.Type)
if err != nil {
return nil, err
}
vals[st.Name] = val
} else {
vals[st.Name] = nil
}
}
return vals, nil
}

View File

@@ -30,40 +30,79 @@ import (
) )
var scalarTests = []struct { var scalarTests = []struct {
val interface{} val interface{} // The Go value
want string wantVal string // paramValue's desired output
wantType *bq.QueryParameterType // paramType's desired output
}{ }{
{int64(0), "0"}, {int64(0), "0", int64ParamType},
{3.14, "3.14"}, {3.14, "3.14", float64ParamType},
{3.14159e-87, "3.14159e-87"}, {3.14159e-87, "3.14159e-87", float64ParamType},
{true, "true"}, {true, "true", boolParamType},
{"string", "string"}, {"string", "string", stringParamType},
{"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n"}, {"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n", stringParamType},
{math.NaN(), "NaN"}, {math.NaN(), "NaN", float64ParamType},
{[]byte("foo"), "Zm9v"}, // base64 encoding of "foo" {[]byte("foo"), "Zm9v", bytesParamType}, // base64 encoding of "foo"
{time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)), {time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)),
"2016-03-20 04:22:09.000005-01:02"}, "2016-03-20 04:22:09.000005-01:02",
{civil.Date{2016, 3, 20}, "2016-03-20"}, timestampParamType},
{civil.Time{4, 5, 6, 789000000}, "04:05:06.789000"}, {civil.Date{2016, 3, 20}, "2016-03-20", dateParamType},
{civil.DateTime{civil.Date{2016, 3, 20}, civil.Time{4, 5, 6, 789000000}}, "2016-03-20 04:05:06.789000"}, {civil.Time{4, 5, 6, 789000000}, "04:05:06.789000", timeParamType},
{civil.DateTime{civil.Date{2016, 3, 20}, civil.Time{4, 5, 6, 789000000}},
"2016-03-20 04:05:06.789000",
dateTimeParamType},
} }
type S1 struct { type (
A int S1 struct {
B *S2 A int
C bool B *S2
} C bool
}
S2 struct {
D string
e int
}
)
type S2 struct { var (
D string s1 = S1{
e int A: 1,
} B: &S2{D: "s"},
C: true,
}
var s1 = S1{ s1ParamType = &bq.QueryParameterType{
A: 1, Type: "STRUCT",
B: &S2{D: "s"}, StructTypes: []*bq.QueryParameterTypeStructTypes{
C: true, {Name: "A", Type: int64ParamType},
} {Name: "B", Type: &bq.QueryParameterType{
Type: "STRUCT",
StructTypes: []*bq.QueryParameterTypeStructTypes{
{Name: "D", Type: stringParamType},
},
}},
{Name: "C", Type: boolParamType},
},
}
s1ParamValue = bq.QueryParameterValue{
StructValues: map[string]bq.QueryParameterValue{
"A": sval("1"),
"B": bq.QueryParameterValue{
StructValues: map[string]bq.QueryParameterValue{
"D": sval("s"),
},
},
"C": sval("true"),
},
}
s1ParamReturnValue = map[string]interface{}{
"A": int64(1),
"B": map[string]interface{}{"D": "s"},
"C": true,
}
)
func sval(s string) bq.QueryParameterValue { func sval(s string) bq.QueryParameterValue {
return bq.QueryParameterValue{Value: s} return bq.QueryParameterValue{Value: s}
@@ -76,7 +115,7 @@ func TestParamValueScalar(t *testing.T) {
t.Errorf("%v: got %v, want nil", test.val, err) t.Errorf("%v: got %v, want nil", test.val, err)
continue continue
} }
want := sval(test.want) want := sval(test.wantVal)
if !testutil.Equal(got, want) { if !testutil.Equal(got, want) {
t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want) t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want)
} }
@@ -113,19 +152,8 @@ func TestParamValueStruct(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
want := bq.QueryParameterValue{ if !testutil.Equal(got, s1ParamValue) {
StructValues: map[string]bq.QueryParameterValue{ t.Errorf("got %+v\nwant %+v", got, s1ParamValue)
"A": sval("1"),
"B": bq.QueryParameterValue{
StructValues: map[string]bq.QueryParameterValue{
"D": sval("s"),
},
},
"C": sval("true"),
},
}
if !testutil.Equal(got, want) {
t.Errorf("got %+v\nwant %+v", got, want)
} }
} }
@@ -141,35 +169,24 @@ func TestParamValueErrors(t *testing.T) {
} }
func TestParamType(t *testing.T) { func TestParamType(t *testing.T) {
for _, test := range scalarTests {
got, err := paramType(reflect.TypeOf(test.val))
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.wantType) {
t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.wantType)
}
}
for _, test := range []struct { for _, test := range []struct {
val interface{} val interface{}
want *bq.QueryParameterType want *bq.QueryParameterType
}{ }{
{0, int64ParamType},
{uint32(32767), int64ParamType}, {uint32(32767), int64ParamType},
{3.14, float64ParamType},
{float32(3.14), float64ParamType},
{math.NaN(), float64ParamType},
{true, boolParamType},
{"", stringParamType},
{"string", stringParamType},
{time.Now(), timestampParamType},
{[]byte("foo"), bytesParamType}, {[]byte("foo"), bytesParamType},
{[]int{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}}, {[]int{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}},
{[3]bool{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: boolParamType}}, {[3]bool{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: boolParamType}},
{S1{}, &bq.QueryParameterType{ {S1{}, s1ParamType},
Type: "STRUCT",
StructTypes: []*bq.QueryParameterTypeStructTypes{
{Name: "A", Type: int64ParamType},
{Name: "B", Type: &bq.QueryParameterType{
Type: "STRUCT",
StructTypes: []*bq.QueryParameterTypeStructTypes{
{Name: "D", Type: stringParamType},
},
}},
{Name: "C", Type: boolParamType},
},
}},
} { } {
got, err := paramType(reflect.TypeOf(test.val)) got, err := paramType(reflect.TypeOf(test.val))
if err != nil { if err != nil {
@@ -192,17 +209,74 @@ func TestParamTypeErrors(t *testing.T) {
} }
} }
func TestIntegration_ScalarParam(t *testing.T) { func TestConvertParamValue(t *testing.T) {
c := getClient(t) // Scalars.
for _, test := range scalarTests { for _, test := range scalarTests {
got, err := paramRoundTrip(c, test.val) pval, err := paramValue(reflect.ValueOf(test.val))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if !testutil.Equal(got, test.val, cmp.Comparer(func(t1, t2 time.Time) bool { ptype, err := paramType(reflect.TypeOf(test.val))
return t1.Round(time.Microsecond).Equal(t2.Round(time.Microsecond)) if err != nil {
})) { t.Fatal(err)
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.val, test.val) }
got, err := convertParamValue(&pval, ptype)
if err != nil {
t.Fatalf("convertParamValue(%+v, %+v): %v", pval, ptype, err)
}
if !testutil.Equal(got, test.val) {
t.Errorf("%#v: got %#v", test.val, got)
}
}
// Arrays.
for _, test := range []struct {
pval *bq.QueryParameterValue
want []interface{}
}{
{
&bq.QueryParameterValue{},
nil,
},
{
&bq.QueryParameterValue{
ArrayValues: []*bq.QueryParameterValue{{Value: "1"}, {Value: "2"}},
},
[]interface{}{int64(1), int64(2)},
},
} {
ptype := &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}
got, err := convertParamValue(test.pval, ptype)
if err != nil {
t.Fatalf("%+v: %v", test.pval, err)
}
if !testutil.Equal(got, test.want) {
t.Errorf("%+v: got %+v, want %+v", test.pval, got, test.want)
}
}
// Structs.
got, err := convertParamValue(&s1ParamValue, s1ParamType)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, s1ParamReturnValue) {
t.Errorf("got %+v, want %+v", got, s1ParamReturnValue)
}
}
func TestIntegration_ScalarParam(t *testing.T) {
roundToMicros := cmp.Transformer("RoundToMicros",
func(t time.Time) time.Time { return t.Round(time.Microsecond) })
c := getClient(t)
for _, test := range scalarTests {
gotData, gotParam, err := paramRoundTrip(c, test.val)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(gotData, test.val, roundToMicros) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotData, gotData, test.val, test.val)
}
if !testutil.Equal(gotParam, test.val, roundToMicros) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotParam, gotParam, test.val, test.val)
} }
} }
} }
@@ -210,40 +284,78 @@ func TestIntegration_ScalarParam(t *testing.T) {
func TestIntegration_OtherParam(t *testing.T) { func TestIntegration_OtherParam(t *testing.T) {
c := getClient(t) c := getClient(t)
for _, test := range []struct { for _, test := range []struct {
val interface{} val interface{}
want interface{} wantData interface{}
wantParam interface{}
}{ }{
{[]int(nil), []Value(nil)}, {[]int(nil), []Value(nil), []interface{}(nil)},
{[]int{}, []Value(nil)}, {[]int{}, []Value(nil), []interface{}(nil)},
{[]int{1, 2}, []Value{int64(1), int64(2)}}, {
{[3]int{1, 2, 3}, []Value{int64(1), int64(2), int64(3)}}, []int{1, 2},
{S1{}, []Value{int64(0), nil, false}}, []Value{int64(1), int64(2)},
{s1, []Value{int64(1), []Value{"s"}, true}}, []interface{}{int64(1), int64(2)},
},
{
[3]int{1, 2, 3},
[]Value{int64(1), int64(2), int64(3)},
[]interface{}{int64(1), int64(2), int64(3)},
},
{
S1{},
[]Value{int64(0), nil, false},
map[string]interface{}{
"A": int64(0),
"B": nil,
"C": false,
},
},
{
s1,
[]Value{int64(1), []Value{"s"}, true},
s1ParamReturnValue,
},
} { } {
got, err := paramRoundTrip(c, test.val) gotData, gotParam, err := paramRoundTrip(c, test.val)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if !testutil.Equal(got, test.want) { if !testutil.Equal(gotData, test.wantData) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.want, test.want) t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)",
test.val, gotData, gotData, test.wantData, test.wantData)
}
if !testutil.Equal(gotParam, test.wantParam) {
t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)",
test.val, gotParam, gotParam, test.wantParam, test.wantParam)
} }
} }
} }
func paramRoundTrip(c *Client, x interface{}) (Value, error) { // paramRoundTrip passes x as a query parameter to BigQuery. It returns
// the resulting data value from running the query and the parameter value from
// the returned job configuration.
func paramRoundTrip(c *Client, x interface{}) (data Value, param interface{}, err error) {
ctx := context.Background()
q := c.Query("select ?") q := c.Query("select ?")
q.Parameters = []QueryParameter{{Value: x}} q.Parameters = []QueryParameter{{Value: x}}
it, err := q.Read(context.Background()) job, err := q.Run(ctx)
if err != nil { if err != nil {
return nil, err return nil, nil, err
}
it, err := job.Read(ctx)
if err != nil {
return nil, nil, err
} }
var val []Value var val []Value
err = it.Next(&val) err = it.Next(&val)
if err != nil { if err != nil {
return nil, err return nil, nil, err
} }
if len(val) != 1 { if len(val) != 1 {
return nil, errors.New("wrong number of values") return nil, nil, errors.New("wrong number of values")
} }
return val[0], nil conf, err := job.Config()
if err != nil {
return nil, nil, err
}
return val[0], conf.(*QueryConfig).Parameters[0].Value, nil
} }

View File

@@ -23,12 +23,6 @@ import (
// QueryConfig holds the configuration for a query job. // QueryConfig holds the configuration for a query job.
type QueryConfig struct { type QueryConfig struct {
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string
// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool
// Dst is the table into which the results of the query will be written. // Dst is the table into which the results of the query will be written.
// If this field is nil, a temporary table will be created. // If this field is nil, a temporary table will be created.
Dst *Table Dst *Table
@@ -43,6 +37,9 @@ type QueryConfig struct {
// TableDefinitions describes data sources outside of BigQuery. // TableDefinitions describes data sources outside of BigQuery.
// The map keys may be used as table names in the query string. // The map keys may be used as table names in the query string.
//
// When a QueryConfig is returned from Job.Config, the map values
// are always of type *ExternalDataConfig.
TableDefinitions map[string]ExternalData TableDefinitions map[string]ExternalData
// CreateDisposition specifies the circumstances under which the destination table will be created. // CreateDisposition specifies the circumstances under which the destination table will be created.
@@ -90,6 +87,7 @@ type QueryConfig struct {
MaxBytesBilled int64 MaxBytesBilled int64
// UseStandardSQL causes the query to use standard SQL. The default. // UseStandardSQL causes the query to use standard SQL. The default.
// Deprecated: use UseLegacySQL.
UseStandardSQL bool UseStandardSQL bool
// UseLegacySQL causes the query to use legacy SQL. // UseLegacySQL causes the query to use legacy SQL.
@@ -101,6 +99,130 @@ type QueryConfig struct {
// If the query uses named syntax ("@p"), then all parameters must have names. // If the query uses named syntax ("@p"), then all parameters must have names.
// It is illegal to mix positional and named syntax. // It is illegal to mix positional and named syntax.
Parameters []QueryParameter Parameters []QueryParameter
// The labels associated with this job.
Labels map[string]string
// If true, don't actually run this job. A valid query will return a mostly
// empty response with some processing statistics, while an invalid query will
// return the same error it would if it wasn't a dry run.
//
// Query.Read will fail with dry-run queries. Call Query.Run instead, and then
// call LastStatus on the returned job to get statistics. Calling Status on a
// dry-run job will fail.
DryRun bool
}
func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
qconf := &bq.JobConfigurationQuery{
Query: qc.Q,
CreateDisposition: string(qc.CreateDisposition),
WriteDisposition: string(qc.WriteDisposition),
AllowLargeResults: qc.AllowLargeResults,
Priority: string(qc.Priority),
MaximumBytesBilled: qc.MaxBytesBilled,
}
if len(qc.TableDefinitions) > 0 {
qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
}
for name, data := range qc.TableDefinitions {
qconf.TableDefinitions[name] = data.toBQ()
}
if qc.DefaultProjectID != "" || qc.DefaultDatasetID != "" {
qconf.DefaultDataset = &bq.DatasetReference{
DatasetId: qc.DefaultDatasetID,
ProjectId: qc.DefaultProjectID,
}
}
if tier := int64(qc.MaxBillingTier); tier > 0 {
qconf.MaximumBillingTier = &tier
}
f := false
if qc.DisableQueryCache {
qconf.UseQueryCache = &f
}
if qc.DisableFlattenedResults {
qconf.FlattenResults = &f
// DisableFlattenResults implies AllowLargeResults.
qconf.AllowLargeResults = true
}
if qc.UseStandardSQL && qc.UseLegacySQL {
return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
}
if len(qc.Parameters) > 0 && qc.UseLegacySQL {
return nil, errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL")
}
if qc.UseLegacySQL {
qconf.UseLegacySql = true
} else {
qconf.UseLegacySql = false
qconf.ForceSendFields = append(qconf.ForceSendFields, "UseLegacySql")
}
if qc.Dst != nil && !qc.Dst.implicitTable() {
qconf.DestinationTable = qc.Dst.toBQ()
}
for _, p := range qc.Parameters {
qp, err := p.toBQ()
if err != nil {
return nil, err
}
qconf.QueryParameters = append(qconf.QueryParameters, qp)
}
return &bq.JobConfiguration{
Labels: qc.Labels,
DryRun: qc.DryRun,
Query: qconf,
}, nil
}
func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) {
qq := q.Query
qc := &QueryConfig{
Labels: q.Labels,
DryRun: q.DryRun,
Q: qq.Query,
CreateDisposition: TableCreateDisposition(qq.CreateDisposition),
WriteDisposition: TableWriteDisposition(qq.WriteDisposition),
AllowLargeResults: qq.AllowLargeResults,
Priority: QueryPriority(qq.Priority),
MaxBytesBilled: qq.MaximumBytesBilled,
UseLegacySQL: qq.UseLegacySql,
UseStandardSQL: !qq.UseLegacySql,
}
if len(qq.TableDefinitions) > 0 {
qc.TableDefinitions = make(map[string]ExternalData)
}
for name, qedc := range qq.TableDefinitions {
edc, err := bqToExternalDataConfig(&qedc)
if err != nil {
return nil, err
}
qc.TableDefinitions[name] = edc
}
if qq.DefaultDataset != nil {
qc.DefaultProjectID = qq.DefaultDataset.ProjectId
qc.DefaultDatasetID = qq.DefaultDataset.DatasetId
}
if qq.MaximumBillingTier != nil {
qc.MaxBillingTier = int(*qq.MaximumBillingTier)
}
if qq.UseQueryCache != nil && !*qq.UseQueryCache {
qc.DisableQueryCache = true
}
if qq.FlattenResults != nil && !*qq.FlattenResults {
qc.DisableFlattenedResults = true
}
if qq.DestinationTable != nil {
qc.Dst = bqToTable(qq.DestinationTable, c)
}
for _, qp := range qq.QueryParameters {
p, err := bqToQueryParameter(qp)
if err != nil {
return nil, err
}
qc.Parameters = append(qc.Parameters, p)
}
return qc, nil
} }
// QueryPriority specifies a priority with which a query is to be executed. // QueryPriority specifies a priority with which a query is to be executed.
@@ -113,8 +235,9 @@ const (
// A Query queries data from a BigQuery table. Use Client.Query to create a Query. // A Query queries data from a BigQuery table. Use Client.Query to create a Query.
type Query struct { type Query struct {
client *Client JobIDConfig
QueryConfig QueryConfig
client *Client
} }
// Query creates a query with string q. // Query creates a query with string q.
@@ -128,83 +251,26 @@ func (c *Client) Query(q string) *Query {
// Run initiates a query job. // Run initiates a query job.
func (q *Query) Run(ctx context.Context) (*Job, error) { func (q *Query) Run(ctx context.Context) (*Job, error) {
job := &bq.Job{ job, err := q.newJob()
JobReference: createJobRef(q.JobID, q.AddJobIDSuffix, q.client.projectID), if err != nil {
Configuration: &bq.JobConfiguration{ return nil, err
Query: &bq.JobConfigurationQuery{}, }
}, j, err := q.client.insertJob(ctx, job, nil)
}
if err := q.QueryConfig.populateJobQueryConfig(job.Configuration.Query); err != nil {
return nil, err
}
j, err := q.client.insertJob(ctx, &insertJobConf{job: job})
if err != nil { if err != nil {
return nil, err return nil, err
} }
j.isQuery = true
return j, nil return j, nil
} }
func (q *QueryConfig) populateJobQueryConfig(conf *bq.JobConfigurationQuery) error { func (q *Query) newJob() (*bq.Job, error) {
conf.Query = q.Q config, err := q.QueryConfig.toBQ()
if err != nil {
if len(q.TableDefinitions) > 0 { return nil, err
conf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
} }
for name, data := range q.TableDefinitions { return &bq.Job{
conf.TableDefinitions[name] = data.externalDataConfig() JobReference: q.JobIDConfig.createJobRef(q.client.projectID),
} Configuration: config,
}, nil
if q.DefaultProjectID != "" || q.DefaultDatasetID != "" {
conf.DefaultDataset = &bq.DatasetReference{
DatasetId: q.DefaultDatasetID,
ProjectId: q.DefaultProjectID,
}
}
if tier := int64(q.MaxBillingTier); tier > 0 {
conf.MaximumBillingTier = &tier
}
conf.CreateDisposition = string(q.CreateDisposition)
conf.WriteDisposition = string(q.WriteDisposition)
conf.AllowLargeResults = q.AllowLargeResults
conf.Priority = string(q.Priority)
f := false
if q.DisableQueryCache {
conf.UseQueryCache = &f
}
if q.DisableFlattenedResults {
conf.FlattenResults = &f
// DisableFlattenResults implies AllowLargeResults.
conf.AllowLargeResults = true
}
if q.MaxBytesBilled >= 1 {
conf.MaximumBytesBilled = q.MaxBytesBilled
}
if q.UseStandardSQL && q.UseLegacySQL {
return errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
}
if len(q.Parameters) > 0 && q.UseLegacySQL {
return errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL")
}
if q.UseLegacySQL {
conf.UseLegacySql = true
} else {
conf.UseLegacySql = false
conf.ForceSendFields = append(conf.ForceSendFields, "UseLegacySql")
}
if q.Dst != nil && !q.Dst.implicitTable() {
conf.DestinationTable = q.Dst.tableRefProto()
}
for _, p := range q.Parameters {
qp, err := p.toRaw()
if err != nil {
return err
}
conf.QueryParameters = append(conf.QueryParameters, qp)
}
return nil
} }
// Read submits a query for execution and returns the results via a RowIterator. // Read submits a query for execution and returns the results via a RowIterator.

View File

@@ -17,9 +17,9 @@ package bigquery
import ( import (
"testing" "testing"
"cloud.google.com/go/internal/testutil" "github.com/google/go-cmp/cmp"
"golang.org/x/net/context" "cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2" bq "google.golang.org/api/bigquery/v2"
) )
@@ -46,15 +46,22 @@ func defaultQueryJob() *bq.Job {
} }
} }
var defaultQuery = &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
}
func TestQuery(t *testing.T) { func TestQuery(t *testing.T) {
defer fixRandomJobID("RANDOM")() defer fixRandomID("RANDOM")()
c := &Client{ c := &Client{
projectID: "client-project-id", projectID: "client-project-id",
} }
testCases := []struct { testCases := []struct {
dst *Table dst *Table
src *QueryConfig src *QueryConfig
want *bq.Job jobIDConfig JobIDConfig
want *bq.Job
}{ }{
{ {
dst: c.Dataset("dataset-id").Table("table-id"), dst: c.Dataset("dataset-id").Table("table-id"),
@@ -64,21 +71,22 @@ func TestQuery(t *testing.T) {
{ {
dst: c.Dataset("dataset-id").Table("table-id"), dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{ src: &QueryConfig{
Q: "query string", Q: "query string",
Labels: map[string]string{"a": "b"},
DryRun: true,
}, },
want: func() *bq.Job { want: func() *bq.Job {
j := defaultQueryJob() j := defaultQueryJob()
j.Configuration.Labels = map[string]string{"a": "b"}
j.Configuration.DryRun = true
j.Configuration.Query.DefaultDataset = nil j.Configuration.Query.DefaultDataset = nil
return j return j
}(), }(),
}, },
{ {
dst: c.Dataset("dataset-id").Table("table-id"), dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{ jobIDConfig: JobIDConfig{JobID: "jobID", AddJobIDSuffix: true},
Q: "query string", src: &QueryConfig{Q: "query string"},
JobID: "jobID",
AddJobIDSuffix: true,
},
want: func() *bq.Job { want: func() *bq.Job {
j := defaultQueryJob() j := defaultQueryJob()
j.Configuration.Query.DefaultDataset = nil j.Configuration.Query.DefaultDataset = nil
@@ -244,16 +252,6 @@ func TestQuery(t *testing.T) {
return j return j
}(), }(),
}, },
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
MaxBytesBilled: -1,
},
want: defaultQueryJob(),
},
{ {
dst: c.Dataset("dataset-id").Table("table-id"), dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{ src: &QueryConfig{
@@ -281,24 +279,71 @@ func TestQuery(t *testing.T) {
}, },
} }
for i, tc := range testCases { for i, tc := range testCases {
s := &testService{}
c.service = s
query := c.Query("") query := c.Query("")
query.JobIDConfig = tc.jobIDConfig
query.QueryConfig = *tc.src query.QueryConfig = *tc.src
query.Dst = tc.dst query.Dst = tc.dst
if _, err := query.Run(context.Background()); err != nil { got, err := query.newJob()
if err != nil {
t.Errorf("#%d: err calling query: %v", i, err) t.Errorf("#%d: err calling query: %v", i, err)
continue continue
} }
checkJob(t, i, s.Job, tc.want) checkJob(t, i, got, tc.want)
// Round-trip.
jc, err := bqToJobConfig(got.Configuration, c)
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
wantConfig := query.QueryConfig
// We set AllowLargeResults to true when DisableFlattenedResults is true.
if wantConfig.DisableFlattenedResults {
wantConfig.AllowLargeResults = true
}
// A QueryConfig with neither UseXXXSQL field set is equivalent
// to one where UseStandardSQL = true.
if !wantConfig.UseLegacySQL && !wantConfig.UseStandardSQL {
wantConfig.UseStandardSQL = true
}
// Treat nil and empty tables the same, and ignore the client.
tableEqual := func(t1, t2 *Table) bool {
if t1 == nil {
t1 = &Table{}
}
if t2 == nil {
t2 = &Table{}
}
return t1.ProjectID == t2.ProjectID && t1.DatasetID == t2.DatasetID && t1.TableID == t2.TableID
}
// A table definition that is a GCSReference round-trips as an ExternalDataConfig.
// TODO(jba): see if there is a way to express this with a transformer.
gcsRefToEDC := func(g *GCSReference) *ExternalDataConfig {
q := g.toBQ()
e, _ := bqToExternalDataConfig(&q)
return e
}
externalDataEqual := func(e1, e2 ExternalData) bool {
if r, ok := e1.(*GCSReference); ok {
e1 = gcsRefToEDC(r)
}
if r, ok := e2.(*GCSReference); ok {
e2 = gcsRefToEDC(r)
}
return cmp.Equal(e1, e2)
}
diff := testutil.Diff(jc.(*QueryConfig), &wantConfig,
cmp.Comparer(tableEqual),
cmp.Comparer(externalDataEqual),
)
if diff != "" {
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
}
} }
} }
func TestConfiguringQuery(t *testing.T) { func TestConfiguringQuery(t *testing.T) {
s := &testService{}
c := &Client{ c := &Client{
projectID: "project-id", projectID: "project-id",
service: s,
} }
query := c.Query("q") query := c.Query("q")
@@ -326,30 +371,28 @@ func TestConfiguringQuery(t *testing.T) {
}, },
} }
if _, err := query.Run(context.Background()); err != nil { got, err := query.newJob()
t.Fatalf("err calling Query.Run: %v", err) if err != nil {
t.Fatalf("err calling Query.newJob: %v", err)
} }
if diff := testutil.Diff(s.Job, want); diff != "" { if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("querying: -got +want:\n%s", diff) t.Errorf("querying: -got +want:\n%s", diff)
} }
} }
func TestQueryLegacySQL(t *testing.T) { func TestQueryLegacySQL(t *testing.T) {
c := &Client{ c := &Client{projectID: "project-id"}
projectID: "project-id",
service: &testService{},
}
q := c.Query("q") q := c.Query("q")
q.UseStandardSQL = true q.UseStandardSQL = true
q.UseLegacySQL = true q.UseLegacySQL = true
_, err := q.Run(context.Background()) _, err := q.newJob()
if err == nil { if err == nil {
t.Error("UseStandardSQL and UseLegacySQL: got nil, want error") t.Error("UseStandardSQL and UseLegacySQL: got nil, want error")
} }
q = c.Query("q") q = c.Query("q")
q.Parameters = []QueryParameter{{Name: "p", Value: 3}} q.Parameters = []QueryParameter{{Name: "p", Value: 3}}
q.UseLegacySQL = true q.UseLegacySQL = true
_, err = q.Run(context.Background()) _, err = q.newJob()
if err == nil { if err == nil {
t.Error("Parameters and UseLegacySQL: got nil, want error") t.Error("Parameters and UseLegacySQL: got nil, want error")
} }

View File

@@ -27,69 +27,65 @@ import (
"google.golang.org/api/iterator" "google.golang.org/api/iterator"
) )
type readTabledataArgs struct { type pageFetcherArgs struct {
conf *readTableConf table *Table
tok string schema Schema
startIndex uint64
pageSize int64
pageToken string
} }
// readServiceStub services read requests by returning data from an in-memory list of values. // pageFetcherReadStub services read requests by returning data from an in-memory list of values.
type readServiceStub struct { type pageFetcherReadStub struct {
// values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery. // values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery.
values [][][]Value // contains pages / rows / columns. values [][][]Value // contains pages / rows / columns.
pageTokens map[string]string // maps incoming page token to returned page token. pageTokens map[string]string // maps incoming page token to returned page token.
// arguments are recorded for later inspection. // arguments are recorded for later inspection.
readTabledataCalls []readTabledataArgs calls []pageFetcherArgs
service
} }
func (s *readServiceStub) readValues(tok string) *readDataResult { func (s *pageFetcherReadStub) fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
result := &readDataResult{ s.calls = append(s.calls,
pageToken: s.pageTokens[tok], pageFetcherArgs{t, schema, startIndex, pageSize, pageToken})
result := &fetchPageResult{
pageToken: s.pageTokens[pageToken],
rows: s.values[0], rows: s.values[0],
} }
s.values = s.values[1:] s.values = s.values[1:]
return result, nil
return result
} }
func (s *readServiceStub) waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error) { func waitForQueryStub(context.Context, string) (Schema, error) {
return nil, nil return nil, nil
} }
func (s *readServiceStub) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
s.readTabledataCalls = append(s.readTabledataCalls, readTabledataArgs{conf, token})
return s.readValues(token), nil
}
func TestRead(t *testing.T) { func TestRead(t *testing.T) {
// The data for the service stub to return is populated for each test case in the testCases for loop. // The data for the service stub to return is populated for each test case in the testCases for loop.
ctx := context.Background() ctx := context.Background()
service := &readServiceStub{} c := &Client{projectID: "project-id"}
c := &Client{ pf := &pageFetcherReadStub{}
projectID: "project-id",
service: service,
}
queryJob := &Job{ queryJob := &Job{
projectID: "project-id", projectID: "project-id",
jobID: "job-id", jobID: "job-id",
c: c, c: c,
isQuery: true, config: &bq.JobConfiguration{
destinationTable: &bq.TableReference{ Query: &bq.JobConfigurationQuery{
ProjectId: "project-id", DestinationTable: &bq.TableReference{
DatasetId: "dataset-id", ProjectId: "project-id",
TableId: "table-id", DatasetId: "dataset-id",
TableId: "table-id",
},
},
}, },
} }
for _, readFunc := range []func() *RowIterator{ for _, readFunc := range []func() *RowIterator{
func() *RowIterator { func() *RowIterator {
return c.Dataset("dataset-id").Table("table-id").Read(ctx) return c.Dataset("dataset-id").Table("table-id").read(ctx, pf.fetchPage)
}, },
func() *RowIterator { func() *RowIterator {
it, err := queryJob.Read(ctx) it, err := queryJob.read(ctx, waitForQueryStub, pf.fetchPage)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -113,8 +109,8 @@ func TestRead(t *testing.T) {
}, },
} }
for _, tc := range testCases { for _, tc := range testCases {
service.values = tc.data pf.values = tc.data
service.pageTokens = tc.pageTokens pf.pageTokens = tc.pageTokens
if got, ok := collectValues(t, readFunc()); ok { if got, ok := collectValues(t, readFunc()); ok {
if !testutil.Equal(got, tc.want) { if !testutil.Equal(got, tc.want) {
t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want) t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
@@ -142,13 +138,11 @@ func collectValues(t *testing.T, it *RowIterator) ([][]Value, bool) {
} }
func TestNoMoreValues(t *testing.T) { func TestNoMoreValues(t *testing.T) {
c := &Client{ c := &Client{projectID: "project-id"}
projectID: "project-id", pf := &pageFetcherReadStub{
service: &readServiceStub{ values: [][][]Value{{{1, 2}, {11, 12}}},
values: [][][]Value{{{1, 2}, {11, 12}}},
},
} }
it := c.Dataset("dataset-id").Table("table-id").Read(context.Background()) it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), pf.fetchPage)
var vals []Value var vals []Value
// We expect to retrieve two values and then fail on the next attempt. // We expect to retrieve two values and then fail on the next attempt.
if err := it.Next(&vals); err != nil { if err := it.Next(&vals); err != nil {
@@ -162,23 +156,16 @@ func TestNoMoreValues(t *testing.T) {
} }
} }
type errorReadService struct {
service
}
var errBang = errors.New("bang!") var errBang = errors.New("bang!")
func (s *errorReadService) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) { func errorFetchPage(context.Context, *Table, Schema, uint64, int64, string) (*fetchPageResult, error) {
return nil, errBang return nil, errBang
} }
func TestReadError(t *testing.T) { func TestReadError(t *testing.T) {
// test that service read errors are propagated back to the caller. // test that service read errors are propagated back to the caller.
c := &Client{ c := &Client{projectID: "project-id"}
projectID: "project-id", it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), errorFetchPage)
service: &errorReadService{},
}
it := c.Dataset("dataset-id").Table("table-id").Read(context.Background())
var vals []Value var vals []Value
if err := it.Next(&vals); err != errBang { if err := it.Next(&vals); err != errBang {
t.Fatalf("Get: got: %v: want: %v", err, errBang) t.Fatalf("Get: got: %v: want: %v", err, errBang)
@@ -187,54 +174,47 @@ func TestReadError(t *testing.T) {
func TestReadTabledataOptions(t *testing.T) { func TestReadTabledataOptions(t *testing.T) {
// test that read options are propagated. // test that read options are propagated.
s := &readServiceStub{ s := &pageFetcherReadStub{
values: [][][]Value{{{1, 2}}}, values: [][][]Value{{{1, 2}}},
} }
c := &Client{ c := &Client{projectID: "project-id"}
projectID: "project-id", tr := c.Dataset("dataset-id").Table("table-id")
service: s, it := tr.read(context.Background(), s.fetchPage)
}
it := c.Dataset("dataset-id").Table("table-id").Read(context.Background())
it.PageInfo().MaxSize = 5 it.PageInfo().MaxSize = 5
var vals []Value var vals []Value
if err := it.Next(&vals); err != nil { if err := it.Next(&vals); err != nil {
t.Fatal(err) t.Fatal(err)
} }
want := []readTabledataArgs{{ want := []pageFetcherArgs{{
conf: &readTableConf{ table: tr,
projectID: "project-id", pageSize: 5,
datasetID: "dataset-id", pageToken: "",
tableID: "table-id",
paging: pagingConf{
recordsPerRequest: 5,
setRecordsPerRequest: true,
},
},
tok: "",
}} }}
if diff := testutil.Diff(s.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, pageFetcherReadStub{}, Table{}, Client{})); diff != "" {
if !testutil.Equal(s.readTabledataCalls, want, cmp.AllowUnexported(readTabledataArgs{}, readTableConf{}, pagingConf{})) { t.Errorf("reading (got=-, want=+):\n%s", diff)
t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
} }
} }
func TestReadQueryOptions(t *testing.T) { func TestReadQueryOptions(t *testing.T) {
// test that read options are propagated. // test that read options are propagated.
s := &readServiceStub{ c := &Client{projectID: "project-id"}
pf := &pageFetcherReadStub{
values: [][][]Value{{{1, 2}}}, values: [][][]Value{{{1, 2}}},
} }
tr := &bq.TableReference{
ProjectId: "project-id",
DatasetId: "dataset-id",
TableId: "table-id",
}
queryJob := &Job{ queryJob := &Job{
projectID: "project-id", projectID: "project-id",
jobID: "job-id", jobID: "job-id",
c: &Client{service: s}, c: c,
isQuery: true, config: &bq.JobConfiguration{
destinationTable: &bq.TableReference{ Query: &bq.JobConfigurationQuery{DestinationTable: tr},
ProjectId: "project-id",
DatasetId: "dataset-id",
TableId: "table-id",
}, },
} }
it, err := queryJob.Read(context.Background()) it, err := queryJob.read(context.Background(), waitForQueryStub, pf.fetchPage)
if err != nil { if err != nil {
t.Fatalf("err calling Read: %v", err) t.Fatalf("err calling Read: %v", err)
} }
@@ -244,20 +224,12 @@ func TestReadQueryOptions(t *testing.T) {
t.Fatalf("Next: got: %v: want: nil", err) t.Fatalf("Next: got: %v: want: nil", err)
} }
want := []readTabledataArgs{{ want := []pageFetcherArgs{{
conf: &readTableConf{ table: bqToTable(tr, c),
projectID: "project-id", pageSize: 5,
datasetID: "dataset-id", pageToken: "",
tableID: "table-id",
paging: pagingConf{
recordsPerRequest: 5,
setRecordsPerRequest: true,
},
},
tok: "",
}} }}
if !testutil.Equal(pf.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, Table{}, Client{})) {
if !testutil.Equal(s.readTabledataCalls, want, cmp.AllowUnexported(readTabledataArgs{}, readTableConf{}, pagingConf{})) { t.Errorf("reading: got:\n%v\nwant:\n%v", pf.calls, want)
t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
} }
} }

View File

@@ -49,7 +49,7 @@ type FieldSchema struct {
Schema Schema Schema Schema
} }
func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema { func (fs *FieldSchema) toBQ() *bq.TableFieldSchema {
tfs := &bq.TableFieldSchema{ tfs := &bq.TableFieldSchema{
Description: fs.Description, Description: fs.Description,
Name: fs.Name, Name: fs.Name,
@@ -63,21 +63,21 @@ func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema {
} // else leave as default, which is interpreted as NULLABLE. } // else leave as default, which is interpreted as NULLABLE.
for _, f := range fs.Schema { for _, f := range fs.Schema {
tfs.Fields = append(tfs.Fields, f.asTableFieldSchema()) tfs.Fields = append(tfs.Fields, f.toBQ())
} }
return tfs return tfs
} }
func (s Schema) asTableSchema() *bq.TableSchema { func (s Schema) toBQ() *bq.TableSchema {
var fields []*bq.TableFieldSchema var fields []*bq.TableFieldSchema
for _, f := range s { for _, f := range s {
fields = append(fields, f.asTableFieldSchema()) fields = append(fields, f.toBQ())
} }
return &bq.TableSchema{Fields: fields} return &bq.TableSchema{Fields: fields}
} }
func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema { func bqToFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
fs := &FieldSchema{ fs := &FieldSchema{
Description: tfs.Description, Description: tfs.Description,
Name: tfs.Name, Name: tfs.Name,
@@ -87,18 +87,18 @@ func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
} }
for _, f := range tfs.Fields { for _, f := range tfs.Fields {
fs.Schema = append(fs.Schema, convertTableFieldSchema(f)) fs.Schema = append(fs.Schema, bqToFieldSchema(f))
} }
return fs return fs
} }
func convertTableSchema(ts *bq.TableSchema) Schema { func bqToSchema(ts *bq.TableSchema) Schema {
if ts == nil { if ts == nil {
return nil return nil
} }
var s Schema var s Schema
for _, f := range ts.Fields { for _, f := range ts.Fields {
s = append(s, convertTableFieldSchema(f)) s = append(s, bqToFieldSchema(f))
} }
return s return s
} }
@@ -141,6 +141,7 @@ func InferSchema(st interface{}) (Schema, error) {
return inferSchemaReflectCached(reflect.TypeOf(st)) return inferSchemaReflectCached(reflect.TypeOf(st))
} }
// TODO(jba): replace with sync.Map for Go 1.9.
var schemaCache atomiccache.Cache var schemaCache atomiccache.Cache
type cacheVal struct { type cacheVal struct {
@@ -184,21 +185,21 @@ func inferStruct(t reflect.Type) (Schema, error) {
} }
// inferFieldSchema infers the FieldSchema for a Go type // inferFieldSchema infers the FieldSchema for a Go type
func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) { func inferFieldSchema(rt reflect.Type, nullable bool) (*FieldSchema, error) {
switch rt { switch rt {
case typeOfByteSlice: case typeOfByteSlice:
return &FieldSchema{Required: true, Type: BytesFieldType}, nil return &FieldSchema{Required: !nullable, Type: BytesFieldType}, nil
case typeOfGoTime: case typeOfGoTime:
return &FieldSchema{Required: true, Type: TimestampFieldType}, nil return &FieldSchema{Required: !nullable, Type: TimestampFieldType}, nil
case typeOfDate: case typeOfDate:
return &FieldSchema{Required: true, Type: DateFieldType}, nil return &FieldSchema{Required: !nullable, Type: DateFieldType}, nil
case typeOfTime: case typeOfTime:
return &FieldSchema{Required: true, Type: TimeFieldType}, nil return &FieldSchema{Required: !nullable, Type: TimeFieldType}, nil
case typeOfDateTime: case typeOfDateTime:
return &FieldSchema{Required: true, Type: DateTimeFieldType}, nil return &FieldSchema{Required: !nullable, Type: DateTimeFieldType}, nil
} }
if isSupportedIntType(rt) { if isSupportedIntType(rt) {
return &FieldSchema{Required: true, Type: IntegerFieldType}, nil return &FieldSchema{Required: !nullable, Type: IntegerFieldType}, nil
} }
switch rt.Kind() { switch rt.Kind() {
case reflect.Slice, reflect.Array: case reflect.Slice, reflect.Array:
@@ -208,7 +209,7 @@ func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) {
return nil, errUnsupportedFieldType return nil, errUnsupportedFieldType
} }
f, err := inferFieldSchema(et) f, err := inferFieldSchema(et, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -220,13 +221,13 @@ func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &FieldSchema{Required: true, Type: RecordFieldType, Schema: nested}, nil return &FieldSchema{Required: !nullable, Type: RecordFieldType, Schema: nested}, nil
case reflect.String: case reflect.String:
return &FieldSchema{Required: true, Type: StringFieldType}, nil return &FieldSchema{Required: !nullable, Type: StringFieldType}, nil
case reflect.Bool: case reflect.Bool:
return &FieldSchema{Required: true, Type: BooleanFieldType}, nil return &FieldSchema{Required: !nullable, Type: BooleanFieldType}, nil
case reflect.Float32, reflect.Float64: case reflect.Float32, reflect.Float64:
return &FieldSchema{Required: true, Type: FloatFieldType}, nil return &FieldSchema{Required: !nullable, Type: FloatFieldType}, nil
default: default:
return nil, errUnsupportedFieldType return nil, errUnsupportedFieldType
} }
@@ -240,7 +241,14 @@ func inferFields(rt reflect.Type) (Schema, error) {
return nil, err return nil, err
} }
for _, field := range fields { for _, field := range fields {
f, err := inferFieldSchema(field.Type) var nullable bool
for _, opt := range field.ParsedTag.([]string) {
if opt == nullableTagOption {
nullable = true
break
}
}
f, err := inferFieldSchema(field.Type, nullable)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@@ -192,12 +192,12 @@ func TestSchemaConversion(t *testing.T) {
} }
for _, tc := range testCases { for _, tc := range testCases {
bqSchema := tc.schema.asTableSchema() bqSchema := tc.schema.toBQ()
if !testutil.Equal(bqSchema, tc.bqSchema) { if !testutil.Equal(bqSchema, tc.bqSchema) {
t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v", t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v",
pretty.Value(bqSchema), pretty.Value(tc.bqSchema)) pretty.Value(bqSchema), pretty.Value(tc.bqSchema))
} }
schema := convertTableSchema(tc.bqSchema) schema := bqToSchema(tc.bqSchema)
if !testutil.Equal(schema, tc.schema) { if !testutil.Equal(schema, tc.schema) {
t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema) t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema)
} }
@@ -536,6 +536,7 @@ type withTags struct {
SimpleTag int `bigquery:"simple_tag"` SimpleTag int `bigquery:"simple_tag"`
UnderscoreTag int `bigquery:"_id"` UnderscoreTag int `bigquery:"_id"`
MixedCase int `bigquery:"MIXEDcase"` MixedCase int `bigquery:"MIXEDcase"`
Nullable int `bigquery:",nullable"`
} }
type withTagsNested struct { type withTagsNested struct {
@@ -563,6 +564,7 @@ var withTagsSchema = Schema{
reqField("simple_tag", "INTEGER"), reqField("simple_tag", "INTEGER"),
reqField("_id", "INTEGER"), reqField("_id", "INTEGER"),
reqField("MIXEDcase", "INTEGER"), reqField("MIXEDcase", "INTEGER"),
{Name: "Nullable", Type: FieldType("INTEGER"), Required: false},
} }
func TestTagInference(t *testing.T) { func TestTagInference(t *testing.T) {
@@ -666,12 +668,6 @@ func TestTagInferenceErrors(t *testing.T) {
}{}, }{},
err: errInvalidFieldName, err: errInvalidFieldName,
}, },
{
in: struct {
OmitEmpty int `bigquery:"abc,omitempty"`
}{},
err: errInvalidFieldName,
},
} }
for i, tc := range testCases { for i, tc := range testCases {
want := tc.err want := tc.err
@@ -680,6 +676,13 @@ func TestTagInferenceErrors(t *testing.T) {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want) t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want)
} }
} }
_, err := InferSchema(struct {
X int `bigquery:",optional"`
}{})
if err == nil {
t.Error("got nil, want error")
}
} }
func TestSchemaErrors(t *testing.T) { func TestSchemaErrors(t *testing.T) {

View File

@@ -1,940 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"errors"
"fmt"
"io"
"net/http"
"time"
"cloud.google.com/go/internal"
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/googleapi"
)
// service provides an internal abstraction to isolate the generated
// BigQuery API; most of this package uses this interface instead.
// The single implementation, *bigqueryService, contains all the knowledge
// of the generated BigQuery API.
type service interface {
	// Jobs

	// insertJob starts a job; conf may carry upload media (see insertJobConf).
	insertJob(ctx context.Context, projectId string, conf *insertJobConf) (*Job, error)
	getJob(ctx context.Context, projectId, jobID string) (*Job, error)
	jobCancel(ctx context.Context, projectId, jobID string) error
	jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error)
	// listJobs returns a page of jobs and a next page token.
	listJobs(ctx context.Context, projectId string, maxResults int, pageToken string, all bool, state string) ([]JobInfo, string, error)

	// Tables
	createTable(ctx context.Context, projectID, datasetID, tableID string, tm *TableMetadata) error
	getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error)
	deleteTable(ctx context.Context, projectID, datasetID, tableID string) error

	// listTables returns a page of Tables and a next page token. Note: the Tables do not have their c field populated.
	listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error)
	patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf, etag string) (*TableMetadata, error)

	// Table data
	readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error)
	insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error

	// Datasets
	insertDataset(ctx context.Context, datasetID, projectID string, dm *DatasetMetadata) error
	deleteDataset(ctx context.Context, datasetID, projectID string) error
	getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error)
	patchDataset(ctx context.Context, projectID, datasetID string, dm *DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error)

	// Misc

	// Waits for a query to complete.
	waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error)

	// listDatasets returns a page of Datasets and a next page token. Note: the Datasets do not have their c field populated.
	listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error)
}
// xGoogHeader is the value sent in the x-goog-api-client header,
// identifying the Go and client-library versions to the server.
var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)

// setClientHeader stamps the standard x-goog-api-client header onto h.
func setClientHeader(h http.Header) {
	h.Set("x-goog-api-client", xGoogHeader)
}
// bigqueryService is the sole concrete implementation of the service
// interface, backed by the generated google.golang.org/api/bigquery/v2
// client.
type bigqueryService struct {
	s *bq.Service
}
// newBigqueryService wraps the generated BigQuery API client in a
// bigqueryService, directing all its calls at the given endpoint.
func newBigqueryService(client *http.Client, endpoint string) (*bigqueryService, error) {
	raw, err := bq.New(client)
	if err != nil {
		return nil, fmt.Errorf("constructing bigquery client: %v", err)
	}
	raw.BasePath = endpoint
	return &bigqueryService{s: raw}, nil
}
// getPages calls the supplied getPage function repeatedly until there are
// no pages left to get (i.e. getPage returns an empty next-page token).
// token is the token of the initial page to start from; use an empty
// string to start from the beginning. The first error from getPage, if
// any, is returned.
func getPages(token string, getPage func(token string) (nextToken string, err error)) error {
	for {
		next, err := getPage(token)
		if err != nil {
			return err
		}
		if next == "" {
			return nil
		}
		token = next
	}
}
// insertJobConf holds the arguments for a Jobs.Insert call.
type insertJobConf struct {
	job   *bq.Job   // the job proto to insert
	media io.Reader // optional upload media attached to the request; nil if none
}
// Calls the Jobs.Insert RPC and returns a Job. Callers must set the returned Job's
// client.
// If conf.media is non-nil it is attached to the request as upload media;
// requests carrying media are never retried.
func (s *bigqueryService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
	call := s.s.Jobs.Insert(projectID, conf.job).Context(ctx)
	setClientHeader(call.Header())
	if conf.media != nil {
		call.Media(conf.media)
	}
	var res *bq.Job
	var err error
	invoke := func() error {
		res, err = call.Do()
		return err
	}
	// A job with a client-generated ID can be retried; the presence of the
	// ID makes the insert operation idempotent.
	// We don't retry if there is media, because it is an io.Reader. We'd
	// have to read the contents and keep it in memory, and that could be expensive.
	// TODO(jba): Look into retrying if media != nil.
	if conf.job.JobReference != nil && conf.media == nil {
		err = runWithRetry(ctx, invoke)
	} else {
		err = invoke()
	}
	if err != nil {
		return nil, err
	}

	// For query jobs, remember the destination table so results can be read later.
	// NOTE(review): res.Configuration (and res.JobReference below) are assumed
	// non-nil in the insert response — confirm the API always populates them.
	var dt *bq.TableReference
	if qc := res.Configuration.Query; qc != nil {
		dt = qc.DestinationTable
	}
	return &Job{
		projectID:        projectID,
		jobID:            res.JobReference.JobId,
		destinationTable: dt,
	}, nil
}
// pagingConf holds the paging parameters for reading table data.
type pagingConf struct {
	recordsPerRequest    int64 // max rows per request; only honored when setRecordsPerRequest is true
	setRecordsPerRequest bool
	startIndex           uint64 // row offset to start from when no page token is supplied
}
// readTableConf identifies a table to read and carries the paging state
// and cached schema for the read.
type readTableConf struct {
	projectID, datasetID, tableID string
	paging                        pagingConf
	schema                        Schema // lazily initialized when the first page of data is fetched.
}
// fetch retrieves one page of table data, starting at the page named by token.
func (conf *readTableConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
	return s.readTabledata(ctx, conf, token)
}

// setPaging replaces conf's paging parameters with a copy of *pc.
func (conf *readTableConf) setPaging(pc *pagingConf) { conf.paging = *pc }
// readDataResult is one page of table data, plus the information needed
// to fetch the next page.
type readDataResult struct {
	pageToken string    // token of the next page; empty means there are no more pages
	rows      [][]Value // this page's data, as rows of column values
	totalRows uint64    // total row count of the result set, as reported by the service
	schema    Schema    // schema of the rows
}
// readTabledata fetches one page of rows from a table via the
// Tabledata.List RPC. The page is selected by pageToken when non-empty,
// otherwise by conf.paging.startIndex. If the table's schema is not yet
// known, it is fetched concurrently with the data request and cached on
// conf for subsequent pages.
func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) {
	// Prepare request to fetch one page of table data.
	req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID)
	setClientHeader(req.Header())
	if pageToken != "" {
		req.PageToken(pageToken)
	} else {
		req.StartIndex(conf.paging.startIndex)
	}
	if conf.paging.setRecordsPerRequest {
		req.MaxResults(conf.paging.recordsPerRequest)
	}
	// Fetch the table schema in the background, if necessary.
	// errc is buffered (cap 1) so the goroutine's send always succeeds and
	// the goroutine can exit even if this function returns early on a
	// data-read error below.
	errc := make(chan error, 1)
	if conf.schema != nil {
		errc <- nil
	} else {
		go func() {
			var t *bq.Table
			err := runWithRetry(ctx, func() (err error) {
				t, err = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID).
					Fields("schema").
					Context(ctx).
					Do()
				return err
			})
			if err == nil && t.Schema != nil {
				conf.schema = convertTableSchema(t.Schema)
			}
			errc <- err
		}()
	}
	var res *bq.TableDataList
	err := runWithRetry(ctx, func() (err error) {
		res, err = req.Context(ctx).Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	// Wait for the schema fetch; a schema error fails the whole read.
	err = <-errc
	if err != nil {
		return nil, err
	}
	result := &readDataResult{
		pageToken: res.PageToken,
		totalRows: uint64(res.TotalRows),
		schema:    conf.schema,
	}
	result.rows, err = convertRows(res.Rows, conf.schema)
	if err != nil {
		return nil, err
	}
	return result, nil
}
// waitForQuery blocks until the query job projectID/jobID has completed,
// polling Jobs.GetQueryResults with exponential backoff, and then returns
// the schema of the query result. It does not read any rows (MaxResults
// is 0).
func (s *bigqueryService) waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error) {
	// Use GetQueryResults only to wait for completion, not to read results.
	req := s.s.Jobs.GetQueryResults(projectID, jobID).Context(ctx).MaxResults(0)
	setClientHeader(req.Header())
	backoff := gax.Backoff{
		Initial:    1 * time.Second,
		Multiplier: 2,
		Max:        60 * time.Second,
	}
	var res *bq.GetQueryResultsResponse
	err := internal.Retry(ctx, backoff, func() (stop bool, err error) {
		res, err = req.Do()
		if err != nil {
			// Stop on non-retryable errors; otherwise back off and retry.
			return !retryableError(err), err
		}
		if !res.JobComplete { // GetQueryResults may return early without error; retry.
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		return nil, err
	}
	return convertTableSchema(res.Schema), nil
}
// insertRowsConf holds options for a streaming insert. Each field maps
// directly onto the corresponding bq.TableDataInsertAllRequest field.
type insertRowsConf struct {
	templateSuffix      string
	ignoreUnknownValues bool
	skipInvalidRows     bool
}
// insertRows streams rows into a table via the tabledata.insertAll API.
// The request is retried on transient errors. If the request succeeds
// but individual rows fail, a PutMultiError describing each failed row
// is returned.
func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
	req := &bq.TableDataInsertAllRequest{
		TemplateSuffix:      conf.templateSuffix,
		IgnoreUnknownValues: conf.ignoreUnknownValues,
		SkipInvalidRows:     conf.skipInvalidRows,
	}
	for _, row := range rows {
		m := make(map[string]bq.JsonValue)
		for k, v := range row.Row {
			m[k] = bq.JsonValue(v)
		}
		req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
			InsertId: row.InsertID,
			Json:     m,
		})
	}
	call := s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx)
	setClientHeader(call.Header())
	var res *bq.TableDataInsertAllResponse
	err := runWithRetry(ctx, func() (err error) {
		res, err = call.Do()
		return err
	})
	if err != nil {
		return err
	}
	if len(res.InsertErrors) == 0 {
		return nil
	}
	var errs PutMultiError
	for _, e := range res.InsertErrors {
		// Validate the index reported by the service before using it to
		// index rows. This must be >=, not >: rows[len(rows)] would panic.
		if int(e.Index) >= len(rows) {
			return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
		}
		rie := RowInsertionError{
			InsertID: rows[e.Index].InsertID,
			RowIndex: int(e.Index),
		}
		for _, errp := range e.Errors {
			rie.Errors = append(rie.Errors, errorFromErrorProto(errp))
		}
		errs = append(errs, rie)
	}
	return errs
}
// getJob returns the job identified by jobID, with only its
// configuration and job-reference fields fetched.
func (s *bigqueryService) getJob(ctx context.Context, projectID, jobID string) (*Job, error) {
	j, err := s.getJobInternal(ctx, projectID, jobID, "configuration", "jobReference")
	if err != nil {
		return nil, err
	}
	return jobFromProtos(j.JobReference, j.Configuration), nil
}
// jobStatus fetches the current status and statistics of a job.
func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
	j, err := s.getJobInternal(ctx, projectID, jobID, "status", "statistics")
	if err != nil {
		return nil, err
	}
	status, err := jobStatusFromProto(j.Status)
	if err != nil {
		return nil, err
	}
	status.Statistics = jobStatisticsFromProto(j.Statistics)
	return status, nil
}
// getJobInternal retrieves a raw job resource, restricted to the given
// fields when any are specified. The call is retried on transient errors.
func (s *bigqueryService) getJobInternal(ctx context.Context, projectID, jobID string, fields ...googleapi.Field) (*bq.Job, error) {
	call := s.s.Jobs.Get(projectID, jobID).Context(ctx)
	if len(fields) > 0 {
		call = call.Fields(fields...)
	}
	setClientHeader(call.Header())
	var job *bq.Job
	if err := runWithRetry(ctx, func() (err error) {
		job, err = call.Do()
		return err
	}); err != nil {
		return nil, err
	}
	return job, nil
}
// jobCancel requests cancellation of a job, retrying transient errors.
func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string) error {
	// Jobs.Cancel returns a job entity, but the only relevant piece of
	// data it may contain (the status of the job) is unreliable. From the
	// docs: "This call will return immediately, and the client will need
	// to poll for the job status to see if the cancel completed
	// successfully". So it would be misleading to return a status.
	call := s.s.Jobs.Cancel(projectID, jobID).
		Fields(). // We don't need any of the response data.
		Context(ctx)
	setClientHeader(call.Header())
	return runWithRetry(ctx, func() error {
		_, err := call.Do()
		return err
	})
}
// jobFromProtos builds a client Job from its proto reference and
// configuration. A job with a non-nil Query configuration is marked as
// a query job and carries its destination table.
func jobFromProtos(jr *bq.JobReference, config *bq.JobConfiguration) *Job {
	job := &Job{
		projectID: jr.ProjectId,
		jobID:     jr.JobId,
	}
	if config.Query != nil {
		job.isQuery = true
		job.destinationTable = config.Query.DestinationTable
	}
	return job
}
var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}
// jobStatusFromProto converts a proto job status into a JobStatus,
// failing on unrecognized state strings. A terminal error is recorded
// only once the job has reached the Done state.
func jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) {
	state, ok := stateMap[status.State]
	if !ok {
		return nil, fmt.Errorf("unexpected job state: %v", status.State)
	}
	js := &JobStatus{State: state}
	if err := errorFromErrorProto(status.ErrorResult); state == Done && err != nil {
		js.err = err
	}
	for _, ep := range status.Errors {
		js.Errors = append(js.Errors, errorFromErrorProto(ep))
	}
	return js, nil
}
// jobStatisticsFromProto converts proto job statistics into the
// client's JobStatistics, choosing the Details variant (extract, load
// or query) that matches the populated section of the proto.
func jobStatisticsFromProto(s *bq.JobStatistics) *JobStatistics {
	js := &JobStatistics{
		CreationTime:        unixMillisToTime(s.CreationTime),
		StartTime:           unixMillisToTime(s.StartTime),
		EndTime:             unixMillisToTime(s.EndTime),
		TotalBytesProcessed: s.TotalBytesProcessed,
	}
	// At most one of the sub-statistics messages is expected to be set;
	// the first non-nil one wins.
	switch {
	case s.Extract != nil:
		js.Details = &ExtractStatistics{
			DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts),
		}
	case s.Load != nil:
		js.Details = &LoadStatistics{
			InputFileBytes: s.Load.InputFileBytes,
			InputFiles:     s.Load.InputFiles,
			OutputBytes:    s.Load.OutputBytes,
			OutputRows:     s.Load.OutputRows,
		}
	case s.Query != nil:
		var names []string
		for _, qp := range s.Query.UndeclaredQueryParameters {
			names = append(names, qp.Name)
		}
		var tables []*Table
		for _, tr := range s.Query.ReferencedTables {
			tables = append(tables, convertTableReference(tr))
		}
		js.Details = &QueryStatistics{
			BillingTier:                   s.Query.BillingTier,
			CacheHit:                      s.Query.CacheHit,
			StatementType:                 s.Query.StatementType,
			TotalBytesBilled:              s.Query.TotalBytesBilled,
			TotalBytesProcessed:           s.Query.TotalBytesProcessed,
			NumDMLAffectedRows:            s.Query.NumDmlAffectedRows,
			QueryPlan:                     queryPlanFromProto(s.Query.QueryPlan),
			Schema:                        convertTableSchema(s.Query.Schema),
			ReferencedTables:              tables,
			UndeclaredQueryParameterNames: names,
		}
	}
	return js
}
// queryPlanFromProto converts the API's query-plan stages into the
// client's ExplainQueryStage representation.
func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage {
	var out []*ExplainQueryStage
	for _, stage := range stages {
		var steps []*ExplainQueryStep
		for _, step := range stage.Steps {
			steps = append(steps, &ExplainQueryStep{
				Kind:     step.Kind,
				Substeps: step.Substeps,
			})
		}
		out = append(out, &ExplainQueryStage{
			ComputeRatioAvg: stage.ComputeRatioAvg,
			ComputeRatioMax: stage.ComputeRatioMax,
			ID:              stage.Id,
			Name:            stage.Name,
			ReadRatioAvg:    stage.ReadRatioAvg,
			ReadRatioMax:    stage.ReadRatioMax,
			RecordsRead:     stage.RecordsRead,
			RecordsWritten:  stage.RecordsWritten,
			Status:          stage.Status,
			Steps:           steps,
			WaitRatioAvg:    stage.WaitRatioAvg,
			WaitRatioMax:    stage.WaitRatioMax,
			WriteRatioAvg:   stage.WriteRatioAvg,
			WriteRatioMax:   stage.WriteRatioMax,
		})
	}
	return out
}
// listTables returns a subset of tables that belong to a dataset, and a token for fetching the next subset.
// A pageSize of zero or less lets the service choose the page size.
// The list call is retried on transient errors.
func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) {
	var tables []*Table
	req := s.s.Tables.List(projectID, datasetID).
		PageToken(pageToken).
		Context(ctx)
	setClientHeader(req.Header())
	if pageSize > 0 {
		req.MaxResults(int64(pageSize))
	}
	var res *bq.TableList
	err := runWithRetry(ctx, func() (err error) {
		res, err = req.Do()
		return err
	})
	if err != nil {
		return nil, "", err
	}
	for _, t := range res.Tables {
		tables = append(tables, convertTableReference(t.TableReference))
	}
	return tables, res.NextPageToken, nil
}
// createTable creates a table in the BigQuery service.
// If tm.ViewQuery is non-empty, the created table will be of type VIEW.
// Note: expiration can only be set during table creation.
// Note: after table creation, a view can be modified only if its table was initially created with a view.
// NOTE(review): unlike most other calls on this type, the insert is not
// wrapped in runWithRetry.
func (s *bigqueryService) createTable(ctx context.Context, projectID, datasetID, tableID string, tm *TableMetadata) error {
	table, err := bqTableFromMetadata(tm)
	if err != nil {
		return err
	}
	table.TableReference = &bq.TableReference{
		ProjectId: projectID,
		DatasetId: datasetID,
		TableId:   tableID,
	}
	req := s.s.Tables.Insert(projectID, datasetID, table).Context(ctx)
	setClientHeader(req.Header())
	_, err = req.Do()
	return err
}
// bqTableFromMetadata converts a TableMetadata into the proto form used
// for table creation. It rejects combinations that are invalid on
// create (Schema together with ViewQuery, UseLegacySQL together with
// UseStandardSQL) and any read-only field that has been set.
// A nil tm yields an empty table.
func bqTableFromMetadata(tm *TableMetadata) (*bq.Table, error) {
	t := &bq.Table{}
	if tm == nil {
		return t, nil
	}
	if tm.Schema != nil && tm.ViewQuery != "" {
		return nil, errors.New("bigquery: provide Schema or ViewQuery, not both")
	}
	t.FriendlyName = tm.Name
	t.Description = tm.Description
	if tm.Schema != nil {
		t.Schema = tm.Schema.asTableSchema()
	}
	if tm.ViewQuery != "" {
		if tm.UseStandardSQL && tm.UseLegacySQL {
			return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
		}
		t.View = &bq.ViewDefinition{Query: tm.ViewQuery}
		if tm.UseLegacySQL {
			t.View.UseLegacySql = true
		} else {
			// Send UseLegacySql: false explicitly; an omitted field would
			// let the server-side default apply instead.
			t.View.UseLegacySql = false
			t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
		}
	} else if tm.UseLegacySQL || tm.UseStandardSQL {
		return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery")
	}
	if tm.TimePartitioning != nil {
		// Only day-based partitioning is produced here.
		t.TimePartitioning = &bq.TimePartitioning{
			Type:         "DAY",
			ExpirationMs: int64(tm.TimePartitioning.Expiration / time.Millisecond),
		}
	}
	if !tm.ExpirationTime.IsZero() {
		t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6 // to Unix milliseconds
	}
	// The remaining fields are read-only; reject them on create.
	if tm.FullID != "" {
		return nil, errors.New("cannot set FullID on create")
	}
	if tm.Type != "" {
		return nil, errors.New("cannot set Type on create")
	}
	if !tm.CreationTime.IsZero() {
		return nil, errors.New("cannot set CreationTime on create")
	}
	if !tm.LastModifiedTime.IsZero() {
		return nil, errors.New("cannot set LastModifiedTime on create")
	}
	if tm.NumBytes != 0 {
		return nil, errors.New("cannot set NumBytes on create")
	}
	if tm.NumRows != 0 {
		return nil, errors.New("cannot set NumRows on create")
	}
	if tm.StreamingBuffer != nil {
		return nil, errors.New("cannot set StreamingBuffer on create")
	}
	if tm.ETag != "" {
		return nil, errors.New("cannot set ETag on create")
	}
	return t, nil
}
// getTableMetadata fetches a table's metadata, retrying transient errors.
func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) {
	call := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx)
	setClientHeader(call.Header())
	var t *bq.Table
	if err := runWithRetry(ctx, func() (err error) {
		t, err = call.Do()
		return err
	}); err != nil {
		return nil, err
	}
	return bqTableToMetadata(t), nil
}
// deleteTable removes a table, retrying transient failures.
func (s *bigqueryService) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error {
	call := s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx)
	setClientHeader(call.Header())
	return runWithRetry(ctx, func() error {
		return call.Do()
	})
}
// bqTableToMetadata converts a proto table resource into the client's
// TableMetadata. Millisecond timestamps are converted to time.Time,
// with zero mapping to the zero time.
func bqTableToMetadata(t *bq.Table) *TableMetadata {
	md := &TableMetadata{
		Description:      t.Description,
		Name:             t.FriendlyName,
		Type:             TableType(t.Type),
		FullID:           t.Id,
		NumBytes:         t.NumBytes,
		NumRows:          t.NumRows,
		ExpirationTime:   unixMillisToTime(t.ExpirationTime),
		CreationTime:     unixMillisToTime(t.CreationTime),
		LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
		ETag:             t.Etag,
	}
	if t.Schema != nil {
		md.Schema = convertTableSchema(t.Schema)
	}
	if t.View != nil {
		md.ViewQuery = t.View.Query
		md.UseLegacySQL = t.View.UseLegacySql
	}
	if t.TimePartitioning != nil {
		md.TimePartitioning = &TimePartitioning{
			Expiration: time.Duration(t.TimePartitioning.ExpirationMs) * time.Millisecond,
		}
	}
	if t.StreamingBuffer != nil {
		md.StreamingBuffer = &StreamingBuffer{
			EstimatedBytes:  t.StreamingBuffer.EstimatedBytes,
			EstimatedRows:   t.StreamingBuffer.EstimatedRows,
			OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)),
		}
	}
	return md
}
// bqDatasetToMetadata converts a proto dataset resource into the
// client's DatasetMetadata.
func bqDatasetToMetadata(d *bq.Dataset) *DatasetMetadata {
	// TODO(jba): access
	return &DatasetMetadata{
		CreationTime:           unixMillisToTime(d.CreationTime),
		LastModifiedTime:       unixMillisToTime(d.LastModifiedTime),
		DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
		Description:            d.Description,
		Name:                   d.FriendlyName,
		FullID:                 d.Id,
		Location:               d.Location,
		Labels:                 d.Labels,
		ETag:                   d.Etag,
	}
}
// Convert a number of milliseconds since the Unix epoch to a time.Time.
// Treat an input of zero specially: convert it to the zero time,
// rather than the start of the epoch.
func unixMillisToTime(m int64) time.Time {
if m == 0 {
return time.Time{}
}
return time.Unix(0, m*1e6)
}
// convertTableReference builds a client Table handle from a proto
// table reference.
func convertTableReference(tr *bq.TableReference) *Table {
	t := &Table{}
	t.ProjectID = tr.ProjectId
	t.DatasetID = tr.DatasetId
	t.TableID = tr.TableId
	return t
}
// patchTableConf contains fields to be patched.
type patchTableConf struct {
	// These fields are omitted from the patch operation if nil.
	Description *string
	Name        *string
	// Schema is omitted if nil; ExpirationTime is omitted if zero.
	Schema         Schema
	ExpirationTime time.Time
}
// patchTable applies the non-empty fields of conf to a table and
// returns the resulting metadata. If etag is non-empty it is sent as an
// If-Match header so the patch fails if the table changed concurrently.
// The call is retried on transient errors.
func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf, etag string) (*TableMetadata, error) {
	t := &bq.Table{}
	// forceSend marks a field for transmission even at its zero value,
	// so e.g. an empty description can be set explicitly.
	forceSend := func(field string) {
		t.ForceSendFields = append(t.ForceSendFields, field)
	}
	if conf.Description != nil {
		t.Description = *conf.Description
		forceSend("Description")
	}
	if conf.Name != nil {
		t.FriendlyName = *conf.Name
		forceSend("FriendlyName")
	}
	if conf.Schema != nil {
		t.Schema = conf.Schema.asTableSchema()
		forceSend("Schema")
	}
	if !conf.ExpirationTime.IsZero() {
		t.ExpirationTime = conf.ExpirationTime.UnixNano() / 1e6 // to Unix milliseconds
		forceSend("ExpirationTime")
	}
	call := s.s.Tables.Patch(projectID, datasetID, tableID, t).Context(ctx)
	setClientHeader(call.Header())
	if etag != "" {
		call.Header().Set("If-Match", etag)
	}
	var table *bq.Table
	if err := runWithRetry(ctx, func() (err error) {
		table, err = call.Do()
		return err
	}); err != nil {
		return nil, err
	}
	return bqTableToMetadata(table), nil
}
// insertDataset creates a dataset with the given ID from dm.
// NOTE(review): the parameter order (datasetID, projectID) is reversed
// relative to the other methods on this type — take care at call sites.
// The insert is not retried.
func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectID string, dm *DatasetMetadata) error {
	// TODO(jba): retry?
	ds, err := bqDatasetFromMetadata(dm)
	if err != nil {
		return err
	}
	ds.DatasetReference = &bq.DatasetReference{DatasetId: datasetID}
	req := s.s.Datasets.Insert(projectID, ds).Context(ctx)
	setClientHeader(req.Header())
	_, err = req.Do()
	return err
}
// patchDataset applies the updates in dm to a dataset and returns the
// resulting metadata. If etag is non-empty it is sent as an If-Match
// header so the patch fails if the dataset changed concurrently.
// The call is retried on transient errors.
func (s *bigqueryService) patchDataset(ctx context.Context, projectID, datasetID string, dm *DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error) {
	ds := bqDatasetFromUpdateMetadata(dm)
	call := s.s.Datasets.Patch(projectID, datasetID, ds).Context(ctx)
	setClientHeader(call.Header())
	if etag != "" {
		call.Header().Set("If-Match", etag)
	}
	var ds2 *bq.Dataset
	if err := runWithRetry(ctx, func() (err error) {
		ds2, err = call.Do()
		return err
	}); err != nil {
		return nil, err
	}
	return bqDatasetToMetadata(ds2), nil
}
// bqDatasetFromMetadata converts a DatasetMetadata into the proto form
// used for dataset creation, rejecting any read-only field that has
// been set. A nil dm yields an empty dataset.
func bqDatasetFromMetadata(dm *DatasetMetadata) (*bq.Dataset, error) {
	ds := &bq.Dataset{}
	if dm == nil {
		return ds, nil
	}
	ds.FriendlyName = dm.Name
	ds.Description = dm.Description
	ds.Location = dm.Location
	ds.DefaultTableExpirationMs = int64(dm.DefaultTableExpiration / time.Millisecond)
	ds.Labels = dm.Labels
	// The remaining fields are read-only; reject them on create.
	if !dm.CreationTime.IsZero() {
		return nil, errors.New("bigquery: Dataset.CreationTime is not writable")
	}
	if !dm.LastModifiedTime.IsZero() {
		return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable")
	}
	if dm.FullID != "" {
		return nil, errors.New("bigquery: Dataset.FullID is not writable")
	}
	if dm.ETag != "" {
		return nil, errors.New("bigquery: Dataset.ETag is not writable")
	}
	return ds, nil
}
// bqDatasetFromUpdateMetadata converts a DatasetMetadataToUpdate into
// the proto form used for a patch, using ForceSendFields/NullFields so
// that zero values and deletions are transmitted explicitly.
func bqDatasetFromUpdateMetadata(dm *DatasetMetadataToUpdate) *bq.Dataset {
	ds := &bq.Dataset{}
	// forceSend marks a field for transmission even at its zero value.
	forceSend := func(field string) {
		ds.ForceSendFields = append(ds.ForceSendFields, field)
	}
	if dm.Description != nil {
		ds.Description = optional.ToString(dm.Description)
		forceSend("Description")
	}
	if dm.Name != nil {
		ds.FriendlyName = optional.ToString(dm.Name)
		forceSend("FriendlyName")
	}
	if dm.DefaultTableExpiration != nil {
		dur := optional.ToDuration(dm.DefaultTableExpiration)
		if dur == 0 {
			// Send a null to delete the field.
			ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs")
		} else {
			ds.DefaultTableExpirationMs = int64(dur / time.Millisecond)
		}
	}
	if dm.setLabels != nil || dm.deleteLabels != nil {
		ds.Labels = map[string]string{}
		for k, v := range dm.setLabels {
			ds.Labels[k] = v
		}
		// An empty Labels map would be omitted from the request, so force
		// it when the only change is deletions.
		if len(ds.Labels) == 0 && len(dm.deleteLabels) > 0 {
			forceSend("Labels")
		}
		for l := range dm.deleteLabels {
			ds.NullFields = append(ds.NullFields, "Labels."+l)
		}
	}
	return ds
}
// deleteDataset removes a dataset, retrying transient failures.
// Note the (datasetID, projectID) parameter order.
func (s *bigqueryService) deleteDataset(ctx context.Context, datasetID, projectID string) error {
	call := s.s.Datasets.Delete(projectID, datasetID).Context(ctx)
	setClientHeader(call.Header())
	return runWithRetry(ctx, func() error {
		return call.Do()
	})
}
// getDatasetMetadata fetches a dataset's metadata, retrying transient
// errors.
func (s *bigqueryService) getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error) {
	call := s.s.Datasets.Get(projectID, datasetID).Context(ctx)
	setClientHeader(call.Header())
	var ds *bq.Dataset
	err := runWithRetry(ctx, func() (err error) {
		ds, err = call.Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return bqDatasetToMetadata(ds), nil
}
// listDatasets returns a page of datasets in the project, plus a token
// for fetching the next page. all includes hidden datasets; filter is a
// service-side filter expression. The list call is retried on transient
// errors.
func (s *bigqueryService) listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error) {
	req := s.s.Datasets.List(projectID).
		Context(ctx).
		PageToken(pageToken).
		All(all)
	setClientHeader(req.Header())
	if maxResults > 0 {
		req.MaxResults(int64(maxResults))
	}
	if filter != "" {
		req.Filter(filter)
	}
	var res *bq.DatasetList
	err := runWithRetry(ctx, func() (err error) {
		res, err = req.Do()
		return err
	})
	if err != nil {
		return nil, "", err
	}
	var datasets []*Dataset
	for _, d := range res.Datasets {
		datasets = append(datasets, s.convertListedDataset(d))
	}
	return datasets, res.NextPageToken, nil
}
// convertListedDataset turns a dataset list entry into a Dataset handle.
func (s *bigqueryService) convertListedDataset(d *bq.DatasetListDatasets) *Dataset {
	ref := d.DatasetReference
	return &Dataset{
		ProjectID: ref.ProjectId,
		DatasetID: ref.DatasetId,
	}
}
// listJobs returns a page of jobs in the project, plus a token for
// fetching the next page. all includes jobs from all users; state, if
// non-empty, restricts results to jobs in that state.
func (s *bigqueryService) listJobs(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, state string) ([]JobInfo, string, error) {
	req := s.s.Jobs.List(projectID).
		Context(ctx).
		PageToken(pageToken).
		Projection("full").
		AllUsers(all)
	if state != "" {
		req.StateFilter(state)
	}
	setClientHeader(req.Header())
	if maxResults > 0 {
		req.MaxResults(int64(maxResults))
	}
	// Retry transient failures, consistent with listTables and listDatasets.
	var res *bq.JobList
	err := runWithRetry(ctx, func() (err error) {
		res, err = req.Do()
		return err
	})
	if err != nil {
		return nil, "", err
	}
	var jobInfos []JobInfo
	for _, j := range res.Jobs {
		ji, err := s.convertListedJob(j)
		if err != nil {
			return nil, "", err
		}
		jobInfos = append(jobInfos, ji)
	}
	return jobInfos, res.NextPageToken, nil
}
// convertListedJob turns a job list entry into a JobInfo, combining the
// job handle with its status and statistics.
func (s *bigqueryService) convertListedJob(j *bq.JobListJobs) (JobInfo, error) {
	status, err := jobStatusFromProto(j.Status)
	if err != nil {
		return JobInfo{}, err
	}
	status.Statistics = jobStatisticsFromProto(j.Statistics)
	ji := JobInfo{
		Job:    jobFromProtos(j.JobReference, j.Configuration),
		Status: status,
	}
	return ji, nil
}
// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
// See the similar function in ../storage/invoke.go. The main difference is the
// reason for retrying.
func runWithRetry(ctx context.Context, call func() error) error {
	// These parameters match the suggestions in https://cloud.google.com/bigquery/sla.
	backoff := gax.Backoff{
		Initial:    1 * time.Second,
		Max:        32 * time.Second,
		Multiplier: 2,
	}
	return internal.Retry(ctx, backoff, func() (stop bool, err error) {
		err = call()
		if err == nil {
			return true, nil // success: stop retrying
		}
		// Stop on non-retryable errors; otherwise back off and try again.
		return !retryableError(err), err
	})
}
// retryableError reports whether err should be retried.
// This is the correct definition of retryable according to the BigQuery
// team: a googleapi.Error whose first item has reason "backendError" or
// "rateLimitExceeded".
func retryableError(err error) bool {
	gerr, ok := err.(*googleapi.Error)
	if !ok || len(gerr.Errors) == 0 {
		return false
	}
	switch gerr.Errors[0].Reason {
	case "backendError", "rateLimitExceeded":
		return true
	}
	return false
}

View File

@@ -15,6 +15,7 @@
package bigquery package bigquery
import ( import (
"errors"
"fmt" "fmt"
"time" "time"
@@ -59,6 +60,7 @@ type TableMetadata struct {
// Use Legacy SQL for the view query. The default. // Use Legacy SQL for the view query. The default.
// At most one of UseLegacySQL and UseStandardSQL can be true. // At most one of UseLegacySQL and UseStandardSQL can be true.
// Deprecated: use UseLegacySQL.
UseStandardSQL bool UseStandardSQL bool
// If non-nil, the table is partitioned by time. // If non-nil, the table is partitioned by time.
@@ -68,6 +70,12 @@ type TableMetadata struct {
// indefinitely. Expired tables will be deleted and their storage reclaimed. // indefinitely. Expired tables will be deleted and their storage reclaimed.
ExpirationTime time.Time ExpirationTime time.Time
// User-provided labels.
Labels map[string]string
// Information about a table stored outside of BigQuery.
ExternalDataConfig *ExternalDataConfig
// All the fields below are read-only. // All the fields below are read-only.
FullID string // An opaque ID uniquely identifying the table. FullID string // An opaque ID uniquely identifying the table.
@@ -139,6 +147,32 @@ type TimePartitioning struct {
// The amount of time to keep the storage for a partition. // The amount of time to keep the storage for a partition.
// If the duration is empty (0), the data in the partitions do not expire. // If the duration is empty (0), the data in the partitions do not expire.
Expiration time.Duration Expiration time.Duration
// If empty, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the
// table is partitioned by this field. The field must be a top-level TIMESTAMP or
// DATE field. Its mode must be NULLABLE or REQUIRED.
Field string
}
func (p *TimePartitioning) toBQ() *bq.TimePartitioning {
if p == nil {
return nil
}
return &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: int64(p.Expiration / time.Millisecond),
Field: p.Field,
}
}
func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning {
if q == nil {
return nil
}
return &TimePartitioning{
Expiration: time.Duration(q.ExpirationMs) * time.Millisecond,
Field: q.Field,
}
} }
// StreamingBuffer holds information about the streaming buffer. // StreamingBuffer holds information about the streaming buffer.
@@ -155,7 +189,7 @@ type StreamingBuffer struct {
OldestEntryTime time.Time OldestEntryTime time.Time
} }
func (t *Table) tableRefProto() *bq.TableReference { func (t *Table) toBQ() *bq.TableReference {
return &bq.TableReference{ return &bq.TableReference{
ProjectId: t.ProjectID, ProjectId: t.ProjectID,
DatasetId: t.DatasetID, DatasetId: t.DatasetID,
@@ -174,60 +208,280 @@ func (t *Table) implicitTable() bool {
} }
// Create creates a table in the BigQuery service. // Create creates a table in the BigQuery service.
// Pass in a TableMetadata value to configure the dataset. // Pass in a TableMetadata value to configure the table.
// If tm.View.Query is non-empty, the created table will be of type VIEW.
// Expiration can only be set during table creation.
// After table creation, a view can be modified only if its table was initially created
// with a view.
func (t *Table) Create(ctx context.Context, tm *TableMetadata) error { func (t *Table) Create(ctx context.Context, tm *TableMetadata) error {
return t.c.service.createTable(ctx, t.ProjectID, t.DatasetID, t.TableID, tm) table, err := tm.toBQ()
if err != nil {
return err
}
table.TableReference = &bq.TableReference{
ProjectId: t.ProjectID,
DatasetId: t.DatasetID,
TableId: t.TableID,
}
req := t.c.bqs.Tables.Insert(t.ProjectID, t.DatasetID, table).Context(ctx)
setClientHeader(req.Header())
_, err = req.Do()
return err
}
func (tm *TableMetadata) toBQ() (*bq.Table, error) {
t := &bq.Table{}
if tm == nil {
return t, nil
}
if tm.Schema != nil && tm.ViewQuery != "" {
return nil, errors.New("bigquery: provide Schema or ViewQuery, not both")
}
t.FriendlyName = tm.Name
t.Description = tm.Description
t.Labels = tm.Labels
if tm.Schema != nil {
t.Schema = tm.Schema.toBQ()
}
if tm.ViewQuery != "" {
if tm.UseStandardSQL && tm.UseLegacySQL {
return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
}
t.View = &bq.ViewDefinition{Query: tm.ViewQuery}
if tm.UseLegacySQL {
t.View.UseLegacySql = true
} else {
t.View.UseLegacySql = false
t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
}
} else if tm.UseLegacySQL || tm.UseStandardSQL {
return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery")
}
t.TimePartitioning = tm.TimePartitioning.toBQ()
if !tm.ExpirationTime.IsZero() {
t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
}
if tm.ExternalDataConfig != nil {
edc := tm.ExternalDataConfig.toBQ()
t.ExternalDataConfiguration = &edc
}
if tm.FullID != "" {
return nil, errors.New("cannot set FullID on create")
}
if tm.Type != "" {
return nil, errors.New("cannot set Type on create")
}
if !tm.CreationTime.IsZero() {
return nil, errors.New("cannot set CreationTime on create")
}
if !tm.LastModifiedTime.IsZero() {
return nil, errors.New("cannot set LastModifiedTime on create")
}
if tm.NumBytes != 0 {
return nil, errors.New("cannot set NumBytes on create")
}
if tm.NumRows != 0 {
return nil, errors.New("cannot set NumRows on create")
}
if tm.StreamingBuffer != nil {
return nil, errors.New("cannot set StreamingBuffer on create")
}
if tm.ETag != "" {
return nil, errors.New("cannot set ETag on create")
}
return t, nil
} }
// Metadata fetches the metadata for the table. // Metadata fetches the metadata for the table.
func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error) { func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error) {
return t.c.service.getTableMetadata(ctx, t.ProjectID, t.DatasetID, t.TableID) req := t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
setClientHeader(req.Header())
var table *bq.Table
err := runWithRetry(ctx, func() (err error) {
table, err = req.Do()
return err
})
if err != nil {
return nil, err
}
return bqToTableMetadata(table)
}
func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) {
md := &TableMetadata{
Description: t.Description,
Name: t.FriendlyName,
Type: TableType(t.Type),
FullID: t.Id,
Labels: t.Labels,
NumBytes: t.NumBytes,
NumRows: t.NumRows,
ExpirationTime: unixMillisToTime(t.ExpirationTime),
CreationTime: unixMillisToTime(t.CreationTime),
LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
ETag: t.Etag,
}
if t.Schema != nil {
md.Schema = bqToSchema(t.Schema)
}
if t.View != nil {
md.ViewQuery = t.View.Query
md.UseLegacySQL = t.View.UseLegacySql
}
md.TimePartitioning = bqToTimePartitioning(t.TimePartitioning)
if t.StreamingBuffer != nil {
md.StreamingBuffer = &StreamingBuffer{
EstimatedBytes: t.StreamingBuffer.EstimatedBytes,
EstimatedRows: t.StreamingBuffer.EstimatedRows,
OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)),
}
}
if t.ExternalDataConfiguration != nil {
edc, err := bqToExternalDataConfig(t.ExternalDataConfiguration)
if err != nil {
return nil, err
}
md.ExternalDataConfig = edc
}
return md, nil
} }
// Delete deletes the table. // Delete deletes the table.
func (t *Table) Delete(ctx context.Context) error { func (t *Table) Delete(ctx context.Context) error {
return t.c.service.deleteTable(ctx, t.ProjectID, t.DatasetID, t.TableID) req := t.c.bqs.Tables.Delete(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
setClientHeader(req.Header())
return req.Do()
} }
// Read fetches the contents of the table. // Read fetches the contents of the table.
func (t *Table) Read(ctx context.Context) *RowIterator { func (t *Table) Read(ctx context.Context) *RowIterator {
return newRowIterator(ctx, t.c.service, &readTableConf{ return t.read(ctx, fetchPage)
projectID: t.ProjectID, }
datasetID: t.DatasetID,
tableID: t.TableID, func (t *Table) read(ctx context.Context, pf pageFetcher) *RowIterator {
}) return newRowIterator(ctx, t, pf)
} }
// Update modifies specific Table metadata fields. // Update modifies specific Table metadata fields.
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (*TableMetadata, error) { func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (*TableMetadata, error) {
var conf patchTableConf bqt := tm.toBQ()
call := t.c.bqs.Tables.Patch(t.ProjectID, t.DatasetID, t.TableID, bqt).Context(ctx)
setClientHeader(call.Header())
if etag != "" {
call.Header().Set("If-Match", etag)
}
var res *bq.Table
if err := runWithRetry(ctx, func() (err error) {
res, err = call.Do()
return err
}); err != nil {
return nil, err
}
return bqToTableMetadata(res)
}
func (tm *TableMetadataToUpdate) toBQ() *bq.Table {
t := &bq.Table{}
forceSend := func(field string) {
t.ForceSendFields = append(t.ForceSendFields, field)
}
if tm.Description != nil { if tm.Description != nil {
s := optional.ToString(tm.Description) t.Description = optional.ToString(tm.Description)
conf.Description = &s forceSend("Description")
} }
if tm.Name != nil { if tm.Name != nil {
s := optional.ToString(tm.Name) t.FriendlyName = optional.ToString(tm.Name)
conf.Name = &s forceSend("FriendlyName")
} }
conf.Schema = tm.Schema if tm.Schema != nil {
conf.ExpirationTime = tm.ExpirationTime t.Schema = tm.Schema.toBQ()
return t.c.service.patchTable(ctx, t.ProjectID, t.DatasetID, t.TableID, &conf, etag) forceSend("Schema")
}
if !tm.ExpirationTime.IsZero() {
t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
forceSend("ExpirationTime")
}
if tm.ViewQuery != nil {
t.View = &bq.ViewDefinition{
Query: optional.ToString(tm.ViewQuery),
ForceSendFields: []string{"Query"},
}
}
if tm.UseLegacySQL != nil {
if t.View == nil {
t.View = &bq.ViewDefinition{}
}
t.View.UseLegacySql = optional.ToBool(tm.UseLegacySQL)
t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
}
labels, forces, nulls := tm.update()
t.Labels = labels
t.ForceSendFields = append(t.ForceSendFields, forces...)
t.NullFields = append(t.NullFields, nulls...)
return t
} }
// TableMetadataToUpdate is used when updating a table's metadata. // TableMetadataToUpdate is used when updating a table's metadata.
// Only non-nil fields will be updated. // Only non-nil fields will be updated.
type TableMetadataToUpdate struct { type TableMetadataToUpdate struct {
// Description is the user-friendly description of this table. // The user-friendly description of this table.
Description optional.String Description optional.String
// Name is the user-friendly name for this table. // The user-friendly name for this table.
Name optional.String Name optional.String
// Schema is the table's schema. // The table's schema.
// When updating a schema, you can add columns but not remove them. // When updating a schema, you can add columns but not remove them.
Schema Schema Schema Schema
// TODO(jba): support updating the view
// ExpirationTime is the time when this table expires. // The time when this table expires.
ExpirationTime time.Time ExpirationTime time.Time
// The query to use for a view.
ViewQuery optional.String
// Use Legacy SQL for the view query.
UseLegacySQL optional.Bool
labelUpdater
}
// labelUpdater contains common code for updating labels.
type labelUpdater struct {
setLabels map[string]string
deleteLabels map[string]bool
}
// SetLabel causes a label to be added or modified on a call to Update.
func (u *labelUpdater) SetLabel(name, value string) {
if u.setLabels == nil {
u.setLabels = map[string]string{}
}
u.setLabels[name] = value
}
// DeleteLabel causes a label to be deleted on a call to Update.
func (u *labelUpdater) DeleteLabel(name string) {
if u.deleteLabels == nil {
u.deleteLabels = map[string]bool{}
}
u.deleteLabels[name] = true
}
func (u *labelUpdater) update() (labels map[string]string, forces, nulls []string) {
if u.setLabels == nil && u.deleteLabels == nil {
return nil, nil, nil
}
labels = map[string]string{}
for k, v := range u.setLabels {
labels[k] = v
}
if len(labels) == 0 && len(u.deleteLabels) > 0 {
forces = []string{"Labels"}
}
for l := range u.deleteLabels {
nulls = append(nulls, "Labels."+l)
}
return labels, forces, nulls
} }

View File

@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved. // Copyright 2017 Google Inc. All Rights Reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -19,11 +19,10 @@ import (
"time" "time"
"cloud.google.com/go/internal/testutil" "cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2" bq "google.golang.org/api/bigquery/v2"
) )
func TestBQTableToMetadata(t *testing.T) { func TestBQToTableMetadata(t *testing.T) {
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local) aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
aTimeMillis := aTime.UnixNano() / 1e6 aTimeMillis := aTime.UnixNano() / 1e6
for _, test := range []struct { for _, test := range []struct {
@@ -52,22 +51,32 @@ func TestBQTableToMetadata(t *testing.T) {
TimePartitioning: &bq.TimePartitioning{ TimePartitioning: &bq.TimePartitioning{
ExpirationMs: 7890, ExpirationMs: 7890,
Type: "DAY", Type: "DAY",
Field: "pfield",
},
Type: "EXTERNAL",
View: &bq.ViewDefinition{Query: "view-query"},
Labels: map[string]string{"a": "b"},
ExternalDataConfiguration: &bq.ExternalDataConfiguration{
SourceFormat: "GOOGLE_SHEETS",
}, },
Type: "EXTERNAL",
View: &bq.ViewDefinition{Query: "view-query"},
}, },
&TableMetadata{ &TableMetadata{
Description: "desc", Description: "desc",
Name: "fname", Name: "fname",
ViewQuery: "view-query", ViewQuery: "view-query",
FullID: "id", FullID: "id",
Type: ExternalTable, Type: ExternalTable,
ExpirationTime: aTime.Truncate(time.Millisecond), Labels: map[string]string{"a": "b"},
CreationTime: aTime.Truncate(time.Millisecond), ExternalDataConfig: &ExternalDataConfig{SourceFormat: GoogleSheets},
LastModifiedTime: aTime.Truncate(time.Millisecond), ExpirationTime: aTime.Truncate(time.Millisecond),
NumBytes: 123, CreationTime: aTime.Truncate(time.Millisecond),
NumRows: 7, LastModifiedTime: aTime.Truncate(time.Millisecond),
TimePartitioning: &TimePartitioning{Expiration: 7890 * time.Millisecond}, NumBytes: 123,
NumRows: 7,
TimePartitioning: &TimePartitioning{
Expiration: 7890 * time.Millisecond,
Field: "pfield",
},
StreamingBuffer: &StreamingBuffer{ StreamingBuffer: &StreamingBuffer{
EstimatedBytes: 11, EstimatedBytes: 11,
EstimatedRows: 3, EstimatedRows: 3,
@@ -77,14 +86,17 @@ func TestBQTableToMetadata(t *testing.T) {
}, },
}, },
} { } {
got := bqTableToMetadata(test.in) got, err := bqToTableMetadata(test.in)
if err != nil {
t.Fatal(err)
}
if diff := testutil.Diff(got, test.want); diff != "" { if diff := testutil.Diff(got, test.want); diff != "" {
t.Errorf("%+v:\n, -got, +want:\n%s", test.in, diff) t.Errorf("%+v:\n, -got, +want:\n%s", test.in, diff)
} }
} }
} }
func TestBQTableFromMetadata(t *testing.T) { func TestTableMetadataToBQ(t *testing.T) {
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local) aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
aTimeMillis := aTime.UnixNano() / 1e6 aTimeMillis := aTime.UnixNano() / 1e6
sc := Schema{fieldSchema("desc", "name", "STRING", false, true)} sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}
@@ -97,10 +109,12 @@ func TestBQTableFromMetadata(t *testing.T) {
{&TableMetadata{}, &bq.Table{}}, {&TableMetadata{}, &bq.Table{}},
{ {
&TableMetadata{ &TableMetadata{
Name: "n", Name: "n",
Description: "d", Description: "d",
Schema: sc, Schema: sc,
ExpirationTime: aTime, ExpirationTime: aTime,
Labels: map[string]string{"a": "b"},
ExternalDataConfig: &ExternalDataConfig{SourceFormat: Bigtable},
}, },
&bq.Table{ &bq.Table{
FriendlyName: "n", FriendlyName: "n",
@@ -111,6 +125,8 @@ func TestBQTableFromMetadata(t *testing.T) {
}, },
}, },
ExpirationTime: aTimeMillis, ExpirationTime: aTimeMillis,
Labels: map[string]string{"a": "b"},
ExternalDataConfiguration: &bq.ExternalDataConfiguration{SourceFormat: "BIGTABLE"},
}, },
}, },
{ {
@@ -142,9 +158,12 @@ func TestBQTableFromMetadata(t *testing.T) {
}, },
{ {
&TableMetadata{ &TableMetadata{
ViewQuery: "q", ViewQuery: "q",
UseStandardSQL: true, UseStandardSQL: true,
TimePartitioning: &TimePartitioning{time.Second}, TimePartitioning: &TimePartitioning{
Expiration: time.Second,
Field: "ofDreams",
},
}, },
&bq.Table{ &bq.Table{
View: &bq.ViewDefinition{ View: &bq.ViewDefinition{
@@ -155,11 +174,12 @@ func TestBQTableFromMetadata(t *testing.T) {
TimePartitioning: &bq.TimePartitioning{ TimePartitioning: &bq.TimePartitioning{
Type: "DAY", Type: "DAY",
ExpirationMs: 1000, ExpirationMs: 1000,
Field: "ofDreams",
}, },
}, },
}, },
} { } {
got, err := bqTableFromMetadata(test.in) got, err := test.in.toBQ()
if err != nil { if err != nil {
t.Fatalf("%+v: %v", test.in, err) t.Fatalf("%+v: %v", test.in, err)
} }
@@ -183,69 +203,89 @@ func TestBQTableFromMetadata(t *testing.T) {
{StreamingBuffer: &StreamingBuffer{}}, {StreamingBuffer: &StreamingBuffer{}},
{ETag: "x"}, {ETag: "x"},
} { } {
_, err := bqTableFromMetadata(in) _, err := in.toBQ()
if err == nil { if err == nil {
t.Errorf("%+v: got nil, want error", in) t.Errorf("%+v: got nil, want error", in)
} }
} }
} }
func TestBQDatasetFromMetadata(t *testing.T) { func TestTableMetadataToUpdateToBQ(t *testing.T) {
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
for _, test := range []struct { for _, test := range []struct {
in *DatasetMetadata tm TableMetadataToUpdate
want *bq.Dataset want *bq.Table
}{ }{
{nil, &bq.Dataset{}}, {
{&DatasetMetadata{Name: "name"}, &bq.Dataset{FriendlyName: "name"}}, tm: TableMetadataToUpdate{},
{&DatasetMetadata{ want: &bq.Table{},
Name: "name", },
Description: "desc", {
DefaultTableExpiration: time.Hour, tm: TableMetadataToUpdate{
Location: "EU", Description: "d",
Labels: map[string]string{"x": "y"}, Name: "n",
}, &bq.Dataset{ },
FriendlyName: "name", want: &bq.Table{
Description: "desc", Description: "d",
DefaultTableExpirationMs: 60 * 60 * 1000, FriendlyName: "n",
Location: "EU", ForceSendFields: []string{"Description", "FriendlyName"},
Labels: map[string]string{"x": "y"}, },
}}, },
{
tm: TableMetadataToUpdate{
Schema: Schema{fieldSchema("desc", "name", "STRING", false, true)},
ExpirationTime: aTime,
},
want: &bq.Table{
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
},
},
ExpirationTime: aTime.UnixNano() / 1e6,
ForceSendFields: []string{"Schema", "ExpirationTime"},
},
},
{
tm: TableMetadataToUpdate{ViewQuery: "q"},
want: &bq.Table{
View: &bq.ViewDefinition{Query: "q", ForceSendFields: []string{"Query"}},
},
},
{
tm: TableMetadataToUpdate{UseLegacySQL: false},
want: &bq.Table{
View: &bq.ViewDefinition{
UseLegacySql: false,
ForceSendFields: []string{"UseLegacySql"},
},
},
},
{
tm: TableMetadataToUpdate{ViewQuery: "q", UseLegacySQL: true},
want: &bq.Table{
View: &bq.ViewDefinition{
Query: "q",
UseLegacySql: true,
ForceSendFields: []string{"Query", "UseLegacySql"},
},
},
},
{
tm: func() (tm TableMetadataToUpdate) {
tm.SetLabel("L", "V")
tm.DeleteLabel("D")
return tm
}(),
want: &bq.Table{
Labels: map[string]string{"L": "V"},
NullFields: []string{"Labels.D"},
},
},
} { } {
got, err := bqDatasetFromMetadata(test.in) got := test.tm.toBQ()
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.want) { if !testutil.Equal(got, test.want) {
t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want) t.Errorf("%+v:\ngot %+v\nwant %+v", test.tm, got, test.want)
} }
} }
// Check that non-writeable fields are unset.
_, err := bqDatasetFromMetadata(&DatasetMetadata{FullID: "x"})
if err == nil {
t.Error("got nil, want error")
}
}
func TestBQDatasetFromUpdateMetadata(t *testing.T) {
dm := DatasetMetadataToUpdate{
Description: "desc",
Name: "name",
DefaultTableExpiration: time.Hour,
}
dm.SetLabel("label", "value")
dm.DeleteLabel("del")
got := bqDatasetFromUpdateMetadata(&dm)
want := &bq.Dataset{
Description: "desc",
FriendlyName: "name",
DefaultTableExpirationMs: 60 * 60 * 1000,
Labels: map[string]string{"label": "value"},
ForceSendFields: []string{"Description", "FriendlyName"},
NullFields: []string{"Labels.del"},
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("-got, +want:\n%s", diff)
}
} }

View File

@@ -20,6 +20,7 @@ import (
"reflect" "reflect"
"golang.org/x/net/context" "golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
) )
// An Uploader does streaming inserts into a BigQuery table. // An Uploader does streaming inserts into a BigQuery table.
@@ -151,27 +152,73 @@ func toValueSaver(x interface{}) (ValueSaver, bool, error) {
} }
func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error { func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
var rows []*insertionRow req, err := u.newInsertRequest(src)
for _, saver := range src { if err != nil {
return err
}
if req == nil {
return nil
}
call := u.t.c.bqs.Tabledata.InsertAll(u.t.ProjectID, u.t.DatasetID, u.t.TableID, req)
call = call.Context(ctx)
setClientHeader(call.Header())
var res *bq.TableDataInsertAllResponse
err = runWithRetry(ctx, func() (err error) {
res, err = call.Do()
return err
})
if err != nil {
return err
}
return handleInsertErrors(res.InsertErrors, req.Rows)
}
func (u *Uploader) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) {
if savers == nil { // If there are no rows, do nothing.
return nil, nil
}
req := &bq.TableDataInsertAllRequest{
TemplateSuffix: u.TableTemplateSuffix,
IgnoreUnknownValues: u.IgnoreUnknownValues,
SkipInvalidRows: u.SkipInvalidRows,
}
for _, saver := range savers {
row, insertID, err := saver.Save() row, insertID, err := saver.Save()
if err != nil { if err != nil {
return err return nil, err
} }
rows = append(rows, &insertionRow{InsertID: insertID, Row: row}) if insertID == "" {
insertID = randomIDFn()
}
m := make(map[string]bq.JsonValue)
for k, v := range row {
m[k] = bq.JsonValue(v)
}
req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
InsertId: insertID,
Json: m,
})
} }
return req, nil
return u.t.c.service.insertRows(ctx, u.t.ProjectID, u.t.DatasetID, u.t.TableID, rows, &insertRowsConf{
skipInvalidRows: u.SkipInvalidRows,
ignoreUnknownValues: u.IgnoreUnknownValues,
templateSuffix: u.TableTemplateSuffix,
})
} }
// An insertionRow represents a row of data to be inserted into a table. func handleInsertErrors(ierrs []*bq.TableDataInsertAllResponseInsertErrors, rows []*bq.TableDataInsertAllRequestRows) error {
type insertionRow struct { if len(ierrs) == 0 {
// If InsertID is non-empty, BigQuery will use it to de-duplicate insertions of return nil
// this row on a best-effort basis. }
InsertID string var errs PutMultiError
// The data to be inserted, represented as a map from field name to Value. for _, e := range ierrs {
Row map[string]Value if int(e.Index) > len(rows) {
return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
}
rie := RowInsertionError{
InsertID: rows[e.Index].InsertId,
RowIndex: int(e.Index),
}
for _, errp := range e.Errors {
rie.Errors = append(rie.Errors, bqToError(errp))
}
errs = append(errs, rie)
}
return errs
} }

View File

@@ -15,228 +15,138 @@
package bigquery package bigquery
import ( import (
"errors"
"strconv"
"testing" "testing"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"cloud.google.com/go/internal/pretty" "cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil" "cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
"golang.org/x/net/context"
) )
type testSaver struct { type testSaver struct {
ir *insertionRow row map[string]Value
err error insertID string
err error
} }
func (ts testSaver) Save() (map[string]Value, string, error) { func (ts testSaver) Save() (map[string]Value, string, error) {
return ts.ir.Row, ts.ir.InsertID, ts.err return ts.row, ts.insertID, ts.err
} }
func TestRejectsNonValueSavers(t *testing.T) { func TestNewInsertRequest(t *testing.T) {
client := &Client{projectID: "project-id"} prev := randomIDFn
u := Uploader{t: client.Dataset("dataset-id").Table("table-id")} n := 0
inputs := []interface{}{ randomIDFn = func() string { n++; return strconv.Itoa(n) }
1, defer func() { randomIDFn = prev }()
[]int{1, 2},
[]interface{}{
testSaver{ir: &insertionRow{"a", map[string]Value{"one": 1}}},
1,
},
StructSaver{},
}
for _, in := range inputs {
if err := u.Put(context.Background(), in); err == nil {
t.Errorf("put value: %v; got nil, want error", in)
}
}
}
type insertRowsRecorder struct { tests := []struct {
rowBatches [][]*insertionRow ul *Uploader
service savers []ValueSaver
} req *bq.TableDataInsertAllRequest
func (irr *insertRowsRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
irr.rowBatches = append(irr.rowBatches, rows)
return nil
}
func TestInsertsData(t *testing.T) {
testCases := []struct {
data [][]*insertionRow
}{ }{
{ {
data: [][]*insertionRow{ ul: &Uploader{},
{ savers: nil,
&insertionRow{"a", map[string]Value{"one": 1}}, req: nil,
},
{
ul: &Uploader{},
savers: []ValueSaver{
testSaver{row: map[string]Value{"one": 1}},
testSaver{row: map[string]Value{"two": 2}},
},
req: &bq.TableDataInsertAllRequest{
Rows: []*bq.TableDataInsertAllRequestRows{
{InsertId: "1", Json: map[string]bq.JsonValue{"one": 1}},
{InsertId: "2", Json: map[string]bq.JsonValue{"two": 2}},
}, },
}, },
}, },
{ {
ul: &Uploader{
data: [][]*insertionRow{ TableTemplateSuffix: "suffix",
{ IgnoreUnknownValues: true,
&insertionRow{"a", map[string]Value{"one": 1}}, SkipInvalidRows: true,
&insertionRow{"b", map[string]Value{"two": 2}},
},
}, },
}, savers: []ValueSaver{
{ testSaver{insertID: "a", row: map[string]Value{"one": 1}},
testSaver{insertID: "", row: map[string]Value{"two": 2}},
data: [][]*insertionRow{
{
&insertionRow{"a", map[string]Value{"one": 1}},
},
{
&insertionRow{"b", map[string]Value{"two": 2}},
},
}, },
}, req: &bq.TableDataInsertAllRequest{
{ Rows: []*bq.TableDataInsertAllRequestRows{
{InsertId: "a", Json: map[string]bq.JsonValue{"one": 1}},
data: [][]*insertionRow{ {InsertId: "3", Json: map[string]bq.JsonValue{"two": 2}},
{
&insertionRow{"a", map[string]Value{"one": 1}},
&insertionRow{"b", map[string]Value{"two": 2}},
},
{
&insertionRow{"c", map[string]Value{"three": 3}},
&insertionRow{"d", map[string]Value{"four": 4}},
}, },
TemplateSuffix: "suffix",
SkipInvalidRows: true,
IgnoreUnknownValues: true,
}, },
}, },
} }
for _, tc := range testCases {
irr := &insertRowsRecorder{}
client := &Client{
projectID: "project-id",
service: irr,
}
u := client.Dataset("dataset-id").Table("table-id").Uploader()
for _, batch := range tc.data {
if len(batch) == 0 {
continue
}
var toUpload interface{}
if len(batch) == 1 {
toUpload = testSaver{ir: batch[0]}
} else {
savers := []testSaver{}
for _, row := range batch {
savers = append(savers, testSaver{ir: row})
}
toUpload = savers
}
err := u.Put(context.Background(), toUpload)
if err != nil {
t.Errorf("expected successful Put of ValueSaver; got: %v", err)
}
}
if got, want := irr.rowBatches, tc.data; !testutil.Equal(got, want) {
t.Errorf("got: %v, want: %v", got, want)
}
}
}
type uploadOptionRecorder struct {
received *insertRowsConf
service
}
func (u *uploadOptionRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
u.received = conf
return nil
}
func TestUploadOptionsPropagate(t *testing.T) {
// we don't care for the data in this testcase.
dummyData := testSaver{ir: &insertionRow{}}
recorder := new(uploadOptionRecorder)
c := &Client{service: recorder}
table := &Table{
ProjectID: "project-id",
DatasetID: "dataset-id",
TableID: "table-id",
c: c,
}
tests := [...]struct {
ul *Uploader
conf insertRowsConf
}{
{
// test zero options lead to zero value for insertRowsConf
ul: table.Uploader(),
},
{
ul: func() *Uploader {
u := table.Uploader()
u.TableTemplateSuffix = "suffix"
return u
}(),
conf: insertRowsConf{
templateSuffix: "suffix",
},
},
{
ul: func() *Uploader {
u := table.Uploader()
u.IgnoreUnknownValues = true
return u
}(),
conf: insertRowsConf{
ignoreUnknownValues: true,
},
},
{
ul: func() *Uploader {
u := table.Uploader()
u.SkipInvalidRows = true
return u
}(),
conf: insertRowsConf{
skipInvalidRows: true,
},
},
{ // multiple upload options combine
ul: func() *Uploader {
u := table.Uploader()
u.TableTemplateSuffix = "suffix"
u.IgnoreUnknownValues = true
u.SkipInvalidRows = true
return u
}(),
conf: insertRowsConf{
templateSuffix: "suffix",
skipInvalidRows: true,
ignoreUnknownValues: true,
},
},
}
for i, tc := range tests { for i, tc := range tests {
err := tc.ul.Put(context.Background(), dummyData) got, err := tc.ul.newInsertRequest(tc.savers)
if err != nil { if err != nil {
t.Fatalf("%d: expected successful Put of ValueSaver; got: %v", i, err) t.Fatal(err)
} }
want := tc.req
if recorder.received == nil { if !testutil.Equal(got, want) {
t.Fatalf("%d: received no options at all!", i) t.Errorf("%d: %#v: got %#v, want %#v", i, tc.ul, got, want)
} }
}
}
want := tc.conf func TestNewInsertRequestErrors(t *testing.T) {
got := *recorder.received var u Uploader
if got != want { _, err := u.newInsertRequest([]ValueSaver{testSaver{err: errors.New("!")}})
t.Errorf("%d: got %#v, want %#v, ul=%#v", i, got, want, tc.ul) if err == nil {
t.Error("got nil, want error")
}
}
func TestHandleInsertErrors(t *testing.T) {
rows := []*bq.TableDataInsertAllRequestRows{
{InsertId: "a"},
{InsertId: "b"},
}
for _, test := range []struct {
in []*bq.TableDataInsertAllResponseInsertErrors
want error
}{
{
in: nil,
want: nil,
},
{
in: []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}},
want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}},
},
{
in: []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}},
want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}},
},
{
in: []*bq.TableDataInsertAllResponseInsertErrors{
{Errors: []*bq.ErrorProto{{Message: "m0"}}, Index: 0},
{Errors: []*bq.ErrorProto{{Message: "m1"}}, Index: 1},
},
want: PutMultiError{
RowInsertionError{InsertID: "a", RowIndex: 0, Errors: []error{&Error{Message: "m0"}}},
RowInsertionError{InsertID: "b", RowIndex: 1, Errors: []error{&Error{Message: "m1"}}},
},
},
} {
got := handleInsertErrors(test.in, rows)
if !testutil.Equal(got, test.want) {
t.Errorf("%#v:\ngot\n%#v\nwant\n%#v", test.in, got, test.want)
} }
} }
} }
func TestValueSavers(t *testing.T) { func TestValueSavers(t *testing.T) {
ts := &testSaver{ir: &insertionRow{}} ts := &testSaver{}
type T struct{ I int } type T struct{ I int }
schema, err := InferSchema(T{}) schema, err := InferSchema(T{})
if err != nil { if err != nil {
@@ -246,6 +156,8 @@ func TestValueSavers(t *testing.T) {
in interface{} in interface{}
want []ValueSaver want []ValueSaver
}{ }{
{[]interface{}(nil), nil},
{[]interface{}{}, nil},
{ts, []ValueSaver{ts}}, {ts, []ValueSaver{ts}},
{T{I: 1}, []ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}}, {T{I: 1}, []ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}},
{[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}}, {[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}},
@@ -279,3 +191,21 @@ func TestValueSavers(t *testing.T) {
} }
} }
} }
func TestValueSaversErrors(t *testing.T) {
inputs := []interface{}{
nil,
1,
[]int{1, 2},
[]interface{}{
testSaver{row: map[string]Value{"one": 1}, insertID: "a"},
1,
},
StructSaver{},
}
for _, in := range inputs {
if _, err := valueSavers(in); err == nil {
t.Errorf("%#v: got nil, want error", in)
}
}
}

View File

@@ -243,7 +243,7 @@ func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, err
} }
// determineSetFunc chooses the best function for setting a field of type ftype // determineSetFunc chooses the best function for setting a field of type ftype
// to a value whose schema field type is sftype. It returns nil if stype // to a value whose schema field type is stype. It returns nil if stype
// is not assignable to ftype. // is not assignable to ftype.
// determineSetFunc considers only basic types. See compileToOps for // determineSetFunc considers only basic types. See compileToOps for
// handling of repetition and nesting. // handling of repetition and nesting.
@@ -405,7 +405,7 @@ func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) {
m := make(map[string]Value) m := make(map[string]Value)
for i, fieldSchema := range schema { for i, fieldSchema := range schema {
if fieldSchema.Type != RecordFieldType { if fieldSchema.Type != RecordFieldType {
m[fieldSchema.Name] = vs[i] m[fieldSchema.Name] = toUploadValue(vs[i], fieldSchema)
continue continue
} }
// Nested record, possibly repeated. // Nested record, possibly repeated.
@@ -510,14 +510,9 @@ func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (i
schemaField.Name, vfield.Type()) schemaField.Name, vfield.Type())
} }
// A non-nested field can be represented by its Go value. // A non-nested field can be represented by its Go value, except for civil times.
if schemaField.Type != RecordFieldType { if schemaField.Type != RecordFieldType {
if !schemaField.Repeated || vfield.Len() > 0 { return toUploadValueReflect(vfield, schemaField), nil
return vfield.Interface(), nil
}
// The service treats a null repeated field as an error. Return
// nil to omit the field entirely.
return nil, nil
} }
// A non-repeated nested field is converted into a map[string]Value. // A non-repeated nested field is converted into a map[string]Value.
if !schemaField.Repeated { if !schemaField.Repeated {
@@ -545,6 +540,73 @@ func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (i
return vals, nil return vals, nil
} }
func toUploadValue(val interface{}, fs *FieldSchema) interface{} {
if fs.Type == TimeFieldType || fs.Type == DateTimeFieldType {
return toUploadValueReflect(reflect.ValueOf(val), fs)
}
return val
}
func toUploadValueReflect(v reflect.Value, fs *FieldSchema) interface{} {
switch fs.Type {
case TimeFieldType:
return civilToUploadValue(v, fs, func(v reflect.Value) string {
return CivilTimeString(v.Interface().(civil.Time))
})
case DateTimeFieldType:
return civilToUploadValue(v, fs, func(v reflect.Value) string {
return CivilDateTimeString(v.Interface().(civil.DateTime))
})
default:
if !fs.Repeated || v.Len() > 0 {
return v.Interface()
}
// The service treats a null repeated field as an error. Return
// nil to omit the field entirely.
return nil
}
}
func civilToUploadValue(v reflect.Value, fs *FieldSchema, cvt func(reflect.Value) string) interface{} {
if !fs.Repeated {
return cvt(v)
}
if v.Len() == 0 {
return nil
}
s := make([]string, v.Len())
for i := 0; i < v.Len(); i++ {
s[i] = cvt(v.Index(i))
}
return s
}
// CivilTimeString returns a string representing a civil.Time in a format compatible
// with BigQuery SQL. It rounds the time to the nearest microsecond and returns a
// string with six digits of sub-second precision.
//
// Use CivilTimeString when using civil.Time in DML, for example in INSERT
// statements.
func CivilTimeString(t civil.Time) string {
if t.Nanosecond == 0 {
return t.String()
} else {
micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond
t.Nanosecond = 0
return t.String() + fmt.Sprintf(".%06d", micro)
}
}
// CivilDateTimeString returns a string representing a civil.DateTime in a format compatible
// with BigQuery SQL. It separate the date and time with a space, and formats the time
// with CivilTimeString.
//
// Use CivilDateTimeString when using civil.DateTime in DML, for example in INSERT
// statements.
func CivilDateTimeString(dt civil.DateTime) string {
return dt.Date.String() + " " + CivilTimeString(dt.Time)
}
// convertRows converts a series of TableRows into a series of Value slices. // convertRows converts a series of TableRows into a series of Value slices.
// schema is used to interpret the data from rows; its length must match the // schema is used to interpret the data from rows; its length must match the
// length of each row. // length of each row.
@@ -618,7 +680,6 @@ func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, erro
for i, cell := range record { for i, cell := range record {
// each cell contains a single entry, keyed by "v" // each cell contains a single entry, keyed by "v"
val := cell.(map[string]interface{})["v"] val := cell.(map[string]interface{})["v"]
fs := schema[i] fs := schema[i]
v, err := convertValue(val, fs.Type, fs.Schema) v, err := convertValue(val, fs.Type, fs.Schema)
if err != nil { if err != nil {

View File

@@ -58,22 +58,31 @@ func TestConvertBasicValues(t *testing.T) {
} }
func TestConvertTime(t *testing.T) { func TestConvertTime(t *testing.T) {
// TODO(jba): add tests for civil time types.
schema := []*FieldSchema{ schema := []*FieldSchema{
{Type: TimestampFieldType}, {Type: TimestampFieldType},
{Type: DateFieldType},
{Type: TimeFieldType},
{Type: DateTimeFieldType},
} }
thyme := time.Date(1970, 1, 1, 10, 0, 0, 10, time.UTC) ts := testTimestamp.Round(time.Millisecond)
row := &bq.TableRow{ row := &bq.TableRow{
F: []*bq.TableCell{ F: []*bq.TableCell{
{V: fmt.Sprintf("%.10f", float64(thyme.UnixNano())/1e9)}, {V: fmt.Sprintf("%.10f", float64(ts.UnixNano())/1e9)},
{V: testDate.String()},
{V: testTime.String()},
{V: testDateTime.String()},
}, },
} }
got, err := convertRow(row, schema) got, err := convertRow(row, schema)
if err != nil { if err != nil {
t.Fatalf("error converting: %v", err) t.Fatalf("error converting: %v", err)
} }
if !got[0].(time.Time).Equal(thyme) { want := []Value{ts, testDate, testTime, testDateTime}
t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, thyme) for i, g := range got {
w := want[i]
if !testutil.Equal(g, w) {
t.Errorf("#%d: got:\n%v\nwant:\n%v", i, g, w)
}
} }
if got[0].(time.Time).Location() != time.UTC { if got[0].(time.Time).Location() != time.UTC {
t.Errorf("expected time zone UTC: got:\n%v", got) t.Errorf("expected time zone UTC: got:\n%v", got)
@@ -337,24 +346,58 @@ func TestRepeatedRecordContainingRecord(t *testing.T) {
} }
} }
func TestConvertRowErrors(t *testing.T) {
// mismatched lengths
if _, err := convertRow(&bq.TableRow{F: []*bq.TableCell{{V: ""}}}, Schema{}); err == nil {
t.Error("got nil, want error")
}
v3 := map[string]interface{}{"v": 3}
for _, test := range []struct {
value interface{}
fs FieldSchema
}{
{3, FieldSchema{Type: IntegerFieldType}}, // not a string
{[]interface{}{v3}, // not a string, repeated
FieldSchema{Type: IntegerFieldType, Repeated: true}},
{map[string]interface{}{"f": []interface{}{v3}}, // not a string, nested
FieldSchema{Type: RecordFieldType, Schema: Schema{{Type: IntegerFieldType}}}},
{map[string]interface{}{"f": []interface{}{v3}}, // wrong length, nested
FieldSchema{Type: RecordFieldType, Schema: Schema{}}},
} {
_, err := convertRow(
&bq.TableRow{F: []*bq.TableCell{{V: test.value}}},
Schema{&test.fs})
if err == nil {
t.Errorf("value %v, fs %v: got nil, want error", test.value, test.fs)
}
}
// bad field type
if _, err := convertBasicType("", FieldType("BAD")); err == nil {
t.Error("got nil, want error")
}
}
func TestValuesSaverConvertsToMap(t *testing.T) { func TestValuesSaverConvertsToMap(t *testing.T) {
testCases := []struct { testCases := []struct {
vs ValuesSaver vs ValuesSaver
want *insertionRow wantInsertID string
wantRow map[string]Value
}{ }{
{ {
vs: ValuesSaver{ vs: ValuesSaver{
Schema: []*FieldSchema{ Schema: []*FieldSchema{
{Name: "intField", Type: IntegerFieldType}, {Name: "intField", Type: IntegerFieldType},
{Name: "strField", Type: StringFieldType}, {Name: "strField", Type: StringFieldType},
{Name: "dtField", Type: DateTimeFieldType},
}, },
InsertID: "iid", InsertID: "iid",
Row: []Value{1, "a"}, Row: []Value{1, "a",
}, civil.DateTime{civil.Date{1, 2, 3}, civil.Time{4, 5, 6, 7000}}},
want: &insertionRow{
InsertID: "iid",
Row: map[string]Value{"intField": 1, "strField": "a"},
}, },
wantInsertID: "iid",
wantRow: map[string]Value{"intField": 1, "strField": "a",
"dtField": "0001-02-03 04:05:06.000007"},
}, },
{ {
vs: ValuesSaver{ vs: ValuesSaver{
@@ -371,13 +414,11 @@ func TestValuesSaverConvertsToMap(t *testing.T) {
InsertID: "iid", InsertID: "iid",
Row: []Value{1, []Value{[]Value{2, 3}}}, Row: []Value{1, []Value{[]Value{2, 3}}},
}, },
want: &insertionRow{ wantInsertID: "iid",
InsertID: "iid", wantRow: map[string]Value{
Row: map[string]Value{ "intField": 1,
"intField": 1, "recordField": map[string]Value{
"recordField": map[string]Value{ "nestedInt": []Value{2, 3},
"nestedInt": []Value{2, 3},
},
}, },
}, },
}, },
@@ -402,25 +443,59 @@ func TestValuesSaverConvertsToMap(t *testing.T) {
}, },
}, },
}, },
want: &insertionRow{ wantInsertID: "iid",
InsertID: "iid", wantRow: map[string]Value{
Row: map[string]Value{ "records": []Value{
"records": []Value{ map[string]Value{"x": 1, "y": 2},
map[string]Value{"x": 1, "y": 2}, map[string]Value{"x": 3, "y": 4},
map[string]Value{"x": 3, "y": 4},
},
}, },
}, },
}, },
} }
for _, tc := range testCases { for _, tc := range testCases {
data, insertID, err := tc.vs.Save() gotRow, gotInsertID, err := tc.vs.Save()
if err != nil { if err != nil {
t.Errorf("Expected successful save; got: %v", err) t.Errorf("Expected successful save; got: %v", err)
continue
} }
got := &insertionRow{insertID, data} if !testutil.Equal(gotRow, tc.wantRow) {
if !testutil.Equal(got, tc.want) { t.Errorf("%v row:\ngot:\n%+v\nwant:\n%+v", tc.vs, gotRow, tc.wantRow)
t.Errorf("saving ValuesSaver:\ngot:\n%+v\nwant:\n%+v", got, tc.want) }
if !testutil.Equal(gotInsertID, tc.wantInsertID) {
t.Errorf("%v ID:\ngot:\n%+v\nwant:\n%+v", tc.vs, gotInsertID, tc.wantInsertID)
}
}
}
func TestValuesToMapErrors(t *testing.T) {
for _, test := range []struct {
values []Value
schema Schema
}{
{ // mismatched length
[]Value{1},
Schema{},
},
{ // nested record not a slice
[]Value{1},
Schema{{Type: RecordFieldType}},
},
{ // nested record mismatched length
[]Value{[]Value{1}},
Schema{{Type: RecordFieldType}},
},
{ // nested repeated record not a slice
[]Value{[]Value{1}},
Schema{{Type: RecordFieldType, Repeated: true}},
},
{ // nested repeated record mismatched length
[]Value{[]Value{[]Value{1}}},
Schema{{Type: RecordFieldType, Repeated: true}},
},
} {
_, err := valuesToMap(test.values, test.schema)
if err == nil {
t.Errorf("%v, %v: got nil, want error", test.values, test.schema)
} }
} }
} }
@@ -429,6 +504,8 @@ func TestStructSaver(t *testing.T) {
schema := Schema{ schema := Schema{
{Name: "s", Type: StringFieldType}, {Name: "s", Type: StringFieldType},
{Name: "r", Type: IntegerFieldType, Repeated: true}, {Name: "r", Type: IntegerFieldType, Repeated: true},
{Name: "t", Type: TimeFieldType},
{Name: "tr", Type: TimeFieldType, Repeated: true},
{Name: "nested", Type: RecordFieldType, Schema: Schema{ {Name: "nested", Type: RecordFieldType, Schema: Schema{
{Name: "b", Type: BooleanFieldType}, {Name: "b", Type: BooleanFieldType},
}}, }},
@@ -442,6 +519,8 @@ func TestStructSaver(t *testing.T) {
T struct { T struct {
S string S string
R []int R []int
T civil.Time
TR []civil.Time
Nested *N Nested *N
Rnested []*N Rnested []*N
} }
@@ -464,22 +543,27 @@ func TestStructSaver(t *testing.T) {
t.Errorf("%s:\ngot\n%#v\nwant\n%#v", msg, got, want) t.Errorf("%s:\ngot\n%#v\nwant\n%#v", msg, got, want)
} }
} }
ct1 := civil.Time{1, 2, 3, 4000}
ct2 := civil.Time{5, 6, 7, 8000}
in := T{ in := T{
S: "x", S: "x",
R: []int{1, 2}, R: []int{1, 2},
T: ct1,
TR: []civil.Time{ct1, ct2},
Nested: &N{B: true}, Nested: &N{B: true},
Rnested: []*N{{true}, {false}}, Rnested: []*N{{true}, {false}},
} }
want := map[string]Value{ want := map[string]Value{
"s": "x", "s": "x",
"r": []int{1, 2}, "r": []int{1, 2},
"t": "01:02:03.000004",
"tr": []string{"01:02:03.000004", "05:06:07.000008"},
"nested": map[string]Value{"b": true}, "nested": map[string]Value{"b": true},
"rnested": []Value{map[string]Value{"b": true}, map[string]Value{"b": false}}, "rnested": []Value{map[string]Value{"b": true}, map[string]Value{"b": false}},
} }
check("all values", in, want) check("all values", in, want)
check("all values, ptr", &in, want) check("all values, ptr", &in, want)
check("empty struct", T{}, map[string]Value{"s": ""}) check("empty struct", T{}, map[string]Value{"s": "", "t": "00:00:00"})
// Missing and extra fields ignored. // Missing and extra fields ignored.
type T2 struct { type T2 struct {
@@ -492,10 +576,39 @@ func TestStructSaver(t *testing.T) {
check("nils in slice", T{Rnested: []*N{{true}, nil, {false}}}, check("nils in slice", T{Rnested: []*N{{true}, nil, {false}}},
map[string]Value{ map[string]Value{
"s": "", "s": "",
"t": "00:00:00",
"rnested": []Value{map[string]Value{"b": true}, map[string]Value(nil), map[string]Value{"b": false}}, "rnested": []Value{map[string]Value{"b": true}, map[string]Value(nil), map[string]Value{"b": false}},
}) })
} }
func TestStructSaverErrors(t *testing.T) {
type (
badField struct {
I int `bigquery:"@"`
}
badR struct{ R int }
badRN struct{ R []int }
)
for i, test := range []struct {
struct_ interface{}
schema Schema
}{
{0, nil}, // not a struct
{&badField{}, nil}, // bad field name
{&badR{}, Schema{{Name: "r", Repeated: true}}}, // repeated field has bad type
{&badR{}, Schema{{Name: "r", Type: RecordFieldType}}}, // nested field has bad type
{&badRN{[]int{0}}, // nested repeated field has bad type
Schema{{Name: "r", Type: RecordFieldType, Repeated: true}}},
} {
ss := &StructSaver{Struct: test.struct_, Schema: test.schema}
_, _, err := ss.Save()
if err == nil {
t.Errorf("#%d, %v, %v: got nil, want error", i, test.struct_, test.schema)
}
}
}
func TestConvertRows(t *testing.T) { func TestConvertRows(t *testing.T) {
schema := []*FieldSchema{ schema := []*FieldSchema{
{Type: StringFieldType}, {Type: StringFieldType},
@@ -528,6 +641,12 @@ func TestConvertRows(t *testing.T) {
if !testutil.Equal(got, want) { if !testutil.Equal(got, want) {
t.Errorf("\ngot %v\nwant %v", got, want) t.Errorf("\ngot %v\nwant %v", got, want)
} }
rows[0].F[0].V = 1
_, err = convertRows(rows, schema)
if err == nil {
t.Error("got nil, want error")
}
} }
func TestValueList(t *testing.T) { func TestValueList(t *testing.T) {
@@ -835,6 +954,65 @@ func TestStructLoaderErrors(t *testing.T) {
type bad2 struct{ I uint } // unsupported integer type type bad2 struct{ I uint } // unsupported integer type
check(&bad2{}) check(&bad2{})
type bad3 struct {
I int `bigquery:"@"`
} // bad field name
check(&bad3{})
type bad4 struct{ Nested int } // non-struct for nested field
check(&bad4{})
type bad5 struct{ Nested struct{ NestS int } } // bad nested struct
check(&bad5{})
bad6 := &struct{ Nums int }{} // non-slice for repeated field
sl := structLoader{}
err := sl.set(bad6, repSchema)
if err == nil {
t.Errorf("%T: got nil, want error", bad6)
}
// sl.set's error is sticky, with even good input.
err2 := sl.set(&repStruct{}, repSchema)
if err2 != err {
t.Errorf("%v != %v, expected equal", err2, err)
}
// sl.Load is similarly sticky
err2 = sl.Load(nil, nil)
if err2 != err {
t.Errorf("%v != %v, expected equal", err2, err)
}
// Null values.
schema := Schema{
{Name: "i", Type: IntegerFieldType},
{Name: "f", Type: FloatFieldType},
{Name: "b", Type: BooleanFieldType},
{Name: "s", Type: StringFieldType},
{Name: "by", Type: BytesFieldType},
{Name: "d", Type: DateFieldType},
}
type s struct {
I int
F float64
B bool
S string
By []byte
D civil.Date
}
vals := []Value{int64(0), 0.0, false, "", []byte{}, testDate}
if err := load(&s{}, schema, vals); err != nil {
t.Fatal(err)
}
for i, e := range vals {
vals[i] = nil
got := load(&s{}, schema, vals)
if got != errNoNulls {
t.Errorf("#%d: got %v, want %v", i, got, errNoNulls)
}
vals[i] = e
}
// Using more than one struct type with the same structLoader. // Using more than one struct type with the same structLoader.
type different struct { type different struct {
B bool B bool
@@ -845,11 +1023,11 @@ func TestStructLoaderErrors(t *testing.T) {
Nums []int Nums []int
} }
var sl structLoader sl = structLoader{}
if err := sl.set(&testStruct1{}, schema2); err != nil { if err := sl.set(&testStruct1{}, schema2); err != nil {
t.Fatal(err) t.Fatal(err)
} }
err := sl.set(&different{}, schema2) err = sl.set(&different{}, schema2)
if err == nil { if err == nil {
t.Error("different struct types: got nil, want error") t.Error("different struct types: got nil, want error")
} }

View File

@@ -19,10 +19,12 @@ import (
"testing" "testing"
"time" "time"
"cloud.google.com/go/internal/testutil"
"fmt" "fmt"
"golang.org/x/net/context"
"reflect"
"strings" "strings"
"golang.org/x/net/context"
) )
func TestAdminIntegration(t *testing.T) { func TestAdminIntegration(t *testing.T) {
@@ -126,7 +128,7 @@ func TestAdminIntegration(t *testing.T) {
} }
sort.Strings(tblInfo.Families) sort.Strings(tblInfo.Families)
wantFams := []string{"fam1", "fam2"} wantFams := []string{"fam1", "fam2"}
if !reflect.DeepEqual(tblInfo.Families, wantFams) { if !testutil.Equal(tblInfo.Families, wantFams) {
t.Errorf("Column family mismatch, got %v, want %v", tblInfo.Families, wantFams) t.Errorf("Column family mismatch, got %v, want %v", tblInfo.Families, wantFams)
} }

View File

@@ -19,12 +19,13 @@ package bigtable
import ( import (
"fmt" "fmt"
"math/rand" "math/rand"
"reflect"
"strings" "strings"
"sync" "sync"
"testing" "testing"
"time" "time"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
@@ -67,14 +68,16 @@ func TestClientIntegration(t *testing.T) {
t.Fatalf("IntegrationEnv: %v", err) t.Fatalf("IntegrationEnv: %v", err)
} }
timeout := 30 * time.Second var timeout time.Duration
if testEnv.Config().UseProd { if testEnv.Config().UseProd {
timeout = 5 * time.Minute timeout = 5 * time.Minute
t.Logf("Running test against production") t.Logf("Running test against production")
} else { } else {
timeout = 1 * time.Minute
t.Logf("bttest.Server running on %s", testEnv.Config().AdminEndpoint) t.Logf("bttest.Server running on %s", testEnv.Config().AdminEndpoint)
} }
ctx, _ := context.WithTimeout(context.Background(), timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
client, err := testEnv.NewClient() client, err := testEnv.NewClient()
if err != nil { if err != nil {
@@ -156,7 +159,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "jadams", Column: "follows:tjefferson", Value: []byte("1")}, {Row: "jadams", Column: "follows:tjefferson", Value: []byte("1")},
}, },
} }
if !reflect.DeepEqual(row, wantRow) { if !testutil.Equal(row, wantRow) {
t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow) t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
} }
checkpoint("tested ReadRow") checkpoint("tested ReadRow")
@@ -319,6 +322,12 @@ func TestClientIntegration(t *testing.T) {
filter: ConditionFilter(ChainFilters(ColumnFilter(".*j.*"), ColumnFilter(".*mckinley.*")), StripValueFilter(), nil), filter: ConditionFilter(ChainFilters(ColumnFilter(".*j.*"), ColumnFilter(".*mckinley.*")), StripValueFilter(), nil),
want: "", want: "",
}, },
{
desc: "chain that ends with an interleave that has no match. covers #804",
rr: RowRange{},
filter: ConditionFilter(ChainFilters(ColumnFilter(".*j.*"), InterleaveFilters(ColumnFilter(".*x.*"), ColumnFilter(".*z.*"))), StripValueFilter(), nil),
want: "",
},
} }
for _, tc := range readTests { for _, tc := range readTests {
var opts []ReadOption var opts []ReadOption
@@ -442,9 +451,13 @@ func TestClientIntegration(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("ApplyReadModifyWrite %+v: %v", step.rmw, err) t.Fatalf("ApplyReadModifyWrite %+v: %v", step.rmw, err)
} }
// Make sure the modified cell returned by the RMW operation has a timestamp.
if row["counter"][0].Timestamp == 0 {
t.Errorf("RMW returned cell timestamp: got %v, want > 0", row["counter"][0].Timestamp)
}
clearTimestamps(row) clearTimestamps(row)
wantRow := Row{"counter": []ReadItem{{Row: "gwashington", Column: "counter:likes", Value: step.want}}} wantRow := Row{"counter": []ReadItem{{Row: "gwashington", Column: "counter:likes", Value: step.want}}}
if !reflect.DeepEqual(row, wantRow) { if !testutil.Equal(row, wantRow) {
t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow) t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow)
} }
} }
@@ -498,7 +511,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")}, {Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")}, {Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")},
}} }}
if !reflect.DeepEqual(r, wantRow) { if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow) t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow)
} }
// Do the same read, but filter to the latest two versions. // Do the same read, but filter to the latest two versions.
@@ -512,7 +525,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")}, {Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")}, {Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
}} }}
if !reflect.DeepEqual(r, wantRow) { if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow) t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow)
} }
// Check cell offset / limit // Check cell offset / limit
@@ -525,7 +538,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
}} }}
if !reflect.DeepEqual(r, wantRow) { if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and CellsPerRowLimitFilter(3),\n got %v\nwant %v", r, wantRow) t.Errorf("Cell with multiple versions and CellsPerRowLimitFilter(3),\n got %v\nwant %v", r, wantRow)
} }
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(CellsPerRowOffsetFilter(3))) r, err = tbl.ReadRow(ctx, "testrow", RowFilter(CellsPerRowOffsetFilter(3)))
@@ -539,7 +552,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")}, {Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")}, {Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")},
}} }}
if !reflect.DeepEqual(r, wantRow) { if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and CellsPerRowOffsetFilter(3),\n got %v\nwant %v", r, wantRow) t.Errorf("Cell with multiple versions and CellsPerRowOffsetFilter(3),\n got %v\nwant %v", r, wantRow)
} }
// Check timestamp range filtering (with truncation) // Check timestamp range filtering (with truncation)
@@ -553,7 +566,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")}, {Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")}, {Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
}} }}
if !reflect.DeepEqual(r, wantRow) { if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 3000),\n got %v\nwant %v", r, wantRow) t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 3000),\n got %v\nwant %v", r, wantRow)
} }
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1000, 0))) r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1000, 0)))
@@ -568,7 +581,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")}, {Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")}, {Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
}} }}
if !reflect.DeepEqual(r, wantRow) { if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 0),\n got %v\nwant %v", r, wantRow) t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 0),\n got %v\nwant %v", r, wantRow)
} }
// Delete non-existing cells, no such column family in this row // Delete non-existing cells, no such column family in this row
@@ -585,7 +598,7 @@ func TestClientIntegration(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Reading row: %v", err) t.Fatalf("Reading row: %v", err)
} }
if !reflect.DeepEqual(r, wantRow) { if !testutil.Equal(r, wantRow) {
t.Errorf("Cell was deleted unexpectly,\n got %v\nwant %v", r, wantRow) t.Errorf("Cell was deleted unexpectly,\n got %v\nwant %v", r, wantRow)
} }
// Delete non-existing cells, no such column in this column family // Delete non-existing cells, no such column in this column family
@@ -599,7 +612,7 @@ func TestClientIntegration(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Reading row: %v", err) t.Fatalf("Reading row: %v", err)
} }
if !reflect.DeepEqual(r, wantRow) { if !testutil.Equal(r, wantRow) {
t.Errorf("Cell was deleted unexpectly,\n got %v\nwant %v", r, wantRow) t.Errorf("Cell was deleted unexpectly,\n got %v\nwant %v", r, wantRow)
} }
// Delete the cell with timestamp 2000 and repeat the last read, // Delete the cell with timestamp 2000 and repeat the last read,
@@ -619,7 +632,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")}, {Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")}, {Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
}} }}
if !reflect.DeepEqual(r, wantRow) { if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow) t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow)
} }
checkpoint("tested multiple versions in a cell") checkpoint("tested multiple versions in a cell")
@@ -654,7 +667,7 @@ func TestClientIntegration(t *testing.T) {
wantRow = Row{"ts": []ReadItem{ wantRow = Row{"ts": []ReadItem{
{Row: "row1", Column: "ts:col", Timestamp: 0, Value: []byte("3")}, {Row: "row1", Column: "ts:col", Timestamp: 0, Value: []byte("3")},
}} }}
if !reflect.DeepEqual(r, wantRow) { if !testutil.Equal(r, wantRow) {
t.Errorf("column family was not deleted.\n got %v\n want %v", r, wantRow) t.Errorf("column family was not deleted.\n got %v\n want %v", r, wantRow)
} }
@@ -672,7 +685,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "row2", Column: "status:start", Timestamp: 0, Value: []byte("1")}, {Row: "row2", Column: "status:start", Timestamp: 0, Value: []byte("1")},
}, },
} }
if !reflect.DeepEqual(r, wantRow) { if !testutil.Equal(r, wantRow) {
t.Errorf("Column family was deleted unexpectly.\n got %v\n want %v", r, wantRow) t.Errorf("Column family was deleted unexpectly.\n got %v\n want %v", r, wantRow)
} }
checkpoint("tested family delete") checkpoint("tested family delete")
@@ -700,7 +713,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "row3", Column: "status:start", Timestamp: 0, Value: []byte("1")}, {Row: "row3", Column: "status:start", Timestamp: 0, Value: []byte("1")},
}, },
} }
if !reflect.DeepEqual(r, wantRow) { if !testutil.Equal(r, wantRow) {
t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow) t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow)
} }
mut = NewMutation() mut = NewMutation()
@@ -717,7 +730,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")}, {Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")},
}, },
} }
if !reflect.DeepEqual(r, wantRow) { if !testutil.Equal(r, wantRow) {
t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow) t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow)
} }
mut = NewMutation() mut = NewMutation()
@@ -742,7 +755,7 @@ func TestClientIntegration(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Reading row: %v", err) t.Fatalf("Reading row: %v", err)
} }
if !reflect.DeepEqual(r, wantRow) { if !testutil.Equal(r, wantRow) {
t.Errorf("Column was not deleted correctly.\n got %v\n want %v", r, wantRow) t.Errorf("Column was not deleted correctly.\n got %v\n want %v", r, wantRow)
} }
checkpoint("tested column delete") checkpoint("tested column delete")
@@ -791,7 +804,7 @@ func TestClientIntegration(t *testing.T) {
wantRow = Row{"ts": []ReadItem{ wantRow = Row{"ts": []ReadItem{
{Row: "bigrow", Column: "ts:col", Value: bigBytes}, {Row: "bigrow", Column: "ts:col", Value: bigBytes},
}} }}
if !reflect.DeepEqual(r, wantRow) { if !testutil.Equal(r, wantRow) {
t.Errorf("Big read returned incorrect bytes: %v", r) t.Errorf("Big read returned incorrect bytes: %v", r)
} }
// Now write 1000 rows, each with 82 KB values, then scan them all. // Now write 1000 rows, each with 82 KB values, then scan them all.
@@ -879,7 +892,7 @@ func TestClientIntegration(t *testing.T) {
wantItems = append(wantItems, ReadItem{Row: rowKey, Column: "bulk:" + val, Value: []byte("1")}) wantItems = append(wantItems, ReadItem{Row: rowKey, Column: "bulk:" + val, Value: []byte("1")})
} }
wantRow := Row{"bulk": wantItems} wantRow := Row{"bulk": wantItems}
if !reflect.DeepEqual(row, wantRow) { if !testutil.Equal(row, wantRow) {
t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow) t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
} }
} }

View File

@@ -421,12 +421,14 @@ func filterRow(f *btpb.RowFilter, r *row) bool {
} }
} }
} }
var count int
for _, fam := range r.families { for _, fam := range r.families {
for _, cs := range fam.cells { for _, cs := range fam.cells {
sort.Sort(byDescTS(cs)) sort.Sort(byDescTS(cs))
count += len(cs)
} }
} }
return true return count > 0
case *btpb.RowFilter_CellsPerColumnLimitFilter: case *btpb.RowFilter_CellsPerColumnLimitFilter:
lim := int(f.CellsPerColumnLimitFilter) lim := int(f.CellsPerColumnLimitFilter)
for _, fam := range r.families { for _, fam := range r.families {
@@ -690,10 +692,8 @@ func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutate
nr := r.copy() nr := r.copy()
filterRow(req.PredicateFilter, nr) filterRow(req.PredicateFilter, nr)
whichMut = !nr.isEmpty() whichMut = !nr.isEmpty()
// TODO(dsymonds): Figure out if this is supposed to be set
// even when there's no predicate filter.
res.PredicateMatched = whichMut
} }
res.PredicateMatched = whichMut
muts := req.FalseMutations muts := req.FalseMutations
if whichMut { if whichMut {
muts = req.TrueMutations muts = req.TrueMutations
@@ -907,7 +907,8 @@ func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWri
f.Columns = append(f.Columns, &btpb.Column{ f.Columns = append(f.Columns, &btpb.Column{
Qualifier: []byte(qual), Qualifier: []byte(qual),
Cells: []*btpb.Cell{{ Cells: []*btpb.Cell{{
Value: cell.value, TimestampMicros: cell.ts,
Value: cell.value,
}}, }},
}) })
} }

View File

@@ -17,6 +17,7 @@ package bttest
import ( import (
"fmt" "fmt"
"math/rand" "math/rand"
"strconv"
"sync" "sync"
"sync/atomic" "sync/atomic"
"testing" "testing"
@@ -26,7 +27,6 @@ import (
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
btpb "google.golang.org/genproto/googleapis/bigtable/v2" btpb "google.golang.org/genproto/googleapis/bigtable/v2"
"google.golang.org/grpc" "google.golang.org/grpc"
"strconv"
) )
func TestConcurrentMutationsReadModifyAndGC(t *testing.T) { func TestConcurrentMutationsReadModifyAndGC(t *testing.T) {
@@ -430,7 +430,7 @@ func TestReadRowsOrder(t *testing.T) {
t.Fatal("Response count: got 0, want > 0") t.Fatal("Response count: got 0, want > 0")
} }
if len(mock.responses[0].Chunks) != 27 { if len(mock.responses[0].Chunks) != 27 {
t.Fatal("Chunk count: got %d, want 27", len(mock.responses[0].Chunks)) t.Fatalf("Chunk count: got %d, want 27", len(mock.responses[0].Chunks))
} }
testOrder := func(ms *MockReadRowsServer) { testOrder := func(ms *MockReadRowsServer) {
var prevFam, prevCol string var prevFam, prevCol string
@@ -480,7 +480,7 @@ func TestReadRowsOrder(t *testing.T) {
t.Fatal("Response count: got 0, want > 0") t.Fatal("Response count: got 0, want > 0")
} }
if len(mock.responses[0].Chunks) != 18 { if len(mock.responses[0].Chunks) != 18 {
t.Fatal("Chunk count: got %d, want 18", len(mock.responses[0].Chunks)) t.Fatalf("Chunk count: got %d, want 18", len(mock.responses[0].Chunks))
} }
testOrder(mock) testOrder(mock)
@@ -511,7 +511,61 @@ func TestReadRowsOrder(t *testing.T) {
t.Fatal("Response count: got 0, want > 0") t.Fatal("Response count: got 0, want > 0")
} }
if len(mock.responses[0].Chunks) != 30 { if len(mock.responses[0].Chunks) != 30 {
t.Fatal("Chunk count: got %d, want 30", len(mock.responses[0].Chunks)) t.Fatalf("Chunk count: got %d, want 30", len(mock.responses[0].Chunks))
} }
testOrder(mock) testOrder(mock)
} }
func TestCheckAndMutateRowWithoutPredicate(t *testing.T) {
s := &server{
tables: make(map[string]*table),
}
ctx := context.Background()
newTbl := btapb.Table{
ColumnFamilies: map[string]*btapb.ColumnFamily{
"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
},
}
tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
if err != nil {
t.Fatalf("Creating table: %v", err)
}
// Populate the table
val := []byte("value")
mrreq := &btpb.MutateRowRequest{
TableName: tbl.Name,
RowKey: []byte("row-present"),
Mutations: []*btpb.Mutation{{
Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
FamilyName: "cf",
ColumnQualifier: []byte("col"),
TimestampMicros: 0,
Value: val,
}},
}},
}
if _, err := s.MutateRow(ctx, mrreq); err != nil {
t.Fatalf("Populating table: %v", err)
}
req := &btpb.CheckAndMutateRowRequest{
TableName: tbl.Name,
RowKey: []byte("row-not-present"),
}
if res, err := s.CheckAndMutateRow(ctx, req); err != nil {
t.Errorf("CheckAndMutateRow error: %v", err)
} else if got, want := res.PredicateMatched, false; got != want {
t.Errorf("Invalid PredicateMatched value: got %t, want %t", got, want)
}
req = &btpb.CheckAndMutateRowRequest{
TableName: tbl.Name,
RowKey: []byte("row-present"),
}
if res, err := s.CheckAndMutateRow(ctx, req); err != nil {
t.Errorf("CheckAndMutateRow error: %v", err)
} else if got, want := res.PredicateMatched, true; got != want {
t.Errorf("Invalid PredicateMatched value: got %t, want %t", got, want)
}
}

View File

@@ -216,6 +216,13 @@ var commands = []struct {
"into multiple tablets. Can be repeated to create multiple splits.", "into multiple tablets. Can be repeated to create multiple splits.",
Required: cbtconfig.ProjectAndInstanceRequired, Required: cbtconfig.ProjectAndInstanceRequired,
}, },
{
Name: "deletecolumn",
Desc: "Delete all cells in a column",
do: doDeleteColumn,
Usage: "cbt deletecolumn <table> <row> <family> <column>",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{ {
Name: "deletefamily", Name: "deletefamily",
Desc: "Delete a column family", Desc: "Delete a column family",
@@ -284,10 +291,12 @@ var commands = []struct {
Name: "read", Name: "read",
Desc: "Read rows", Desc: "Read rows",
do: doRead, do: doRead,
Usage: "cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [count=<n>]\n" + Usage: "cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>]" +
" [regex=<regex>] [count=<n>]\n" +
" start=<row> Start reading at this row\n" + " start=<row> Start reading at this row\n" +
" end=<row> Stop reading before this row\n" + " end=<row> Stop reading before this row\n" +
" prefix=<prefix> Read rows with this prefix\n" + " prefix=<prefix> Read rows with this prefix\n" +
" regex=<regex> Read rows with keys matching this regex\n" +
" count=<n> Read only this many rows\n", " count=<n> Read only this many rows\n",
Required: cbtconfig.ProjectAndInstanceRequired, Required: cbtconfig.ProjectAndInstanceRequired,
}, },
@@ -365,6 +374,18 @@ func doCreateTable(ctx context.Context, args ...string) {
} }
} }
func doDeleteColumn(ctx context.Context, args ...string) {
if len(args) != 4 {
log.Fatal("usage: cbt deletecolumn <table> <row> <family> <column>")
}
tbl := getClient().Open(args[0])
mut := bigtable.NewMutation()
mut.DeleteCellsInColumn(args[2], args[3])
if err := tbl.Apply(ctx, args[1], mut); err != nil {
log.Fatalf("Deleting cells in column: %v", err)
}
}
func doDeleteFamily(ctx context.Context, args ...string) { func doDeleteFamily(ctx context.Context, args ...string) {
if len(args) != 2 { if len(args) != 2 {
log.Fatal("usage: cbt deletefamily <table> <family>") log.Fatal("usage: cbt deletefamily <table> <family>")
@@ -474,7 +495,9 @@ var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{
//go:generate go run cbt.go -o cbtdoc.go doc //go:generate go run cbt.go -o cbtdoc.go doc
/* /*
Cbt is a tool for doing basic interactions with Cloud Bigtable. Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to
install the cbt tool, see the
[cbt overview](https://cloud.google.com/bigtable/docs/go/cbt-overview).
Usage: Usage:
@@ -672,7 +695,7 @@ func doRead(ctx context.Context, args ...string) {
case "limit": case "limit":
// Be nicer; we used to support this, but renamed it to "end". // Be nicer; we used to support this, but renamed it to "end".
log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end") log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end")
case "start", "end", "prefix", "count": case "start", "end", "prefix", "count", "regex":
parsed[key] = val parsed[key] = val
} }
} }
@@ -698,6 +721,9 @@ func doRead(ctx context.Context, args ...string) {
} }
opts = append(opts, bigtable.LimitRows(n)) opts = append(opts, bigtable.LimitRows(n))
} }
if regex := parsed["regex"]; regex != "" {
opts = append(opts, bigtable.RowFilter(bigtable.RowKeyFilter(regex)))
}
// TODO(dsymonds): Support filters. // TODO(dsymonds): Support filters.
err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool { err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {

View File

@@ -17,7 +17,9 @@
//go:generate go run cbt.go -o cbtdoc.go doc //go:generate go run cbt.go -o cbtdoc.go doc
/* /*
Cbt is a tool for doing basic interactions with Cloud Bigtable. Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to
install the cbt tool, see the
[cbt overview](https://cloud.google.com/bigtable/docs/go/cbt-overview).
Usage: Usage:
@@ -28,6 +30,7 @@ The commands are:
count Count rows in a table count Count rows in a table
createfamily Create a column family createfamily Create a column family
createtable Create a table createtable Create a table
deletecolumn Delete all cells in a column
deletefamily Delete a column family deletefamily Delete a column family
deleterow Delete a row deleterow Delete a row
deletetable Delete a table deletetable Delete a table
@@ -40,13 +43,14 @@ The commands are:
read Read rows read Read rows
set Set value of a cell set Set value of a cell
setgcpolicy Set the GC policy for a column family setgcpolicy Set the GC policy for a column family
version Print the current cbt version
Use "cbt help <command>" for more information about a command. Use "cbt help <command>" for more information about a command.
The options are: The options are:
-project string -project string
project ID project ID, if unset uses gcloud configured project
-instance string -instance string
Cloud Bigtable instance Cloud Bigtable instance
-creds string -creds string
@@ -72,7 +76,16 @@ Usage:
Create a table Create a table
Usage: Usage:
cbt createtable <table> cbt createtable <table> [initial_splits...]
initial_splits=row A row key to be used to initially split the table into multiple tablets. Can be repeated to create multiple splits.
Delete all cells in a column
Usage:
cbt deletecolumn <table> <row> <family> <column>
@@ -153,10 +166,11 @@ Usage:
Read rows Read rows
Usage: Usage:
cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [count=<n>] cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [regex=<regex>] [count=<n>]
start=<row> Start reading at this row start=<row> Start reading at this row
end=<row> Stop reading before this row end=<row> Stop reading before this row
prefix=<prefix> Read rows with this prefix prefix=<prefix> Read rows with this prefix
regex=<regex> Read rows with keys matching this regex
count=<n> Read only this many rows count=<n> Read only this many rows
@@ -187,5 +201,13 @@ Usage:
Print the current cbt version
Usage:
cbt version
*/ */
package main package main

View File

@@ -136,7 +136,7 @@ func (e *EmulatedEnv) Config() IntegrationTestConfig {
func (e *EmulatedEnv) NewAdminClient() (*AdminClient, error) { func (e *EmulatedEnv) NewAdminClient() (*AdminClient, error) {
timeout := 20 * time.Second timeout := 20 * time.Second
ctx, _ := context.WithTimeout(context.Background(), timeout) ctx, _ := context.WithTimeout(context.Background(), timeout)
conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure()) conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithBlock())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -152,7 +152,8 @@ func (e *EmulatedEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) {
func (e *EmulatedEnv) NewClient() (*Client, error) { func (e *EmulatedEnv) NewClient() (*Client, error) {
timeout := 20 * time.Second timeout := 20 * time.Second
ctx, _ := context.WithTimeout(context.Background(), timeout) ctx, _ := context.WithTimeout(context.Background(), timeout)
conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20))) conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithBlock(),
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20)))
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@@ -181,7 +181,7 @@ type timestampRangeFilter struct {
} }
func (trf timestampRangeFilter) String() string { func (trf timestampRangeFilter) String() string {
return fmt.Sprintf("timestamp_range(%s,%s)", trf.startTime, trf.endTime) return fmt.Sprintf("timestamp_range(%v,%v)", trf.startTime, trf.endTime)
} }
func (trf timestampRangeFilter) proto() *btpb.RowFilter { func (trf timestampRangeFilter) proto() *btpb.RowFilter {

View File

@@ -135,24 +135,24 @@ func GCRuleToString(rule *bttdpb.GcRule) string {
if rule == nil { if rule == nil {
return "<default>" return "<default>"
} }
var ruleStr string switch r := rule.Rule.(type) {
if r, ok := rule.Rule.(*bttdpb.GcRule_MaxNumVersions); ok { case *bttdpb.GcRule_MaxNumVersions:
ruleStr += MaxVersionsPolicy(int(r.MaxNumVersions)).String() return MaxVersionsPolicy(int(r.MaxNumVersions)).String()
} else if r, ok := rule.Rule.(*bttdpb.GcRule_MaxAge); ok { case *bttdpb.GcRule_MaxAge:
ruleStr += MaxAgePolicy(time.Duration(r.MaxAge.Seconds) * time.Second).String() return MaxAgePolicy(time.Duration(r.MaxAge.Seconds) * time.Second).String()
} else if r, ok := rule.Rule.(*bttdpb.GcRule_Intersection_); ok { case *bttdpb.GcRule_Intersection_:
var chunks []string return joinRules(r.Intersection.Rules, " && ")
for _, intRule := range r.Intersection.Rules { case *bttdpb.GcRule_Union_:
chunks = append(chunks, GCRuleToString(intRule)) return joinRules(r.Union.Rules, " || ")
} default:
ruleStr += "(" + strings.Join(chunks, " && ") + ")" return ""
} else if r, ok := rule.Rule.(*bttdpb.GcRule_Union_); ok {
var chunks []string
for _, unionRule := range r.Union.Rules {
chunks = append(chunks, GCRuleToString(unionRule))
}
ruleStr += "(" + strings.Join(chunks, " || ") + ")"
} }
}
return ruleStr
func joinRules(rules []*bttdpb.GcRule, sep string) string {
var chunks []string
for _, r := range rules {
chunks = append(chunks, GCRuleToString(r))
}
return "(" + strings.Join(chunks, sep) + ")"
} }

View File

@@ -20,10 +20,11 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"reflect"
"strings" "strings"
"testing" "testing"
"cloud.google.com/go/internal/testutil"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/wrappers" "github.com/golang/protobuf/ptypes/wrappers"
btspb "google.golang.org/genproto/googleapis/bigtable/v2" btspb "google.golang.org/genproto/googleapis/bigtable/v2"
@@ -48,7 +49,7 @@ func TestSingleCell(t *testing.T) {
t.Fatalf("Family name length mismatch %d, %d", 1, len(row["fm"])) t.Fatalf("Family name length mismatch %d, %d", 1, len(row["fm"]))
} }
want := []ReadItem{ri("rk", "fm", "col", 1, "value")} want := []ReadItem{ri("rk", "fm", "col", 1, "value")}
if !reflect.DeepEqual(row["fm"], want) { if !testutil.Equal(row["fm"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm"], want) t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm"], want)
} }
if err := cr.Close(); err != nil { if err := cr.Close(); err != nil {
@@ -76,14 +77,14 @@ func TestMultipleCells(t *testing.T) {
ri("rs", "fm1", "col1", 1, "val2"), ri("rs", "fm1", "col1", 1, "val2"),
ri("rs", "fm1", "col2", 0, "val3"), ri("rs", "fm1", "col2", 0, "val3"),
} }
if !reflect.DeepEqual(row["fm1"], want) { if !testutil.Equal(row["fm1"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
} }
want = []ReadItem{ want = []ReadItem{
ri("rs", "fm2", "col1", 0, "val4"), ri("rs", "fm2", "col1", 0, "val4"),
ri("rs", "fm2", "col2", 1, "extralongval5"), ri("rs", "fm2", "col2", 1, "extralongval5"),
} }
if !reflect.DeepEqual(row["fm2"], want) { if !testutil.Equal(row["fm2"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
} }
if err := cr.Close(); err != nil { if err := cr.Close(); err != nil {
@@ -108,7 +109,7 @@ func TestSplitCells(t *testing.T) {
ri("rs", "fm1", "col1", 0, "hello world"), ri("rs", "fm1", "col1", 0, "hello world"),
ri("rs", "fm1", "col2", 0, "val2"), ri("rs", "fm1", "col2", 0, "val2"),
} }
if !reflect.DeepEqual(row["fm1"], want) { if !testutil.Equal(row["fm1"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
} }
if err := cr.Close(); err != nil { if err := cr.Close(); err != nil {
@@ -124,7 +125,7 @@ func TestMultipleRows(t *testing.T) {
t.Fatalf("Processing chunk: %v", err) t.Fatalf("Processing chunk: %v", err)
} }
want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")} want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")}
if !reflect.DeepEqual(row["fm1"], want) { if !testutil.Equal(row["fm1"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
} }
@@ -133,7 +134,7 @@ func TestMultipleRows(t *testing.T) {
t.Fatalf("Processing chunk: %v", err) t.Fatalf("Processing chunk: %v", err)
} }
want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")} want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")}
if !reflect.DeepEqual(row["fm2"], want) { if !testutil.Equal(row["fm2"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
} }
@@ -150,7 +151,7 @@ func TestBlankQualifier(t *testing.T) {
t.Fatalf("Processing chunk: %v", err) t.Fatalf("Processing chunk: %v", err)
} }
want := []ReadItem{ri("rs1", "fm1", "", 1, "val1")} want := []ReadItem{ri("rs1", "fm1", "", 1, "val1")}
if !reflect.DeepEqual(row["fm1"], want) { if !testutil.Equal(row["fm1"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
} }
@@ -159,7 +160,7 @@ func TestBlankQualifier(t *testing.T) {
t.Fatalf("Processing chunk: %v", err) t.Fatalf("Processing chunk: %v", err)
} }
want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")} want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")}
if !reflect.DeepEqual(row["fm2"], want) { if !testutil.Equal(row["fm2"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
} }
@@ -177,7 +178,7 @@ func TestReset(t *testing.T) {
cr.Process(ccReset()) cr.Process(ccReset())
row, _ := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true)) row, _ := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true))
want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")} want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")}
if !reflect.DeepEqual(row["fm1"], want) { if !testutil.Equal(row["fm1"], want) {
t.Fatalf("Reset: got: %v\nwant: %v\n", row["fm1"], want) t.Fatalf("Reset: got: %v\nwant: %v\n", row["fm1"], want)
} }
if err := cr.Close(); err != nil { if err := cr.Close(); err != nil {
@@ -279,7 +280,7 @@ func runTestCase(t *testing.T, test TestCase) {
got := toSet(results) got := toSet(results)
want := toSet(test.Results) want := toSet(test.Results)
if !reflect.DeepEqual(got, want) { if !testutil.Equal(got, want) {
t.Fatalf("[%s]: got: %v\nwant: %v\n", test.Name, got, want) t.Fatalf("[%s]: got: %v\nwant: %v\n", test.Name, got, want)
} }
} }

View File

@@ -16,13 +16,14 @@ limitations under the License.
package bigtable package bigtable
import ( import (
"reflect"
"strings" "strings"
"testing" "testing"
"time" "time"
"cloud.google.com/go/bigtable/bttest" "cloud.google.com/go/bigtable/bttest"
"cloud.google.com/go/internal/testutil"
"github.com/golang/protobuf/ptypes/wrappers" "github.com/golang/protobuf/ptypes/wrappers"
"github.com/google/go-cmp/cmp"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/api/option" "google.golang.org/api/option"
btpb "google.golang.org/genproto/googleapis/bigtable/v2" btpb "google.golang.org/genproto/googleapis/bigtable/v2"
@@ -36,7 +37,7 @@ func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure(), grpc.WithBlock())
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@@ -80,10 +81,10 @@ func TestRetryApply(t *testing.T) {
return handler(ctx, req) return handler(ctx, req)
} }
tbl, cleanup, err := setupFakeServer(grpc.UnaryInterceptor(errInjector)) tbl, cleanup, err := setupFakeServer(grpc.UnaryInterceptor(errInjector))
defer cleanup()
if err != nil { if err != nil {
t.Fatalf("fake server setup: %v", err) t.Fatalf("fake server setup: %v", err)
} }
defer cleanup()
mut := NewMutation() mut := NewMutation()
mut.Set("cf", "col", 1, []byte("val")) mut.Set("cf", "col", 1, []byte("val"))
@@ -231,13 +232,13 @@ func TestRetryApplyBulk(t *testing.T) {
} }
errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2"}, []*Mutation{m1, niMut}) errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2"}, []*Mutation{m1, niMut})
if err != nil { if err != nil {
t.Errorf("unretryable errors: request failed %v") t.Errorf("unretryable errors: request failed %v", err)
} }
want := []error{ want := []error{
grpc.Errorf(codes.FailedPrecondition, ""), grpc.Errorf(codes.FailedPrecondition, ""),
grpc.Errorf(codes.Aborted, ""), grpc.Errorf(codes.Aborted, ""),
} }
if !reflect.DeepEqual(want, errors) { if !testutil.Equal(want, errors) {
t.Errorf("unretryable errors: got: %v, want: %v", errors, want) t.Errorf("unretryable errors: got: %v, want: %v", errors, want)
} }
@@ -273,7 +274,7 @@ func TestRetainRowsAfter(t *testing.T) {
prevRowKey := "m" prevRowKey := "m"
want := NewRange("m\x00", "z") want := NewRange("m\x00", "z")
got := prevRowRange.retainRowsAfter(prevRowKey) got := prevRowRange.retainRowsAfter(prevRowKey)
if !reflect.DeepEqual(want, got) { if !testutil.Equal(want, got, cmp.AllowUnexported(RowRange{})) {
t.Errorf("range retry: got %v, want %v", got, want) t.Errorf("range retry: got %v, want %v", got, want)
} }
@@ -281,7 +282,7 @@ func TestRetainRowsAfter(t *testing.T) {
prevRowKey = "f" prevRowKey = "f"
wantRowRangeList := RowRangeList{NewRange("f\x00", "g"), NewRange("h", "l")} wantRowRangeList := RowRangeList{NewRange("f\x00", "g"), NewRange("h", "l")}
got = prevRowRangeList.retainRowsAfter(prevRowKey) got = prevRowRangeList.retainRowsAfter(prevRowKey)
if !reflect.DeepEqual(wantRowRangeList, got) { if !testutil.Equal(wantRowRangeList, got, cmp.AllowUnexported(RowRange{})) {
t.Errorf("range list retry: got %v, want %v", got, wantRowRangeList) t.Errorf("range list retry: got %v, want %v", got, wantRowRangeList)
} }
@@ -289,7 +290,7 @@ func TestRetainRowsAfter(t *testing.T) {
prevRowKey = "b" prevRowKey = "b"
wantList := RowList{"c", "d", "e", "f"} wantList := RowList{"c", "d", "e", "f"}
got = prevRowList.retainRowsAfter(prevRowKey) got = prevRowList.retainRowsAfter(prevRowKey)
if !reflect.DeepEqual(wantList, got) { if !testutil.Equal(wantList, got) {
t.Errorf("list retry: got %v, want %v", got, wantList) t.Errorf("list retry: got %v, want %v", got, wantList)
} }
} }
@@ -351,7 +352,7 @@ func TestRetryReadRows(t *testing.T) {
return true return true
}) })
want := []string{"a", "b", "c", "d"} want := []string{"a", "b", "c", "d"}
if !reflect.DeepEqual(got, want) { if !testutil.Equal(got, want) {
t.Errorf("retry range integration: got %v, want %v", got, want) t.Errorf("retry range integration: got %v, want %v", got, want)
} }
} }

View File

@@ -0,0 +1,674 @@
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package container
import (
"time"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
containerpb "google.golang.org/genproto/googleapis/container/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// ClusterManagerCallOptions contains the retry settings for each method of ClusterManagerClient.
// A nil slice for a method means no retry is attempted for that method; callers may
// override any entry before creating requests, or pass per-call options instead.
type ClusterManagerCallOptions struct {
ListClusters []gax.CallOption
GetCluster []gax.CallOption
CreateCluster []gax.CallOption
UpdateCluster []gax.CallOption
UpdateNodePool []gax.CallOption
SetNodePoolAutoscaling []gax.CallOption
SetLoggingService []gax.CallOption
SetMonitoringService []gax.CallOption
SetAddonsConfig []gax.CallOption
SetLocations []gax.CallOption
UpdateMaster []gax.CallOption
SetMasterAuth []gax.CallOption
DeleteCluster []gax.CallOption
ListOperations []gax.CallOption
GetOperation []gax.CallOption
CancelOperation []gax.CallOption
GetServerConfig []gax.CallOption
ListNodePools []gax.CallOption
GetNodePool []gax.CallOption
CreateNodePool []gax.CallOption
DeleteNodePool []gax.CallOption
RollbackNodePoolUpgrade []gax.CallOption
SetNodePoolManagement []gax.CallOption
SetLabels []gax.CallOption
SetLegacyAbac []gax.CallOption
StartIPRotation []gax.CallOption
CompleteIPRotation []gax.CallOption
SetNodePoolSize []gax.CallOption
SetNetworkPolicy []gax.CallOption
SetMaintenancePolicy []gax.CallOption
}
// defaultClusterManagerClientOptions returns the base client options:
// the production endpoint and the default OAuth scopes.
func defaultClusterManagerClientOptions() []option.ClientOption {
	opts := make([]option.ClientOption, 0, 2)
	opts = append(opts, option.WithEndpoint("container.googleapis.com:443"))
	opts = append(opts, option.WithScopes(DefaultAuthScopes()...))
	return opts
}
// defaultClusterManagerCallOptions returns the per-method retry settings.
// Idempotent methods retry on DeadlineExceeded/Unavailable with exponential
// backoff; non-idempotent methods get no retry options (nil slice).
func defaultClusterManagerCallOptions() *ClusterManagerCallOptions {
	idempotent := []gax.CallOption{
		gax.WithRetry(func() gax.Retryer {
			return gax.OnCodes([]codes.Code{
				codes.DeadlineExceeded,
				codes.Unavailable,
			}, gax.Backoff{
				Initial:    100 * time.Millisecond,
				Max:        60000 * time.Millisecond,
				Multiplier: 1.3,
			})
		}),
	}
	// Non-idempotent methods intentionally carry no retry options.
	var nonIdempotent []gax.CallOption
	return &ClusterManagerCallOptions{
		ListClusters:            idempotent,
		GetCluster:              idempotent,
		CreateCluster:           nonIdempotent,
		UpdateCluster:           nonIdempotent,
		UpdateNodePool:          nonIdempotent,
		SetNodePoolAutoscaling:  nonIdempotent,
		SetLoggingService:       nonIdempotent,
		SetMonitoringService:    nonIdempotent,
		SetAddonsConfig:         nonIdempotent,
		SetLocations:            nonIdempotent,
		UpdateMaster:            nonIdempotent,
		SetMasterAuth:           nonIdempotent,
		DeleteCluster:           idempotent,
		ListOperations:          idempotent,
		GetOperation:            idempotent,
		CancelOperation:         nonIdempotent,
		GetServerConfig:         idempotent,
		ListNodePools:           idempotent,
		GetNodePool:             idempotent,
		CreateNodePool:          nonIdempotent,
		DeleteNodePool:          idempotent,
		RollbackNodePoolUpgrade: nonIdempotent,
		SetNodePoolManagement:   nonIdempotent,
		SetLabels:               nonIdempotent,
		SetLegacyAbac:           nonIdempotent,
		StartIPRotation:         nonIdempotent,
		CompleteIPRotation:      nonIdempotent,
		SetNodePoolSize:         nonIdempotent,
		SetNetworkPolicy:        nonIdempotent,
		SetMaintenancePolicy:    nonIdempotent,
	}
}
// ClusterManagerClient is a client for interacting with Google Container Engine API.
// Create one with NewClusterManagerClient and release its resources with Close.
type ClusterManagerClient struct {
// The connection to the service.
conn *grpc.ClientConn
// The gRPC API client.
clusterManagerClient containerpb.ClusterManagerClient
// The call options for this service.
CallOptions *ClusterManagerCallOptions
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewClusterManagerClient creates a new cluster manager client.
//
// Google Container Engine Cluster Manager v1
func NewClusterManagerClient(ctx context.Context, opts ...option.ClientOption) (*ClusterManagerClient, error) {
	// Caller-supplied options are appended last so they win over the defaults.
	allOpts := append(defaultClusterManagerClientOptions(), opts...)
	conn, err := transport.DialGRPC(ctx, allOpts...)
	if err != nil {
		return nil, err
	}
	client := &ClusterManagerClient{
		conn:                 conn,
		CallOptions:          defaultClusterManagerCallOptions(),
		clusterManagerClient: containerpb.NewClusterManagerClient(conn),
	}
	client.setGoogleClientInfo()
	return client, nil
}
// Connection returns the client's connection to the API service.
func (c *ClusterManagerClient) Connection() *grpc.ClientConn {
	conn := c.conn
	return conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *ClusterManagerClient) Close() error {
	err := c.conn.Close()
	return err
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *ClusterManagerClient) setGoogleClientInfo(keyval ...string) {
	kv := []string{"gl-go", version.Go()}
	kv = append(kv, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// ListClusters lists all clusters owned by a project in either the specified zone or all
// zones.
func (c *ClusterManagerClient) ListClusters(ctx context.Context, req *containerpb.ListClustersRequest, opts ...gax.CallOption) (*containerpb.ListClustersResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression keeps appended per-call options off the shared default slice.
	defaults := c.CallOptions.ListClusters
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.ListClustersResponse
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.ListClusters(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// GetCluster gets the details of a specific cluster.
func (c *ClusterManagerClient) GetCluster(ctx context.Context, req *containerpb.GetClusterRequest, opts ...gax.CallOption) (*containerpb.Cluster, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.GetCluster
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Cluster
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.GetCluster(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// CreateCluster creates a cluster, consisting of the specified number and type of Google
// Compute Engine instances.
//
// By default, the cluster is created in the project's
// default network (at /compute/docs/networks-and-firewalls#networks).
//
// One firewall is added for the cluster. After cluster creation,
// the cluster creates routes for each node to allow the containers
// on that node to communicate with all other instances in the
// cluster.
//
// Finally, an entry is added to the project's global metadata indicating
// which CIDR range is being used by the cluster.
func (c *ClusterManagerClient) CreateCluster(ctx context.Context, req *containerpb.CreateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.CreateCluster
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.CreateCluster(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// UpdateCluster updates the settings of a specific cluster.
func (c *ClusterManagerClient) UpdateCluster(ctx context.Context, req *containerpb.UpdateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.UpdateCluster
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.UpdateCluster(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// UpdateNodePool updates the version and/or image type of a specific node pool.
func (c *ClusterManagerClient) UpdateNodePool(ctx context.Context, req *containerpb.UpdateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.UpdateNodePool
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.UpdateNodePool(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// SetNodePoolAutoscaling sets the autoscaling settings of a specific node pool.
func (c *ClusterManagerClient) SetNodePoolAutoscaling(ctx context.Context, req *containerpb.SetNodePoolAutoscalingRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.SetNodePoolAutoscaling
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetNodePoolAutoscaling(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// SetLoggingService sets the logging service of a specific cluster.
func (c *ClusterManagerClient) SetLoggingService(ctx context.Context, req *containerpb.SetLoggingServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.SetLoggingService
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetLoggingService(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// SetMonitoringService sets the monitoring service of a specific cluster.
func (c *ClusterManagerClient) SetMonitoringService(ctx context.Context, req *containerpb.SetMonitoringServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.SetMonitoringService
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetMonitoringService(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// SetAddonsConfig sets the addons of a specific cluster.
func (c *ClusterManagerClient) SetAddonsConfig(ctx context.Context, req *containerpb.SetAddonsConfigRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.SetAddonsConfig
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetAddonsConfig(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// SetLocations sets the locations of a specific cluster.
func (c *ClusterManagerClient) SetLocations(ctx context.Context, req *containerpb.SetLocationsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.SetLocations
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetLocations(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// UpdateMaster updates the master of a specific cluster.
func (c *ClusterManagerClient) UpdateMaster(ctx context.Context, req *containerpb.UpdateMasterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.UpdateMaster
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.UpdateMaster(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// SetMasterAuth used to set master auth materials. Currently supports :-
// Changing the admin password of a specific cluster.
// This can be either via password generation or explicitly set the password.
func (c *ClusterManagerClient) SetMasterAuth(ctx context.Context, req *containerpb.SetMasterAuthRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.SetMasterAuth
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetMasterAuth(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// DeleteCluster deletes the cluster, including the Kubernetes endpoint and all worker
// nodes.
//
// Firewalls and routes that were configured during cluster creation
// are also deleted.
//
// Other Google Compute Engine resources that might be in use by the cluster
// (e.g. load balancer resources) will not be deleted if they weren't present
// at the initial create time.
func (c *ClusterManagerClient) DeleteCluster(ctx context.Context, req *containerpb.DeleteClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.DeleteCluster
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.DeleteCluster(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// ListOperations lists all operations in a project in a specific zone or all zones.
func (c *ClusterManagerClient) ListOperations(ctx context.Context, req *containerpb.ListOperationsRequest, opts ...gax.CallOption) (*containerpb.ListOperationsResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.ListOperations
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.ListOperationsResponse
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.ListOperations(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// GetOperation gets the specified operation.
func (c *ClusterManagerClient) GetOperation(ctx context.Context, req *containerpb.GetOperationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.GetOperation
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.GetOperation(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// CancelOperation cancels the specified operation.
func (c *ClusterManagerClient) CancelOperation(ctx context.Context, req *containerpb.CancelOperationRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.CancelOperation
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	call := func(ctx context.Context, settings gax.CallSettings) error {
		// The returned Empty is discarded; only the error matters.
		_, err := c.clusterManagerClient.CancelOperation(ctx, req, settings.GRPC...)
		return err
	}
	return gax.Invoke(ctx, call, callOpts...)
}
// GetServerConfig returns configuration info about the Container Engine service.
func (c *ClusterManagerClient) GetServerConfig(ctx context.Context, req *containerpb.GetServerConfigRequest, opts ...gax.CallOption) (*containerpb.ServerConfig, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.GetServerConfig
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.ServerConfig
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.GetServerConfig(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// ListNodePools lists the node pools for a cluster.
func (c *ClusterManagerClient) ListNodePools(ctx context.Context, req *containerpb.ListNodePoolsRequest, opts ...gax.CallOption) (*containerpb.ListNodePoolsResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.ListNodePools
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.ListNodePoolsResponse
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.ListNodePools(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// GetNodePool retrieves the node pool requested.
func (c *ClusterManagerClient) GetNodePool(ctx context.Context, req *containerpb.GetNodePoolRequest, opts ...gax.CallOption) (*containerpb.NodePool, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.GetNodePool
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.NodePool
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.GetNodePool(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// CreateNodePool creates a node pool for a cluster.
func (c *ClusterManagerClient) CreateNodePool(ctx context.Context, req *containerpb.CreateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.CreateNodePool
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.CreateNodePool(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// DeleteNodePool deletes a node pool from a cluster.
func (c *ClusterManagerClient) DeleteNodePool(ctx context.Context, req *containerpb.DeleteNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.DeleteNodePool
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.DeleteNodePool(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// RollbackNodePoolUpgrade roll back the previously Aborted or Failed NodePool upgrade.
// This will be an no-op if the last upgrade successfully completed.
func (c *ClusterManagerClient) RollbackNodePoolUpgrade(ctx context.Context, req *containerpb.RollbackNodePoolUpgradeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.RollbackNodePoolUpgrade
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.RollbackNodePoolUpgrade(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// SetNodePoolManagement sets the NodeManagement options for a node pool.
func (c *ClusterManagerClient) SetNodePoolManagement(ctx context.Context, req *containerpb.SetNodePoolManagementRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	defaults := c.CallOptions.SetNodePoolManagement
	callOpts := append(defaults[:len(defaults):len(defaults)], opts...)
	var resp *containerpb.Operation
	call := func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.clusterManagerClient.SetNodePoolManagement(ctx, req, settings.GRPC...)
		return err
	}
	if err := gax.Invoke(ctx, call, callOpts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// SetLabels sets labels on a cluster.
func (c *ClusterManagerClient) SetLabels(ctx context.Context, req *containerpb.SetLabelsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.SetLabels[0:len(c.CallOptions.SetLabels):len(c.CallOptions.SetLabels)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.SetLabels(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// SetLegacyAbac enables or disables the ABAC authorization mechanism on a cluster.
func (c *ClusterManagerClient) SetLegacyAbac(ctx context.Context, req *containerpb.SetLegacyAbacRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.SetLegacyAbac[0:len(c.CallOptions.SetLegacyAbac):len(c.CallOptions.SetLegacyAbac)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.SetLegacyAbac(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// StartIPRotation start master IP rotation.
func (c *ClusterManagerClient) StartIPRotation(ctx context.Context, req *containerpb.StartIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.StartIPRotation[0:len(c.CallOptions.StartIPRotation):len(c.CallOptions.StartIPRotation)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.StartIPRotation(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// CompleteIPRotation completes master IP rotation.
func (c *ClusterManagerClient) CompleteIPRotation(ctx context.Context, req *containerpb.CompleteIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.CompleteIPRotation[0:len(c.CallOptions.CompleteIPRotation):len(c.CallOptions.CompleteIPRotation)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.CompleteIPRotation(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// SetNodePoolSize sets the size of a specific node pool.
func (c *ClusterManagerClient) SetNodePoolSize(ctx context.Context, req *containerpb.SetNodePoolSizeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.SetNodePoolSize[0:len(c.CallOptions.SetNodePoolSize):len(c.CallOptions.SetNodePoolSize)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.SetNodePoolSize(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// SetNetworkPolicy enables/Disables Network Policy for a cluster.
func (c *ClusterManagerClient) SetNetworkPolicy(ctx context.Context, req *containerpb.SetNetworkPolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.SetNetworkPolicy[0:len(c.CallOptions.SetNetworkPolicy):len(c.CallOptions.SetNetworkPolicy)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.SetNetworkPolicy(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// SetMaintenancePolicy sets the maintenance policy for a cluster.
func (c *ClusterManagerClient) SetMaintenancePolicy(ctx context.Context, req *containerpb.SetMaintenancePolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.SetMaintenancePolicy[0:len(c.CallOptions.SetMaintenancePolicy):len(c.CallOptions.SetMaintenancePolicy)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.SetMaintenancePolicy(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

View File

@@ -0,0 +1,571 @@
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package container_test
import (
"cloud.google.com/go/container/apiv1"
"golang.org/x/net/context"
containerpb "google.golang.org/genproto/googleapis/container/v1"
)
func ExampleNewClusterManagerClient() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use client.
_ = c
}
func ExampleClusterManagerClient_ListClusters() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.ListClustersRequest{
// TODO: Fill request struct fields.
}
resp, err := c.ListClusters(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_GetCluster() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.GetClusterRequest{
// TODO: Fill request struct fields.
}
resp, err := c.GetCluster(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_CreateCluster() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.CreateClusterRequest{
// TODO: Fill request struct fields.
}
resp, err := c.CreateCluster(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_UpdateCluster() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.UpdateClusterRequest{
// TODO: Fill request struct fields.
}
resp, err := c.UpdateCluster(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_UpdateNodePool() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.UpdateNodePoolRequest{
// TODO: Fill request struct fields.
}
resp, err := c.UpdateNodePool(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_SetNodePoolAutoscaling() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.SetNodePoolAutoscalingRequest{
// TODO: Fill request struct fields.
}
resp, err := c.SetNodePoolAutoscaling(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_SetLoggingService() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.SetLoggingServiceRequest{
// TODO: Fill request struct fields.
}
resp, err := c.SetLoggingService(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_SetMonitoringService() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.SetMonitoringServiceRequest{
// TODO: Fill request struct fields.
}
resp, err := c.SetMonitoringService(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_SetAddonsConfig() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.SetAddonsConfigRequest{
// TODO: Fill request struct fields.
}
resp, err := c.SetAddonsConfig(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_SetLocations() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.SetLocationsRequest{
// TODO: Fill request struct fields.
}
resp, err := c.SetLocations(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_UpdateMaster() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.UpdateMasterRequest{
// TODO: Fill request struct fields.
}
resp, err := c.UpdateMaster(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_SetMasterAuth() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.SetMasterAuthRequest{
// TODO: Fill request struct fields.
}
resp, err := c.SetMasterAuth(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_DeleteCluster() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.DeleteClusterRequest{
// TODO: Fill request struct fields.
}
resp, err := c.DeleteCluster(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_ListOperations() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.ListOperationsRequest{
// TODO: Fill request struct fields.
}
resp, err := c.ListOperations(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_GetOperation() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.GetOperationRequest{
// TODO: Fill request struct fields.
}
resp, err := c.GetOperation(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_CancelOperation() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.CancelOperationRequest{
// TODO: Fill request struct fields.
}
err = c.CancelOperation(ctx, req)
if err != nil {
// TODO: Handle error.
}
}
func ExampleClusterManagerClient_GetServerConfig() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.GetServerConfigRequest{
// TODO: Fill request struct fields.
}
resp, err := c.GetServerConfig(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_ListNodePools() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.ListNodePoolsRequest{
// TODO: Fill request struct fields.
}
resp, err := c.ListNodePools(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_GetNodePool() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.GetNodePoolRequest{
// TODO: Fill request struct fields.
}
resp, err := c.GetNodePool(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_CreateNodePool() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.CreateNodePoolRequest{
// TODO: Fill request struct fields.
}
resp, err := c.CreateNodePool(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_DeleteNodePool() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.DeleteNodePoolRequest{
// TODO: Fill request struct fields.
}
resp, err := c.DeleteNodePool(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_RollbackNodePoolUpgrade() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.RollbackNodePoolUpgradeRequest{
// TODO: Fill request struct fields.
}
resp, err := c.RollbackNodePoolUpgrade(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_SetNodePoolManagement() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.SetNodePoolManagementRequest{
// TODO: Fill request struct fields.
}
resp, err := c.SetNodePoolManagement(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_SetLabels() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.SetLabelsRequest{
// TODO: Fill request struct fields.
}
resp, err := c.SetLabels(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_SetLegacyAbac() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.SetLegacyAbacRequest{
// TODO: Fill request struct fields.
}
resp, err := c.SetLegacyAbac(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_StartIPRotation() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.StartIPRotationRequest{
// TODO: Fill request struct fields.
}
resp, err := c.StartIPRotation(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_CompleteIPRotation() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.CompleteIPRotationRequest{
// TODO: Fill request struct fields.
}
resp, err := c.CompleteIPRotation(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_SetNodePoolSize() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.SetNodePoolSizeRequest{
// TODO: Fill request struct fields.
}
resp, err := c.SetNodePoolSize(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_SetNetworkPolicy() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.SetNetworkPolicyRequest{
// TODO: Fill request struct fields.
}
resp, err := c.SetNetworkPolicy(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClusterManagerClient_SetMaintenancePolicy() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.SetMaintenancePolicyRequest{
// TODO: Fill request struct fields.
}
resp, err := c.SetMaintenancePolicy(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

48
vendor/cloud.google.com/go/container/apiv1/doc.go generated vendored Normal file
View File

@@ -0,0 +1,48 @@
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
// Package container is an auto-generated package for the
// Google Container Engine API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// The Google Kubernetes Engine API is used for building and managing
// container
// based applications, powered by the open source Kubernetes technology.
package container // import "cloud.google.com/go/container/apiv1"
import (
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
)
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",
}
}

2912
vendor/cloud.google.com/go/container/apiv1/mock_test.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -24,7 +24,10 @@ import (
"testing" "testing"
"time" "time"
"cloud.google.com/go/internal/testutil"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"github.com/google/go-cmp/cmp"
"golang.org/x/net/context" "golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/datastore/v1" pb "google.golang.org/genproto/googleapis/datastore/v1"
"google.golang.org/grpc" "google.golang.org/grpc"
@@ -1942,18 +1945,7 @@ func TestRoundTrip(t *testing.T) {
sortPL(*pl) sortPL(*pl)
} }
equal := false if !testutil.Equal(got, tc.want, cmp.AllowUnexported(X0{}, X2{})) {
switch v := got.(type) {
// Round tripping a time.Time can result in a different time.Location: Local instead of UTC.
// We therefore test equality explicitly, instead of relying on reflect.DeepEqual.
case *T:
equal = v.T.Equal(tc.want.(*T).T)
case *SpecialTime:
equal = v.MyTime.Equal(tc.want.(*SpecialTime).MyTime.Time)
default:
equal = reflect.DeepEqual(got, tc.want)
}
if !equal {
t.Errorf("%s: compare:\ngot: %+#v\nwant: %+#v", tc.desc, got, tc.want) t.Errorf("%s: compare:\ngot: %+#v\nwant: %+#v", tc.desc, got, tc.want)
continue continue
} }
@@ -2707,7 +2699,7 @@ func TestLoadSavePLS(t *testing.T) {
t.Errorf("%s: save: %v", tc.desc, err) t.Errorf("%s: save: %v", tc.desc, err)
continue continue
} }
if !reflect.DeepEqual(e, tc.wantSave) { if !testutil.Equal(e, tc.wantSave) {
t.Errorf("%s: save: \ngot: %+v\nwant: %+v", tc.desc, e, tc.wantSave) t.Errorf("%s: save: \ngot: %+v\nwant: %+v", tc.desc, e, tc.wantSave)
continue continue
} }
@@ -2729,7 +2721,7 @@ func TestLoadSavePLS(t *testing.T) {
t.Errorf("%s: load: %v", tc.desc, err) t.Errorf("%s: load: %v", tc.desc, err)
continue continue
} }
if !reflect.DeepEqual(gota, tc.wantLoad) { if !testutil.Equal(gota, tc.wantLoad) {
t.Errorf("%s: load: \ngot: %+v\nwant: %+v", tc.desc, gota, tc.wantLoad) t.Errorf("%s: load: \ngot: %+v\nwant: %+v", tc.desc, gota, tc.wantLoad)
continue continue
} }
@@ -2864,7 +2856,7 @@ func TestQueryConstruction(t *testing.T) {
} }
continue continue
} }
if !reflect.DeepEqual(test.q, test.exp) { if !testutil.Equal(test.q, test.exp, cmp.AllowUnexported(Query{})) {
t.Errorf("%d: mismatch: got %v want %v", i, test.q, test.exp) t.Errorf("%d: mismatch: got %v want %v", i, test.q, test.exp)
} }
} }
@@ -3322,7 +3314,7 @@ func TestKeyLoaderEndToEnd(t *testing.T) {
} }
for i := range dst { for i := range dst {
if !reflect.DeepEqual(dst[i].K, keys[i]) { if !testutil.Equal(dst[i].K, keys[i]) {
t.Fatalf("unexpected entity %d to have key %+v, got %+v", i, keys[i], dst[i].K) t.Fatalf("unexpected entity %d to have key %+v, got %+v", i, keys[i], dst[i].K)
} }
} }

View File

@@ -78,7 +78,7 @@ func TestBasics(t *testing.T) {
if err != nil { if err != nil {
t.Errorf("client.Delete: %v", err) t.Errorf("client.Delete: %v", err)
} }
if !reflect.DeepEqual(x0, x1) { if !testutil.Equal(x0, x1) {
t.Errorf("compare: x0=%v, x1=%v", x0, x1) t.Errorf("compare: x0=%v, x1=%v", x0, x1)
} }
} }
@@ -117,7 +117,7 @@ func TestTopLevelKeyLoaded(t *testing.T) {
} }
// The two keys should be absolutely identical. // The two keys should be absolutely identical.
if !reflect.DeepEqual(e.K, k) { if !testutil.Equal(e.K, k) {
t.Fatalf("e.K not equal to k; got %#v, want %#v", e.K, k) t.Fatalf("e.K not equal to k; got %#v, want %#v", e.K, k)
} }
@@ -142,7 +142,7 @@ func TestListValues(t *testing.T) {
if err := client.Get(ctx, k, &p1); err != nil { if err := client.Get(ctx, k, &p1); err != nil {
t.Errorf("client.Get: %v", err) t.Errorf("client.Get: %v", err)
} }
if !reflect.DeepEqual(p0, p1) { if !testutil.Equal(p0, p1) {
t.Errorf("compare:\np0=%v\np1=%#v", p0, p1) t.Errorf("compare:\np0=%v\np1=%#v", p0, p1)
} }
if err = client.Delete(ctx, k); err != nil { if err = client.Delete(ctx, k); err != nil {
@@ -402,7 +402,7 @@ func TestFilters(t *testing.T) {
if err != nil { if err != nil {
t.Errorf("client.GetAll: %v", err) t.Errorf("client.GetAll: %v", err)
} }
if !reflect.DeepEqual(got, want) { if !testutil.Equal(got, want) {
t.Errorf("compare: got=%v, want=%v", got, want) t.Errorf("compare: got=%v, want=%v", got, want)
} }
}, func() { }, func() {
@@ -421,7 +421,7 @@ func TestFilters(t *testing.T) {
if err != nil { if err != nil {
t.Errorf("client.GetAll: %v", err) t.Errorf("client.GetAll: %v", err)
} }
if !reflect.DeepEqual(got, want) { if !testutil.Equal(got, want) {
t.Errorf("compare: got=%v, want=%v", got, want) t.Errorf("compare: got=%v, want=%v", got, want)
} }
}) })
@@ -730,10 +730,10 @@ func TestGetAllWithFieldMismatch(t *testing.T) {
{X: 22}, {X: 22},
} }
getKeys, err := client.GetAll(ctx, NewQuery("GetAllThing").Ancestor(parent), &got) getKeys, err := client.GetAll(ctx, NewQuery("GetAllThing").Ancestor(parent), &got)
if len(getKeys) != 3 && !reflect.DeepEqual(getKeys, putKeys) { if len(getKeys) != 3 && !testutil.Equal(getKeys, putKeys) {
t.Errorf("client.GetAll: keys differ\ngetKeys=%v\nputKeys=%v", getKeys, putKeys) t.Errorf("client.GetAll: keys differ\ngetKeys=%v\nputKeys=%v", getKeys, putKeys)
} }
if !reflect.DeepEqual(got, want) { if !testutil.Equal(got, want) {
t.Errorf("client.GetAll: entities differ\ngot =%v\nwant=%v", got, want) t.Errorf("client.GetAll: entities differ\ngot =%v\nwant=%v", got, want)
} }
if _, ok := err.(*ErrFieldMismatch); !ok { if _, ok := err.(*ErrFieldMismatch); !ok {
@@ -858,7 +858,7 @@ loop:
got = append(got, dst.I) got = append(got, dst.I)
} }
sort.Ints(got) sort.Ints(got)
if !reflect.DeepEqual(got, tc.want) { if !testutil.Equal(got, tc.want) {
t.Errorf("elems %q: got %+v want %+v", tc.desc, got, tc.want) t.Errorf("elems %q: got %+v want %+v", tc.desc, got, tc.want)
continue continue
} }
@@ -994,7 +994,7 @@ func TestNilPointers(t *testing.T) {
xs := make([]*X, 2) xs := make([]*X, 2)
if err := client.GetMulti(ctx, keys, xs); err != nil { if err := client.GetMulti(ctx, keys, xs); err != nil {
t.Errorf("GetMulti: %v", err) t.Errorf("GetMulti: %v", err)
} else if !reflect.DeepEqual(xs, src) { } else if !testutil.Equal(xs, src) {
t.Errorf("GetMulti fetched %v, want %v", xs, src) t.Errorf("GetMulti fetched %v, want %v", xs, src)
} }

View File

@@ -18,6 +18,8 @@ import (
"reflect" "reflect"
"testing" "testing"
"cloud.google.com/go/internal/testutil"
pb "google.golang.org/genproto/googleapis/datastore/v1" pb "google.golang.org/genproto/googleapis/datastore/v1"
) )
@@ -164,7 +166,7 @@ func TestLoadEntityNestedLegacy(t *testing.T) {
continue continue
} }
if !reflect.DeepEqual(tc.want, dst) { if !testutil.Equal(tc.want, dst) {
t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want) t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want)
} }
} }
@@ -407,7 +409,7 @@ func TestLoadEntityNested(t *testing.T) {
continue continue
} }
if !reflect.DeepEqual(tc.want, dst) { if !testutil.Equal(tc.want, dst) {
t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want) t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want)
} }
} }
@@ -503,7 +505,7 @@ func TestAlreadyPopulatedDst(t *testing.T) {
continue continue
} }
if !reflect.DeepEqual(tc.want, tc.dst) { if !testutil.Equal(tc.want, tc.dst) {
t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, tc.dst, tc.want) t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, tc.dst, tc.want)
} }
} }
@@ -748,7 +750,7 @@ func TestKeyLoader(t *testing.T) {
continue continue
} }
if !reflect.DeepEqual(tc.want, tc.dst) { if !testutil.Equal(tc.want, tc.dst) {
t.Errorf("%s: compare:\ngot: %+v\nwant: %+v", tc.desc, tc.dst, tc.want) t.Errorf("%s: compare:\ngot: %+v\nwant: %+v", tc.desc, tc.dst, tc.want)
} }
} }

View File

@@ -21,7 +21,10 @@ import (
"sort" "sort"
"testing" "testing"
"cloud.google.com/go/internal/testutil"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"github.com/google/go-cmp/cmp"
"golang.org/x/net/context" "golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/datastore/v1" pb "google.golang.org/genproto/googleapis/datastore/v1"
"google.golang.org/grpc" "google.golang.org/grpc"
@@ -334,7 +337,7 @@ func TestSimpleQuery(t *testing.T) {
} }
} }
if !reflect.DeepEqual(tc.dst, tc.want) { if !testutil.Equal(tc.dst, tc.want) {
t.Errorf("dst type %T: Entities\ngot %+v\nwant %+v", tc.dst, tc.dst, tc.want) t.Errorf("dst type %T: Entities\ngot %+v\nwant %+v", tc.dst, tc.dst, tc.want)
continue continue
} }
@@ -357,10 +360,10 @@ func TestQueriesAreImmutable(t *testing.T) {
q0 := NewQuery("foo") q0 := NewQuery("foo")
q1 := NewQuery("foo") q1 := NewQuery("foo")
q2 := q1.Offset(2) q2 := q1.Offset(2)
if !reflect.DeepEqual(q0, q1) { if !testutil.Equal(q0, q1, cmp.AllowUnexported(Query{})) {
t.Errorf("q0 and q1 were not equal") t.Errorf("q0 and q1 were not equal")
} }
if reflect.DeepEqual(q1, q2) { if testutil.Equal(q1, q2, cmp.AllowUnexported(Query{})) {
t.Errorf("q1 and q2 were equal") t.Errorf("q1 and q2 were equal")
} }
@@ -381,10 +384,10 @@ func TestQueriesAreImmutable(t *testing.T) {
q4 := f() q4 := f()
q5 := q4.Order("y") q5 := q4.Order("y")
q6 := q4.Order("z") q6 := q4.Order("z")
if !reflect.DeepEqual(q3, q5) { if !testutil.Equal(q3, q5, cmp.AllowUnexported(Query{})) {
t.Errorf("q3 and q5 were not equal") t.Errorf("q3 and q5 were not equal")
} }
if reflect.DeepEqual(q5, q6) { if testutil.Equal(q5, q6, cmp.AllowUnexported(Query{})) {
t.Errorf("q5 and q6 were equal") t.Errorf("q5 and q6 were equal")
} }
} }

View File

@@ -15,9 +15,10 @@
package datastore package datastore
import ( import (
"reflect"
"testing" "testing"
"cloud.google.com/go/internal/testutil"
pb "google.golang.org/genproto/googleapis/datastore/v1" pb "google.golang.org/genproto/googleapis/datastore/v1"
) )
@@ -187,7 +188,7 @@ func TestSaveEntityNested(t *testing.T) {
continue continue
} }
if !reflect.DeepEqual(tc.want, got) { if !testutil.Equal(tc.want, got) {
t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, got, tc.want) t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, got, tc.want)
} }
} }

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -27,6 +27,7 @@ import (
clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
) )
// Controller2CallOptions contains the retry settings for each method of Controller2Client. // Controller2CallOptions contains the retry settings for each method of Controller2Client.
@@ -76,8 +77,8 @@ type Controller2Client struct {
// The call options for this service. // The call options for this service.
CallOptions *Controller2CallOptions CallOptions *Controller2CallOptions
// The metadata to be sent with each request. // The x-goog-* metadata to be sent with each request.
xGoogHeader []string xGoogMetadata metadata.MD
} }
// NewController2Client creates a new controller2 client. // NewController2Client creates a new controller2 client.
@@ -134,7 +135,7 @@ func (c *Controller2Client) Close() error {
func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) { func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...) kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)} c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
} }
// RegisterDebuggee registers the debuggee with the controller service. // RegisterDebuggee registers the debuggee with the controller service.
@@ -148,7 +149,7 @@ func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) {
// from data loss, or change the debuggee_id format. Agents must handle // from data loss, or change the debuggee_id format. Agents must handle
// debuggee_id value changing upon re-registration. // debuggee_id value changing upon re-registration.
func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest, opts ...gax.CallOption) (*clouddebuggerpb.RegisterDebuggeeResponse, error) { func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest, opts ...gax.CallOption) (*clouddebuggerpb.RegisterDebuggeeResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.RegisterDebuggee[0:len(c.CallOptions.RegisterDebuggee):len(c.CallOptions.RegisterDebuggee)], opts...) opts = append(c.CallOptions.RegisterDebuggee[0:len(c.CallOptions.RegisterDebuggee):len(c.CallOptions.RegisterDebuggee)], opts...)
var resp *clouddebuggerpb.RegisterDebuggeeResponse var resp *clouddebuggerpb.RegisterDebuggeeResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -176,7 +177,7 @@ func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebu
// until the controller removes them from the active list to avoid // until the controller removes them from the active list to avoid
// setting those breakpoints again. // setting those breakpoints again.
func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) { func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListActiveBreakpoints[0:len(c.CallOptions.ListActiveBreakpoints):len(c.CallOptions.ListActiveBreakpoints)], opts...) opts = append(c.CallOptions.ListActiveBreakpoints[0:len(c.CallOptions.ListActiveBreakpoints):len(c.CallOptions.ListActiveBreakpoints)], opts...)
var resp *clouddebuggerpb.ListActiveBreakpointsResponse var resp *clouddebuggerpb.ListActiveBreakpointsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -199,7 +200,7 @@ func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clou
// semantics. These may only make changes such as canonicalizing a value // semantics. These may only make changes such as canonicalizing a value
// or snapping the location to the correct line of code. // or snapping the location to the correct line of code.
func (c *Controller2Client) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) { func (c *Controller2Client) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.UpdateActiveBreakpoint[0:len(c.CallOptions.UpdateActiveBreakpoint):len(c.CallOptions.UpdateActiveBreakpoint)], opts...) opts = append(c.CallOptions.UpdateActiveBreakpoint[0:len(c.CallOptions.UpdateActiveBreakpoint):len(c.CallOptions.UpdateActiveBreakpoint)], opts...)
var resp *clouddebuggerpb.UpdateActiveBreakpointResponse var resp *clouddebuggerpb.UpdateActiveBreakpointResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -27,6 +27,7 @@ import (
clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
) )
// Debugger2CallOptions contains the retry settings for each method of Debugger2Client. // Debugger2CallOptions contains the retry settings for each method of Debugger2Client.
@@ -80,8 +81,8 @@ type Debugger2Client struct {
// The call options for this service. // The call options for this service.
CallOptions *Debugger2CallOptions CallOptions *Debugger2CallOptions
// The metadata to be sent with each request. // The x-goog-* metadata to be sent with each request.
xGoogHeader []string xGoogMetadata metadata.MD
} }
// NewDebugger2Client creates a new debugger2 client. // NewDebugger2Client creates a new debugger2 client.
@@ -130,12 +131,12 @@ func (c *Debugger2Client) Close() error {
func (c *Debugger2Client) SetGoogleClientInfo(keyval ...string) { func (c *Debugger2Client) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...) kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)} c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
} }
// SetBreakpoint sets the breakpoint to the debuggee. // SetBreakpoint sets the breakpoint to the debuggee.
func (c *Debugger2Client) SetBreakpoint(ctx context.Context, req *clouddebuggerpb.SetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.SetBreakpointResponse, error) { func (c *Debugger2Client) SetBreakpoint(ctx context.Context, req *clouddebuggerpb.SetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.SetBreakpointResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.SetBreakpoint[0:len(c.CallOptions.SetBreakpoint):len(c.CallOptions.SetBreakpoint)], opts...) opts = append(c.CallOptions.SetBreakpoint[0:len(c.CallOptions.SetBreakpoint):len(c.CallOptions.SetBreakpoint)], opts...)
var resp *clouddebuggerpb.SetBreakpointResponse var resp *clouddebuggerpb.SetBreakpointResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -151,7 +152,7 @@ func (c *Debugger2Client) SetBreakpoint(ctx context.Context, req *clouddebuggerp
// GetBreakpoint gets breakpoint information. // GetBreakpoint gets breakpoint information.
func (c *Debugger2Client) GetBreakpoint(ctx context.Context, req *clouddebuggerpb.GetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.GetBreakpointResponse, error) { func (c *Debugger2Client) GetBreakpoint(ctx context.Context, req *clouddebuggerpb.GetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.GetBreakpointResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.GetBreakpoint[0:len(c.CallOptions.GetBreakpoint):len(c.CallOptions.GetBreakpoint)], opts...) opts = append(c.CallOptions.GetBreakpoint[0:len(c.CallOptions.GetBreakpoint):len(c.CallOptions.GetBreakpoint)], opts...)
var resp *clouddebuggerpb.GetBreakpointResponse var resp *clouddebuggerpb.GetBreakpointResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -167,7 +168,7 @@ func (c *Debugger2Client) GetBreakpoint(ctx context.Context, req *clouddebuggerp
// DeleteBreakpoint deletes the breakpoint from the debuggee. // DeleteBreakpoint deletes the breakpoint from the debuggee.
func (c *Debugger2Client) DeleteBreakpoint(ctx context.Context, req *clouddebuggerpb.DeleteBreakpointRequest, opts ...gax.CallOption) error { func (c *Debugger2Client) DeleteBreakpoint(ctx context.Context, req *clouddebuggerpb.DeleteBreakpointRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.DeleteBreakpoint[0:len(c.CallOptions.DeleteBreakpoint):len(c.CallOptions.DeleteBreakpoint)], opts...) opts = append(c.CallOptions.DeleteBreakpoint[0:len(c.CallOptions.DeleteBreakpoint):len(c.CallOptions.DeleteBreakpoint)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error var err error
@@ -179,7 +180,7 @@ func (c *Debugger2Client) DeleteBreakpoint(ctx context.Context, req *clouddebugg
// ListBreakpoints lists all breakpoints for the debuggee. // ListBreakpoints lists all breakpoints for the debuggee.
func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebuggerpb.ListBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListBreakpointsResponse, error) { func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebuggerpb.ListBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListBreakpointsResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListBreakpoints[0:len(c.CallOptions.ListBreakpoints):len(c.CallOptions.ListBreakpoints)], opts...) opts = append(c.CallOptions.ListBreakpoints[0:len(c.CallOptions.ListBreakpoints):len(c.CallOptions.ListBreakpoints)], opts...)
var resp *clouddebuggerpb.ListBreakpointsResponse var resp *clouddebuggerpb.ListBreakpointsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -195,7 +196,7 @@ func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebugge
// ListDebuggees lists all the debuggees that the user has access to. // ListDebuggees lists all the debuggees that the user has access to.
func (c *Debugger2Client) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListDebuggeesResponse, error) { func (c *Debugger2Client) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListDebuggeesResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListDebuggees[0:len(c.CallOptions.ListDebuggees):len(c.CallOptions.ListDebuggees)], opts...) opts = append(c.CallOptions.ListDebuggees[0:len(c.CallOptions.ListDebuggees):len(c.CallOptions.ListDebuggees)], opts...)
var resp *clouddebuggerpb.ListDebuggeesResponse var resp *clouddebuggerpb.ListDebuggeesResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -14,9 +14,11 @@
// AUTO-GENERATED CODE. DO NOT EDIT. // AUTO-GENERATED CODE. DO NOT EDIT.
// Package debugger is an experimental, auto-generated package for the // Package debugger is an auto-generated package for the
// Stackdriver Debugger API. // Stackdriver Debugger API.
// //
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// Examines the call stack and variables of a running application // Examines the call stack and variables of a running application
// without stopping or slowing it down. // without stopping or slowing it down.
// //
@@ -28,11 +30,15 @@ import (
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
) )
func insertXGoog(ctx context.Context, val []string) context.Context { func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
md, _ := metadata.FromOutgoingContext(ctx) out, _ := metadata.FromOutgoingContext(ctx)
md = md.Copy() out = out.Copy()
md["x-goog-api-client"] = val for _, md := range mds {
return metadata.NewOutgoingContext(ctx, md) for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
} }
// DefaultAuthScopes reports the default set of authentication scopes to use with this package. // DefaultAuthScopes reports the default set of authentication scopes to use with this package.

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -30,12 +30,15 @@ import (
dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1" dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
) )
// CallOptions contains the retry settings for each method of Client. // CallOptions contains the retry settings for each method of Client.
type CallOptions struct { type CallOptions struct {
InspectContent []gax.CallOption InspectContent []gax.CallOption
RedactContent []gax.CallOption RedactContent []gax.CallOption
DeidentifyContent []gax.CallOption
AnalyzeDataSourceRisk []gax.CallOption
CreateInspectOperation []gax.CallOption CreateInspectOperation []gax.CallOption
ListInspectFindings []gax.CallOption ListInspectFindings []gax.CallOption
ListInfoTypes []gax.CallOption ListInfoTypes []gax.CallOption
@@ -67,6 +70,8 @@ func defaultCallOptions() *CallOptions {
return &CallOptions{ return &CallOptions{
InspectContent: retry[[2]string{"default", "non_idempotent"}], InspectContent: retry[[2]string{"default", "non_idempotent"}],
RedactContent: retry[[2]string{"default", "non_idempotent"}], RedactContent: retry[[2]string{"default", "non_idempotent"}],
DeidentifyContent: retry[[2]string{"default", "idempotent"}],
AnalyzeDataSourceRisk: retry[[2]string{"default", "idempotent"}],
CreateInspectOperation: retry[[2]string{"default", "non_idempotent"}], CreateInspectOperation: retry[[2]string{"default", "non_idempotent"}],
ListInspectFindings: retry[[2]string{"default", "idempotent"}], ListInspectFindings: retry[[2]string{"default", "idempotent"}],
ListInfoTypes: retry[[2]string{"default", "idempotent"}], ListInfoTypes: retry[[2]string{"default", "idempotent"}],
@@ -90,8 +95,8 @@ type Client struct {
// The call options for this service. // The call options for this service.
CallOptions *CallOptions CallOptions *CallOptions
// The metadata to be sent with each request. // The x-goog-* metadata to be sent with each request.
xGoogHeader []string xGoogMetadata metadata.MD
} }
// NewClient creates a new dlp service client. // NewClient creates a new dlp service client.
@@ -145,7 +150,7 @@ func (c *Client) Close() error {
func (c *Client) setGoogleClientInfo(keyval ...string) { func (c *Client) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...) kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)} c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
} }
// ResultPath returns the path for the result resource. // ResultPath returns the path for the result resource.
@@ -159,7 +164,7 @@ func ResultPath(result string) string {
// InspectContent finds potentially sensitive info in a list of strings. // InspectContent finds potentially sensitive info in a list of strings.
// This method has limits on input size, processing time, and output size. // This method has limits on input size, processing time, and output size.
func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) { func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...) opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...)
var resp *dlppb.InspectContentResponse var resp *dlppb.InspectContentResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -176,7 +181,7 @@ func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRe
// RedactContent redacts potentially sensitive info from a list of strings. // RedactContent redacts potentially sensitive info from a list of strings.
// This method has limits on input size, processing time, and output size. // This method has limits on input size, processing time, and output size.
func (c *Client) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest, opts ...gax.CallOption) (*dlppb.RedactContentResponse, error) { func (c *Client) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest, opts ...gax.CallOption) (*dlppb.RedactContentResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.RedactContent[0:len(c.CallOptions.RedactContent):len(c.CallOptions.RedactContent)], opts...) opts = append(c.CallOptions.RedactContent[0:len(c.CallOptions.RedactContent):len(c.CallOptions.RedactContent)], opts...)
var resp *dlppb.RedactContentResponse var resp *dlppb.RedactContentResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -190,10 +195,46 @@ func (c *Client) RedactContent(ctx context.Context, req *dlppb.RedactContentRequ
return resp, nil return resp, nil
} }
// DeidentifyContent de-identifies potentially sensitive info from a list of strings.
// This method has limits on input size and output size.
func (c *Client) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest, opts ...gax.CallOption) (*dlppb.DeidentifyContentResponse, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.DeidentifyContent[0:len(c.CallOptions.DeidentifyContent):len(c.CallOptions.DeidentifyContent)], opts...)
var resp *dlppb.DeidentifyContentResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.DeidentifyContent(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// AnalyzeDataSourceRisk schedules a job to compute risk analysis metrics over content in a Google
// Cloud Platform repository.
func (c *Client) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest, opts ...gax.CallOption) (*AnalyzeDataSourceRiskOperation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.AnalyzeDataSourceRisk[0:len(c.CallOptions.AnalyzeDataSourceRisk):len(c.CallOptions.AnalyzeDataSourceRisk)], opts...)
var resp *longrunningpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.AnalyzeDataSourceRisk(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return &AnalyzeDataSourceRiskOperation{
lro: longrunning.InternalNewOperation(c.LROClient, resp),
}, nil
}
// CreateInspectOperation schedules a job scanning content in a Google Cloud Platform data // CreateInspectOperation schedules a job scanning content in a Google Cloud Platform data
// repository. // repository.
func (c *Client) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest, opts ...gax.CallOption) (*CreateInspectOperationHandle, error) { func (c *Client) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest, opts ...gax.CallOption) (*CreateInspectOperationHandle, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.CreateInspectOperation[0:len(c.CallOptions.CreateInspectOperation):len(c.CallOptions.CreateInspectOperation)], opts...) opts = append(c.CallOptions.CreateInspectOperation[0:len(c.CallOptions.CreateInspectOperation):len(c.CallOptions.CreateInspectOperation)], opts...)
var resp *longrunningpb.Operation var resp *longrunningpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -211,7 +252,7 @@ func (c *Client) CreateInspectOperation(ctx context.Context, req *dlppb.CreateIn
// ListInspectFindings returns list of results for given inspect operation result set id. // ListInspectFindings returns list of results for given inspect operation result set id.
func (c *Client) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest, opts ...gax.CallOption) (*dlppb.ListInspectFindingsResponse, error) { func (c *Client) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest, opts ...gax.CallOption) (*dlppb.ListInspectFindingsResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListInspectFindings[0:len(c.CallOptions.ListInspectFindings):len(c.CallOptions.ListInspectFindings)], opts...) opts = append(c.CallOptions.ListInspectFindings[0:len(c.CallOptions.ListInspectFindings):len(c.CallOptions.ListInspectFindings)], opts...)
var resp *dlppb.ListInspectFindingsResponse var resp *dlppb.ListInspectFindingsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -227,7 +268,7 @@ func (c *Client) ListInspectFindings(ctx context.Context, req *dlppb.ListInspect
// ListInfoTypes returns sensitive information types for given category. // ListInfoTypes returns sensitive information types for given category.
func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) { func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...) opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...)
var resp *dlppb.ListInfoTypesResponse var resp *dlppb.ListInfoTypesResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -243,7 +284,7 @@ func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequ
// ListRootCategories returns the list of root categories of sensitive information. // ListRootCategories returns the list of root categories of sensitive information.
func (c *Client) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest, opts ...gax.CallOption) (*dlppb.ListRootCategoriesResponse, error) { func (c *Client) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest, opts ...gax.CallOption) (*dlppb.ListRootCategoriesResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListRootCategories[0:len(c.CallOptions.ListRootCategories):len(c.CallOptions.ListRootCategories)], opts...) opts = append(c.CallOptions.ListRootCategories[0:len(c.CallOptions.ListRootCategories):len(c.CallOptions.ListRootCategories)], opts...)
var resp *dlppb.ListRootCategoriesResponse var resp *dlppb.ListRootCategoriesResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -257,6 +298,75 @@ func (c *Client) ListRootCategories(ctx context.Context, req *dlppb.ListRootCate
return resp, nil return resp, nil
} }
// AnalyzeDataSourceRiskOperation manages a long-running operation from AnalyzeDataSourceRisk.
type AnalyzeDataSourceRiskOperation struct {
lro *longrunning.Operation
}
// AnalyzeDataSourceRiskOperation returns a new AnalyzeDataSourceRiskOperation from a given name.
// The name must be that of a previously created AnalyzeDataSourceRiskOperation, possibly from a different process.
func (c *Client) AnalyzeDataSourceRiskOperation(name string) *AnalyzeDataSourceRiskOperation {
return &AnalyzeDataSourceRiskOperation{
lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
}
}
// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *AnalyzeDataSourceRiskOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*dlppb.RiskAnalysisOperationResult, error) {
var resp dlppb.RiskAnalysisOperationResult
if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil {
return nil, err
}
return &resp, nil
}
// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *AnalyzeDataSourceRiskOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*dlppb.RiskAnalysisOperationResult, error) {
var resp dlppb.RiskAnalysisOperationResult
if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
return nil, err
}
if !op.Done() {
return nil, nil
}
return &resp, nil
}
// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *AnalyzeDataSourceRiskOperation) Metadata() (*dlppb.RiskAnalysisOperationMetadata, error) {
var meta dlppb.RiskAnalysisOperationMetadata
if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
return nil, nil
} else if err != nil {
return nil, err
}
return &meta, nil
}
// Done reports whether the long-running operation has completed.
func (op *AnalyzeDataSourceRiskOperation) Done() bool {
return op.lro.Done()
}
// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *AnalyzeDataSourceRiskOperation) Name() string {
return op.lro.Name()
}
// CreateInspectOperationHandle manages a long-running operation from CreateInspectOperation. // CreateInspectOperationHandle manages a long-running operation from CreateInspectOperation.
type CreateInspectOperationHandle struct { type CreateInspectOperationHandle struct {
lro *longrunning.Operation lro *longrunning.Operation

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -68,6 +68,47 @@ func ExampleClient_RedactContent() {
_ = resp _ = resp
} }
func ExampleClient_DeidentifyContent() {
ctx := context.Background()
c, err := dlp.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &dlppb.DeidentifyContentRequest{
// TODO: Fill request struct fields.
}
resp, err := c.DeidentifyContent(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_AnalyzeDataSourceRisk() {
ctx := context.Background()
c, err := dlp.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &dlppb.AnalyzeDataSourceRiskRequest{
// TODO: Fill request struct fields.
}
op, err := c.AnalyzeDataSourceRisk(ctx, req)
if err != nil {
// TODO: Handle error.
}
resp, err := op.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_CreateInspectOperation() { func ExampleClient_CreateInspectOperation() {
ctx := context.Background() ctx := context.Background()
c, err := dlp.NewClient(ctx) c, err := dlp.NewClient(ctx)

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -14,9 +14,11 @@
// AUTO-GENERATED CODE. DO NOT EDIT. // AUTO-GENERATED CODE. DO NOT EDIT.
// Package dlp is an experimental, auto-generated package for the // Package dlp is an auto-generated package for the
// DLP API. // DLP API.
// //
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// The Google Data Loss Prevention API provides methods for detection of // The Google Data Loss Prevention API provides methods for detection of
// privacy-sensitive fragments in text, images, and Google Cloud Platform // privacy-sensitive fragments in text, images, and Google Cloud Platform
// storage repositories. // storage repositories.
@@ -27,11 +29,15 @@ import (
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
) )
func insertXGoog(ctx context.Context, val []string) context.Context { func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
md, _ := metadata.FromOutgoingContext(ctx) out, _ := metadata.FromOutgoingContext(ctx)
md = md.Copy() out = out.Copy()
md["x-goog-api-client"] = val for _, md := range mds {
return metadata.NewOutgoingContext(ctx, md) for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
} }
// DefaultAuthScopes reports the default set of authentication scopes to use with this package. // DefaultAuthScopes reports the default set of authentication scopes to use with this package.

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -85,6 +85,18 @@ func (s *mockDlpServer) RedactContent(ctx context.Context, req *dlppb.RedactCont
return s.resps[0].(*dlppb.RedactContentResponse), nil return s.resps[0].(*dlppb.RedactContentResponse), nil
} }
func (s *mockDlpServer) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest) (*dlppb.DeidentifyContentResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*dlppb.DeidentifyContentResponse), nil
}
func (s *mockDlpServer) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest) (*longrunningpb.Operation, error) { func (s *mockDlpServer) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest) (*longrunningpb.Operation, error) {
md, _ := metadata.FromIncomingContext(ctx) md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
@@ -97,6 +109,18 @@ func (s *mockDlpServer) CreateInspectOperation(ctx context.Context, req *dlppb.C
return s.resps[0].(*longrunningpb.Operation), nil return s.resps[0].(*longrunningpb.Operation), nil
} }
func (s *mockDlpServer) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest) (*longrunningpb.Operation, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*longrunningpb.Operation), nil
}
func (s *mockDlpServer) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest) (*dlppb.ListInspectFindingsResponse, error) { func (s *mockDlpServer) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest) (*dlppb.ListInspectFindingsResponse, error) {
md, _ := metadata.FromIncomingContext(ctx) md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
@@ -170,8 +194,23 @@ func TestDlpServiceInspectContent(t *testing.T) {
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{} var name string = "EMAIL_ADDRESS"
var items []*dlppb.ContentItem = nil var infoTypesElement = &dlppb.InfoType{
Name: name,
}
var infoTypes = []*dlppb.InfoType{infoTypesElement}
var inspectConfig = &dlppb.InspectConfig{
InfoTypes: infoTypes,
}
var type_ string = "text/plain"
var value string = "My email is example@example.com."
var itemsElement = &dlppb.ContentItem{
Type: type_,
DataItem: &dlppb.ContentItem_Value{
Value: value,
},
}
var items = []*dlppb.ContentItem{itemsElement}
var request = &dlppb.InspectContentRequest{ var request = &dlppb.InspectContentRequest{
InspectConfig: inspectConfig, InspectConfig: inspectConfig,
Items: items, Items: items,
@@ -201,8 +240,23 @@ func TestDlpServiceInspectContentError(t *testing.T) {
errCode := codes.PermissionDenied errCode := codes.PermissionDenied
mockDlp.err = gstatus.Error(errCode, "test error") mockDlp.err = gstatus.Error(errCode, "test error")
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{} var name string = "EMAIL_ADDRESS"
var items []*dlppb.ContentItem = nil var infoTypesElement = &dlppb.InfoType{
Name: name,
}
var infoTypes = []*dlppb.InfoType{infoTypesElement}
var inspectConfig = &dlppb.InspectConfig{
InfoTypes: infoTypes,
}
var type_ string = "text/plain"
var value string = "My email is example@example.com."
var itemsElement = &dlppb.ContentItem{
Type: type_,
DataItem: &dlppb.ContentItem_Value{
Value: value,
},
}
var items = []*dlppb.ContentItem{itemsElement}
var request = &dlppb.InspectContentRequest{ var request = &dlppb.InspectContentRequest{
InspectConfig: inspectConfig, InspectConfig: inspectConfig,
Items: items, Items: items,
@@ -230,13 +284,26 @@ func TestDlpServiceRedactContent(t *testing.T) {
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{} var name string = "EMAIL_ADDRESS"
var items []*dlppb.ContentItem = nil var infoTypesElement = &dlppb.InfoType{
var replaceConfigs []*dlppb.RedactContentRequest_ReplaceConfig = nil Name: name,
}
var infoTypes = []*dlppb.InfoType{infoTypesElement}
var inspectConfig = &dlppb.InspectConfig{
InfoTypes: infoTypes,
}
var type_ string = "text/plain"
var value string = "My email is example@example.com."
var itemsElement = &dlppb.ContentItem{
Type: type_,
DataItem: &dlppb.ContentItem_Value{
Value: value,
},
}
var items = []*dlppb.ContentItem{itemsElement}
var request = &dlppb.RedactContentRequest{ var request = &dlppb.RedactContentRequest{
InspectConfig: inspectConfig, InspectConfig: inspectConfig,
Items: items, Items: items,
ReplaceConfigs: replaceConfigs,
} }
c, err := NewClient(context.Background(), clientOpt) c, err := NewClient(context.Background(), clientOpt)
@@ -263,13 +330,26 @@ func TestDlpServiceRedactContentError(t *testing.T) {
errCode := codes.PermissionDenied errCode := codes.PermissionDenied
mockDlp.err = gstatus.Error(errCode, "test error") mockDlp.err = gstatus.Error(errCode, "test error")
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{} var name string = "EMAIL_ADDRESS"
var items []*dlppb.ContentItem = nil var infoTypesElement = &dlppb.InfoType{
var replaceConfigs []*dlppb.RedactContentRequest_ReplaceConfig = nil Name: name,
}
var infoTypes = []*dlppb.InfoType{infoTypesElement}
var inspectConfig = &dlppb.InspectConfig{
InfoTypes: infoTypes,
}
var type_ string = "text/plain"
var value string = "My email is example@example.com."
var itemsElement = &dlppb.ContentItem{
Type: type_,
DataItem: &dlppb.ContentItem_Value{
Value: value,
},
}
var items = []*dlppb.ContentItem{itemsElement}
var request = &dlppb.RedactContentRequest{ var request = &dlppb.RedactContentRequest{
InspectConfig: inspectConfig, InspectConfig: inspectConfig,
Items: items, Items: items,
ReplaceConfigs: replaceConfigs,
} }
c, err := NewClient(context.Background(), clientOpt) c, err := NewClient(context.Background(), clientOpt)
@@ -286,10 +366,160 @@ func TestDlpServiceRedactContentError(t *testing.T) {
} }
_ = resp _ = resp
} }
func TestDlpServiceDeidentifyContent(t *testing.T) {
var expectedResponse *dlppb.DeidentifyContentResponse = &dlppb.DeidentifyContentResponse{}
mockDlp.err = nil
mockDlp.reqs = nil
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{}
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
var items []*dlppb.ContentItem = nil
var request = &dlppb.DeidentifyContentRequest{
DeidentifyConfig: deidentifyConfig,
InspectConfig: inspectConfig,
Items: items,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.DeidentifyContent(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestDlpServiceDeidentifyContentError(t *testing.T) {
errCode := codes.PermissionDenied
mockDlp.err = gstatus.Error(errCode, "test error")
var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{}
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
var items []*dlppb.ContentItem = nil
var request = &dlppb.DeidentifyContentRequest{
DeidentifyConfig: deidentifyConfig,
InspectConfig: inspectConfig,
Items: items,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.DeidentifyContent(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestDlpServiceAnalyzeDataSourceRisk(t *testing.T) {
var expectedResponse *dlppb.RiskAnalysisOperationResult = &dlppb.RiskAnalysisOperationResult{}
mockDlp.err = nil
mockDlp.reqs = nil
any, err := ptypes.MarshalAny(expectedResponse)
if err != nil {
t.Fatal(err)
}
mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Response{Response: any},
})
var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{}
var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{}
var request = &dlppb.AnalyzeDataSourceRiskRequest{
PrivacyMetric: privacyMetric,
SourceTable: sourceTable,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())
if err != nil {
t.Fatal(err)
}
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestDlpServiceAnalyzeDataSourceRiskError(t *testing.T) {
errCode := codes.PermissionDenied
mockDlp.err = nil
mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Error{
Error: &status.Status{
Code: int32(errCode),
Message: "test error",
},
},
})
var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{}
var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{}
var request = &dlppb.AnalyzeDataSourceRiskRequest{
PrivacyMetric: privacyMetric,
SourceTable: sourceTable,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestDlpServiceCreateInspectOperation(t *testing.T) { func TestDlpServiceCreateInspectOperation(t *testing.T) {
var name string = "name3373707" var name2 string = "name2-1052831874"
var expectedResponse = &dlppb.InspectOperationResult{ var expectedResponse = &dlppb.InspectOperationResult{
Name: name, Name: name2,
} }
mockDlp.err = nil mockDlp.err = nil
@@ -305,8 +535,26 @@ func TestDlpServiceCreateInspectOperation(t *testing.T) {
Result: &longrunningpb.Operation_Response{Response: any}, Result: &longrunningpb.Operation_Response{Response: any},
}) })
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{} var name string = "EMAIL_ADDRESS"
var storageConfig *dlppb.StorageConfig = &dlppb.StorageConfig{} var infoTypesElement = &dlppb.InfoType{
Name: name,
}
var infoTypes = []*dlppb.InfoType{infoTypesElement}
var inspectConfig = &dlppb.InspectConfig{
InfoTypes: infoTypes,
}
var url string = "gs://example_bucket/example_file.png"
var fileSet = &dlppb.CloudStorageOptions_FileSet{
Url: url,
}
var cloudStorageOptions = &dlppb.CloudStorageOptions{
FileSet: fileSet,
}
var storageConfig = &dlppb.StorageConfig{
Type: &dlppb.StorageConfig_CloudStorageOptions{
CloudStorageOptions: cloudStorageOptions,
},
}
var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{} var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{}
var request = &dlppb.CreateInspectOperationRequest{ var request = &dlppb.CreateInspectOperationRequest{
InspectConfig: inspectConfig, InspectConfig: inspectConfig,
@@ -352,8 +600,26 @@ func TestDlpServiceCreateInspectOperationError(t *testing.T) {
}, },
}) })
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{} var name string = "EMAIL_ADDRESS"
var storageConfig *dlppb.StorageConfig = &dlppb.StorageConfig{} var infoTypesElement = &dlppb.InfoType{
Name: name,
}
var infoTypes = []*dlppb.InfoType{infoTypesElement}
var inspectConfig = &dlppb.InspectConfig{
InfoTypes: infoTypes,
}
var url string = "gs://example_bucket/example_file.png"
var fileSet = &dlppb.CloudStorageOptions_FileSet{
Url: url,
}
var cloudStorageOptions = &dlppb.CloudStorageOptions{
FileSet: fileSet,
}
var storageConfig = &dlppb.StorageConfig{
Type: &dlppb.StorageConfig_CloudStorageOptions{
CloudStorageOptions: cloudStorageOptions,
},
}
var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{} var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{}
var request = &dlppb.CreateInspectOperationRequest{ var request = &dlppb.CreateInspectOperationRequest{
InspectConfig: inspectConfig, InspectConfig: inspectConfig,
@@ -446,8 +712,8 @@ func TestDlpServiceListInfoTypes(t *testing.T) {
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
var category string = "category50511102" var category string = "PII"
var languageCode string = "languageCode-412800396" var languageCode string = "en"
var request = &dlppb.ListInfoTypesRequest{ var request = &dlppb.ListInfoTypesRequest{
Category: category, Category: category,
LanguageCode: languageCode, LanguageCode: languageCode,
@@ -477,8 +743,8 @@ func TestDlpServiceListInfoTypesError(t *testing.T) {
errCode := codes.PermissionDenied errCode := codes.PermissionDenied
mockDlp.err = gstatus.Error(errCode, "test error") mockDlp.err = gstatus.Error(errCode, "test error")
var category string = "category50511102" var category string = "PII"
var languageCode string = "languageCode-412800396" var languageCode string = "en"
var request = &dlppb.ListInfoTypesRequest{ var request = &dlppb.ListInfoTypesRequest{
Category: category, Category: category,
LanguageCode: languageCode, LanguageCode: languageCode,
@@ -506,7 +772,7 @@ func TestDlpServiceListRootCategories(t *testing.T) {
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
var languageCode string = "languageCode-412800396" var languageCode string = "en"
var request = &dlppb.ListRootCategoriesRequest{ var request = &dlppb.ListRootCategoriesRequest{
LanguageCode: languageCode, LanguageCode: languageCode,
} }
@@ -535,7 +801,7 @@ func TestDlpServiceListRootCategoriesError(t *testing.T) {
errCode := codes.PermissionDenied errCode := codes.PermissionDenied
mockDlp.err = gstatus.Error(errCode, "test error") mockDlp.err = gstatus.Error(errCode, "test error")
var languageCode string = "languageCode-412800396" var languageCode string = "en"
var request = &dlppb.ListRootCategoriesRequest{ var request = &dlppb.ListRootCategoriesRequest{
LanguageCode: languageCode, LanguageCode: languageCode,
} }

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -14,9 +14,11 @@
// AUTO-GENERATED CODE. DO NOT EDIT. // AUTO-GENERATED CODE. DO NOT EDIT.
// Package errorreporting is an experimental, auto-generated package for the // Package errorreporting is an auto-generated package for the
// Stackdriver Error Reporting API. // Stackdriver Error Reporting API.
// //
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// Stackdriver Error Reporting groups and counts similar errors from cloud // Stackdriver Error Reporting groups and counts similar errors from cloud
// services. The Stackdriver Error Reporting API provides a way to report new // services. The Stackdriver Error Reporting API provides a way to report new
// errors and read access to error groups and their associated errors. // errors and read access to error groups and their associated errors.
@@ -29,11 +31,15 @@ import (
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
) )
func insertXGoog(ctx context.Context, val []string) context.Context { func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
md, _ := metadata.FromOutgoingContext(ctx) out, _ := metadata.FromOutgoingContext(ctx)
md = md.Copy() out = out.Copy()
md["x-goog-api-client"] = val for _, md := range mds {
return metadata.NewOutgoingContext(ctx, md) for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
} }
// DefaultAuthScopes reports the default set of authentication scopes to use with this package. // DefaultAuthScopes reports the default set of authentication scopes to use with this package.

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -27,6 +27,7 @@ import (
clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
) )
// ErrorGroupCallOptions contains the retry settings for each method of ErrorGroupClient. // ErrorGroupCallOptions contains the retry settings for each method of ErrorGroupClient.
@@ -74,8 +75,8 @@ type ErrorGroupClient struct {
// The call options for this service. // The call options for this service.
CallOptions *ErrorGroupCallOptions CallOptions *ErrorGroupCallOptions
// The metadata to be sent with each request. // The x-goog-* metadata to be sent with each request.
xGoogHeader []string xGoogMetadata metadata.MD
} }
// NewErrorGroupClient creates a new error group service client. // NewErrorGroupClient creates a new error group service client.
@@ -113,7 +114,7 @@ func (c *ErrorGroupClient) Close() error {
func (c *ErrorGroupClient) SetGoogleClientInfo(keyval ...string) { func (c *ErrorGroupClient) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...) kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)} c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
} }
// ErrorGroupGroupPath returns the path for the group resource. // ErrorGroupGroupPath returns the path for the group resource.
@@ -128,7 +129,7 @@ func ErrorGroupGroupPath(project, group string) string {
// GetGroup get the specified group. // GetGroup get the specified group.
func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) { func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...) opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...)
var resp *clouderrorreportingpb.ErrorGroup var resp *clouderrorreportingpb.ErrorGroup
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -145,7 +146,7 @@ func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportin
// UpdateGroup replace the data for the specified group. // UpdateGroup replace the data for the specified group.
// Fails if the group does not exist. // Fails if the group does not exist.
func (c *ErrorGroupClient) UpdateGroup(ctx context.Context, req *clouderrorreportingpb.UpdateGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) { func (c *ErrorGroupClient) UpdateGroup(ctx context.Context, req *clouderrorreportingpb.UpdateGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...) opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...)
var resp *clouderrorreportingpb.ErrorGroup var resp *clouderrorreportingpb.ErrorGroup
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -29,6 +29,7 @@ import (
clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
) )
// ErrorStatsCallOptions contains the retry settings for each method of ErrorStatsClient. // ErrorStatsCallOptions contains the retry settings for each method of ErrorStatsClient.
@@ -78,8 +79,8 @@ type ErrorStatsClient struct {
// The call options for this service. // The call options for this service.
CallOptions *ErrorStatsCallOptions CallOptions *ErrorStatsCallOptions
// The metadata to be sent with each request. // The x-goog-* metadata to be sent with each request.
xGoogHeader []string xGoogMetadata metadata.MD
} }
// NewErrorStatsClient creates a new error stats service client. // NewErrorStatsClient creates a new error stats service client.
@@ -118,7 +119,7 @@ func (c *ErrorStatsClient) Close() error {
func (c *ErrorStatsClient) SetGoogleClientInfo(keyval ...string) { func (c *ErrorStatsClient) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...) kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)} c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
} }
// ErrorStatsProjectPath returns the path for the project resource. // ErrorStatsProjectPath returns the path for the project resource.
@@ -131,7 +132,7 @@ func ErrorStatsProjectPath(project string) string {
// ListGroupStats lists the specified groups. // ListGroupStats lists the specified groups.
func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest, opts ...gax.CallOption) *ErrorGroupStatsIterator { func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest, opts ...gax.CallOption) *ErrorGroupStatsIterator {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListGroupStats[0:len(c.CallOptions.ListGroupStats):len(c.CallOptions.ListGroupStats)], opts...) opts = append(c.CallOptions.ListGroupStats[0:len(c.CallOptions.ListGroupStats):len(c.CallOptions.ListGroupStats)], opts...)
it := &ErrorGroupStatsIterator{} it := &ErrorGroupStatsIterator{}
it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorGroupStats, string, error) { it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorGroupStats, string, error) {
@@ -166,7 +167,7 @@ func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorre
// ListEvents lists the specified events. // ListEvents lists the specified events.
func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreportingpb.ListEventsRequest, opts ...gax.CallOption) *ErrorEventIterator { func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreportingpb.ListEventsRequest, opts ...gax.CallOption) *ErrorEventIterator {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListEvents[0:len(c.CallOptions.ListEvents):len(c.CallOptions.ListEvents)], opts...) opts = append(c.CallOptions.ListEvents[0:len(c.CallOptions.ListEvents):len(c.CallOptions.ListEvents)], opts...)
it := &ErrorEventIterator{} it := &ErrorEventIterator{}
it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorEvent, string, error) { it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorEvent, string, error) {
@@ -201,7 +202,7 @@ func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreport
// DeleteEvents deletes all error events of a given project. // DeleteEvents deletes all error events of a given project.
func (c *ErrorStatsClient) DeleteEvents(ctx context.Context, req *clouderrorreportingpb.DeleteEventsRequest, opts ...gax.CallOption) (*clouderrorreportingpb.DeleteEventsResponse, error) { func (c *ErrorStatsClient) DeleteEvents(ctx context.Context, req *clouderrorreportingpb.DeleteEventsRequest, opts ...gax.CallOption) (*clouderrorreportingpb.DeleteEventsResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.DeleteEvents[0:len(c.CallOptions.DeleteEvents):len(c.CallOptions.DeleteEvents)], opts...) opts = append(c.CallOptions.DeleteEvents[0:len(c.CallOptions.DeleteEvents):len(c.CallOptions.DeleteEvents)], opts...)
var resp *clouderrorreportingpb.DeleteEventsResponse var resp *clouderrorreportingpb.DeleteEventsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -24,6 +24,7 @@ import (
"google.golang.org/api/transport" "google.golang.org/api/transport"
clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/metadata"
) )
// ReportErrorsCallOptions contains the retry settings for each method of ReportErrorsClient. // ReportErrorsCallOptions contains the retry settings for each method of ReportErrorsClient.
@@ -56,8 +57,8 @@ type ReportErrorsClient struct {
// The call options for this service. // The call options for this service.
CallOptions *ReportErrorsCallOptions CallOptions *ReportErrorsCallOptions
// The metadata to be sent with each request. // The x-goog-* metadata to be sent with each request.
xGoogHeader []string xGoogMetadata metadata.MD
} }
// NewReportErrorsClient creates a new report errors service client. // NewReportErrorsClient creates a new report errors service client.
@@ -95,7 +96,7 @@ func (c *ReportErrorsClient) Close() error {
func (c *ReportErrorsClient) SetGoogleClientInfo(keyval ...string) { func (c *ReportErrorsClient) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...) kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)} c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
} }
// ReportErrorsProjectPath returns the path for the project resource. // ReportErrorsProjectPath returns the path for the project resource.
@@ -114,7 +115,7 @@ func ReportErrorsProjectPath(project string) string {
// for authentication. To use an API key, append it to the URL as the value of // for authentication. To use an API key, append it to the URL as the value of
// a key parameter. For example:<pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre> // a key parameter. For example:<pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
func (c *ReportErrorsClient) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ReportErrorEventResponse, error) { func (c *ReportErrorsClient) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ReportErrorEventResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ReportErrorEvent[0:len(c.CallOptions.ReportErrorEvent):len(c.CallOptions.ReportErrorEvent)], opts...) opts = append(c.CallOptions.ReportErrorEvent[0:len(c.CallOptions.ReportErrorEvent):len(c.CallOptions.ReportErrorEvent)], opts...)
var resp *clouderrorreportingpb.ReportErrorEventResponse var resp *clouderrorreportingpb.ReportErrorEventResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

View File

@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved. // Copyright 2017, Google LLC All rights reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@@ -1,215 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errorreporting
import (
"bytes"
"errors"
"log"
"strings"
"testing"
"cloud.google.com/go/logging"
"golang.org/x/net/context"
"google.golang.org/api/option"
)
type fakeLogger struct {
entry *logging.Entry
fail bool
}
func (c *fakeLogger) LogSync(ctx context.Context, e logging.Entry) error {
if c.fail {
return errors.New("request failed")
}
c.entry = &e
return nil
}
func (c *fakeLogger) Close() error {
return nil
}
func newTestClientUsingLogging(c *fakeLogger) *Client {
newLoggerInterface = func(ctx context.Context, project string, opts ...option.ClientOption) (loggerInterface, error) {
return c, nil
}
t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", true)
if err != nil {
panic(err)
}
t.RepanicDefault = false
return t
}
func TestCatchNothingUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx)
}
func entryMessage(e *logging.Entry) string {
return e.Payload.(map[string]interface{})["message"].(string)
}
func commonLoggingChecks(t *testing.T, e *logging.Entry, panickingFunction string) {
if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["service"] != "myservice" {
t.Errorf("error report didn't contain service name")
}
if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["version"] != "v1.000" {
t.Errorf("error report didn't contain version name")
}
if !strings.Contains(entryMessage(e), "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(entryMessage(e), panickingFunction) {
t.Errorf("error report didn't contain stack trace")
}
}
func TestCatchPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestCatchPanic")
if !strings.Contains(entryMessage(e), "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchPanicNilClientUsingLogging(t *testing.T) {
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "TestCatchPanicNilClient") {
t.Errorf("error report didn't contain recovered value")
}
}()
var c *Client
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestLogFailedReportsUsingLogging(t *testing.T) {
fl := &fakeLogger{fail: true}
c := newTestClientUsingLogging(fl)
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "errorreporting.TestLogFailedReports") {
t.Errorf("error report didn't contain stack trace")
}
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchNilPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestCatchNilPanic")
if !strings.Contains(entryMessage(e), "nil") {
t.Errorf("error report didn't contain recovered value")
}
}()
b := true
defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b))
panic(nil)
}
func TestNotCatchNilPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
panic(nil)
}
func TestReportUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
c.Report(ctx, nil, "hello, ", "error")
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestReport")
}
func TestReportfUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2)
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestReportf")
if !strings.Contains(entryMessage(e), "2+2=4") {
t.Errorf("error report didn't contain formatted message")
}
}
func TestCloseUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
err := c.Close()
if err != nil {
t.Fatal(err)
}
}

View File

@@ -17,58 +17,6 @@
// This package is still experimental and subject to change. // This package is still experimental and subject to change.
// //
// See https://cloud.google.com/error-reporting/ for more information. // See https://cloud.google.com/error-reporting/ for more information.
//
// To initialize a client, use the NewClient function.
//
// import "cloud.google.com/go/errorreporting"
// ...
// errorsClient, err = errorreporting.NewClient(ctx, projectID, "myservice", "v1.0", true)
//
// The client can recover panics in your program and report them as errors.
// To use this functionality, defer its Catch method, as you would any other
// function for recovering panics.
//
// func foo(ctx context.Context, ...) {
// defer errorsClient.Catch(ctx)
// ...
// }
//
// Catch writes an error report containing the recovered value and a stack trace
// to Stackdriver Error Reporting.
//
// There are various options you can add to the call to Catch that modify how
// panics are handled.
//
// WithMessage and WithMessagef add a custom message after the recovered value,
// using fmt.Sprint and fmt.Sprintf respectively.
//
// defer errorsClient.Catch(ctx, errorreporting.WithMessagef("x=%d", x))
//
// WithRequest fills in various fields in the error report with information
// about an http.Request that's being handled.
//
// defer errorsClient.Catch(ctx, errorreporting.WithRequest(httpReq))
//
// By default, after recovering a panic, Catch will panic again with the
// recovered value. You can turn off this behavior with the Repanic option.
//
// defer errorsClient.Catch(ctx, errorreporting.Repanic(false))
//
// You can also change the default behavior for the client by changing the
// RepanicDefault field.
//
// errorsClient.RepanicDefault = false
//
// It is also possible to write an error report directly without recovering a
// panic, using Report or Reportf.
//
// if err != nil {
// errorsClient.Reportf(ctx, r, "unexpected error %v", err)
// }
//
// If you try to write an error report with a nil client, or if the client
// fails to write the report to the server, the error report is logged using
// log.Println.
package errorreporting // import "cloud.google.com/go/errorreporting" package errorreporting // import "cloud.google.com/go/errorreporting"
import ( import (
@@ -77,16 +25,15 @@ import (
"log" "log"
"net/http" "net/http"
"runtime" "runtime"
"strings"
"time" "time"
api "cloud.google.com/go/errorreporting/apiv1beta1" api "cloud.google.com/go/errorreporting/apiv1beta1"
"cloud.google.com/go/internal/version" "cloud.google.com/go/internal/version"
"cloud.google.com/go/logging" "github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/timestamp"
gax "github.com/googleapis/gax-go" gax "github.com/googleapis/gax-go"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/api/option" "google.golang.org/api/option"
"google.golang.org/api/support/bundler"
erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
) )
@@ -94,12 +41,40 @@ const (
userAgent = `gcloud-golang-errorreporting/20160701` userAgent = `gcloud-golang-errorreporting/20160701`
) )
type apiInterface interface { // Config is additional configuration for Client.
ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, opts ...gax.CallOption) (*erpb.ReportErrorEventResponse, error) type Config struct {
Close() error // ServiceName identifies the running program and is included in the error reports.
// Optional.
ServiceName string
// ServiceVersion identifies the version of the running program and is
// included in the error reports.
// Optional.
ServiceVersion string
// OnError is the function to call if any background
// tasks errored. By default, errors are logged.
OnError func(err error)
} }
var newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) { // Entry holds information about the reported error.
type Entry struct {
Error error
Req *http.Request // if error is associated with a request.
Stack []byte // if user does not provide a stack trace, runtime.Stack will be called
}
// Client represents a Google Cloud Error Reporting client.
type Client struct {
projectID string
apiClient client
serviceContext erpb.ServiceContext
bundler *bundler.Bundler
onErrorFn func(err error)
}
var newClient = func(ctx context.Context, opts ...option.ClientOption) (client, error) {
client, err := api.NewReportErrorsClient(ctx, opts...) client, err := api.NewReportErrorsClient(ctx, opts...)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -108,289 +83,99 @@ var newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (ap
return client, nil return client, nil
} }
type loggerInterface interface {
LogSync(ctx context.Context, e logging.Entry) error
Close() error
}
type logger struct {
*logging.Logger
c *logging.Client
}
func (l logger) Close() error {
return l.c.Close()
}
var newLoggerInterface = func(ctx context.Context, projectID string, opts ...option.ClientOption) (loggerInterface, error) {
lc, err := logging.NewClient(ctx, projectID, opts...)
if err != nil {
return nil, fmt.Errorf("creating Logging client: %v", err)
}
l := lc.Logger("errorreports")
return logger{l, lc}, nil
}
type sender interface {
send(ctx context.Context, r *http.Request, message string)
close() error
}
// errorApiSender sends error reports using the Stackdriver Error Reporting API.
type errorApiSender struct {
apiClient apiInterface
projectID string
serviceContext erpb.ServiceContext
}
// loggingSender sends error reports using the Stackdriver Logging API.
type loggingSender struct {
logger loggerInterface
projectID string
serviceContext map[string]string
}
// Client represents a Google Cloud Error Reporting client.
type Client struct {
sender
// RepanicDefault determines whether Catch will re-panic after recovering a
// panic. This behavior can be overridden for an individual call to Catch using
// the Repanic option.
RepanicDefault bool
}
// NewClient returns a new error reporting client. Generally you will want // NewClient returns a new error reporting client. Generally you will want
// to create a client on program initialization and use it through the lifetime // to create a client on program initialization and use it through the lifetime
// of the process. // of the process.
// func NewClient(ctx context.Context, projectID string, cfg Config, opts ...option.ClientOption) (*Client, error) {
// The service name and version string identify the running program, and are if cfg.ServiceName == "" {
// included in error reports. The version string can be left empty. cfg.ServiceName = "goapp"
//
// Set useLogging to report errors also using Stackdriver Logging,
// which will result in errors appearing in both the logs and the error
// dashboard. This is useful if you are already a user of Stackdriver Logging.
func NewClient(ctx context.Context, projectID, serviceName, serviceVersion string, useLogging bool, opts ...option.ClientOption) (*Client, error) {
if useLogging {
l, err := newLoggerInterface(ctx, projectID, opts...)
if err != nil {
return nil, fmt.Errorf("creating Logging client: %v", err)
}
sender := &loggingSender{
logger: l,
projectID: projectID,
serviceContext: map[string]string{
"service": serviceName,
},
}
if serviceVersion != "" {
sender.serviceContext["version"] = serviceVersion
}
c := &Client{
sender: sender,
RepanicDefault: true,
}
return c, nil
} else {
a, err := newApiInterface(ctx, opts...)
if err != nil {
return nil, fmt.Errorf("creating Error Reporting client: %v", err)
}
c := &Client{
sender: &errorApiSender{
apiClient: a,
projectID: "projects/" + projectID,
serviceContext: erpb.ServiceContext{
Service: serviceName,
Version: serviceVersion,
},
},
RepanicDefault: true,
}
return c, nil
} }
c, err := newClient(ctx, opts...)
if err != nil {
return nil, fmt.Errorf("creating client: %v", err)
}
client := &Client{
apiClient: c,
projectID: "projects/" + projectID,
serviceContext: erpb.ServiceContext{
Service: cfg.ServiceName,
Version: cfg.ServiceVersion,
},
}
bundler := bundler.NewBundler((*erpb.ReportErrorEventRequest)(nil), func(bundle interface{}) {
reqs := bundle.([]*erpb.ReportErrorEventRequest)
for _, req := range reqs {
_, err = client.apiClient.ReportErrorEvent(ctx, req)
if err != nil {
client.onError(fmt.Errorf("failed to upload: %v", err))
}
}
})
// TODO(jbd): Optimize bundler limits.
bundler.DelayThreshold = 2 * time.Second
bundler.BundleCountThreshold = 100
bundler.BundleByteThreshold = 1000
bundler.BundleByteLimit = 1000
bundler.BufferedByteLimit = 10000
client.bundler = bundler
return client, nil
}
func (c *Client) onError(err error) {
if c.onErrorFn != nil {
c.onErrorFn(err)
return
}
log.Println(err)
} }
// Close closes any resources held by the client. // Close closes any resources held by the client.
// Close should be called when the client is no longer needed. // Close should be called when the client is no longer needed.
// It need not be called at program exit. // It need not be called at program exit.
func (c *Client) Close() error { func (c *Client) Close() error {
err := c.sender.close() return c.apiClient.Close()
c.sender = nil }
// Report writes an error report. It doesn't block. Errors in
// writing the error report can be handled via Client.OnError.
func (c *Client) Report(e Entry) {
var stack string
if e.Stack != nil {
stack = string(e.Stack)
}
req := c.makeReportErrorEventRequest(e.Req, e.Error.Error(), stack)
c.bundler.Add(req, 1)
}
// ReportSync writes an error report. It blocks until the entry is written.
func (c *Client) ReportSync(ctx context.Context, e Entry) error {
var stack string
if e.Stack != nil {
stack = string(e.Stack)
}
req := c.makeReportErrorEventRequest(e.Req, e.Error.Error(), stack)
_, err := c.apiClient.ReportErrorEvent(ctx, req)
return err return err
} }
// An Option is an optional argument to Catch. // Flush blocks until all currently buffered error reports are sent.
type Option interface {
isOption()
}
// PanicFlag returns an Option that can inform Catch that a panic has occurred.
// If *p is true when Catch is called, an error report is made even if recover
// returns nil. This allows Catch to report an error for panic(nil).
// If p is nil, the option is ignored.
// //
// Here is an example of how to use PanicFlag: // If any errors occurred since the last call to Flush, or the
// // creation of the client if this is the first call, then Flush report the
// func foo(ctx context.Context, ...) { // error via the (*Client).OnError handler.
// hasPanicked := true func (c *Client) Flush() {
// defer errorsClient.Catch(ctx, errorreporting.PanicFlag(&hasPanicked)) c.bundler.Flush()
// ...
// ...
// // We have reached the end of the function, so we're not panicking.
// hasPanicked = false
// }
func PanicFlag(p *bool) Option { return panicFlag{p} }
type panicFlag struct {
*bool
} }
func (h panicFlag) isOption() {} func (c *Client) makeReportErrorEventRequest(r *http.Request, msg string, stack string) *erpb.ReportErrorEventRequest {
if stack == "" {
// Repanic returns an Option that determines whether Catch will re-panic after // limit the stack trace to 16k.
// it reports an error. This overrides the default in the client. var buf [16 * 1024]byte
func Repanic(r bool) Option { return repanic(r) } stack = chopStack(buf[0:runtime.Stack(buf[:], false)])
type repanic bool
func (r repanic) isOption() {}
// WithRequest returns an Option that informs Catch or Report of an http.Request
// that is being handled. Information from the Request is included in the error
// report, if one is made.
func WithRequest(r *http.Request) Option { return withRequest{r} }
type withRequest struct {
*http.Request
}
func (w withRequest) isOption() {}
// WithMessage returns an Option that sets a message to be included in the error
// report, if one is made. v is converted to a string with fmt.Sprint.
func WithMessage(v ...interface{}) Option { return message(v) }
type message []interface{}
func (m message) isOption() {}
// WithMessagef returns an Option that sets a message to be included in the error
// report, if one is made. format and v are converted to a string with fmt.Sprintf.
func WithMessagef(format string, v ...interface{}) Option { return messagef{format, v} }
type messagef struct {
format string
v []interface{}
}
func (m messagef) isOption() {}
// Catch tries to recover a panic; if it succeeds, it writes an error report.
// It should be called by deferring it, like any other function for recovering
// panics.
//
// Catch can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Catch(ctx context.Context, opt ...Option) {
panicked := false
for _, o := range opt {
switch o := o.(type) {
case panicFlag:
panicked = panicked || o.bool != nil && *o.bool
}
} }
x := recover() message := msg + "\n" + stack
if x == nil && !panicked {
return
}
var (
r *http.Request
shouldRepanic = true
messages = []string{fmt.Sprint(x)}
)
if c != nil {
shouldRepanic = c.RepanicDefault
}
for _, o := range opt {
switch o := o.(type) {
case repanic:
shouldRepanic = bool(o)
case withRequest:
r = o.Request
case message:
messages = append(messages, fmt.Sprint(o...))
case messagef:
messages = append(messages, fmt.Sprintf(o.format, o.v...))
}
}
c.logInternal(ctx, r, true, strings.Join(messages, " "))
if shouldRepanic {
panic(x)
}
}
// Report writes an error report unconditionally, instead of only when a panic
// occurs.
// If r is non-nil, information from the Request is included in the error report.
//
// Report can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Report(ctx context.Context, r *http.Request, v ...interface{}) {
c.logInternal(ctx, r, false, fmt.Sprint(v...))
}
// Reportf writes an error report unconditionally, instead of only when a panic
// occurs.
// If r is non-nil, information from the Request is included in the error report.
//
// Reportf can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Reportf(ctx context.Context, r *http.Request, format string, v ...interface{}) {
c.logInternal(ctx, r, false, fmt.Sprintf(format, v...))
}
func (c *Client) logInternal(ctx context.Context, r *http.Request, isPanic bool, msg string) {
// limit the stack trace to 16k.
var buf [16384]byte
stack := buf[0:runtime.Stack(buf[:], false)]
message := msg + "\n" + chopStack(stack, isPanic)
if c == nil {
log.Println("Error report used nil client:", message)
return
}
c.send(ctx, r, message)
}
func (s *loggingSender) send(ctx context.Context, r *http.Request, message string) {
payload := map[string]interface{}{
"eventTime": time.Now().In(time.UTC).Format(time.RFC3339Nano),
"message": message,
"serviceContext": s.serviceContext,
}
if r != nil {
payload["context"] = map[string]interface{}{
"httpRequest": map[string]interface{}{
"method": r.Method,
"url": r.Host + r.RequestURI,
"userAgent": r.UserAgent(),
"referrer": r.Referer(),
"remoteIp": r.RemoteAddr,
},
}
}
e := logging.Entry{
Severity: logging.Error,
Payload: payload,
}
err := s.logger.LogSync(ctx, e)
if err != nil {
log.Println("Error writing error report:", err, "report:", payload)
}
}
func (s *loggingSender) close() error {
return s.logger.Close()
}
func (s *errorApiSender) send(ctx context.Context, r *http.Request, message string) {
time := time.Now()
var errorContext *erpb.ErrorContext var errorContext *erpb.ErrorContext
if r != nil { if r != nil {
errorContext = &erpb.ErrorContext{ errorContext = &erpb.ErrorContext{
@@ -403,37 +188,21 @@ func (s *errorApiSender) send(ctx context.Context, r *http.Request, message stri
}, },
} }
} }
req := erpb.ReportErrorEventRequest{ return &erpb.ReportErrorEventRequest{
ProjectName: s.projectID, ProjectName: c.projectID,
Event: &erpb.ReportedErrorEvent{ Event: &erpb.ReportedErrorEvent{
EventTime: &timestamp.Timestamp{ EventTime: ptypes.TimestampNow(),
Seconds: time.Unix(), ServiceContext: &c.serviceContext,
Nanos: int32(time.Nanosecond()),
},
ServiceContext: &s.serviceContext,
Message: message, Message: message,
Context: errorContext, Context: errorContext,
}, },
} }
_, err := s.apiClient.ReportErrorEvent(ctx, &req)
if err != nil {
log.Println("Error writing error report:", err, "report:", message)
}
}
func (s *errorApiSender) close() error {
return s.apiClient.Close()
} }
// chopStack trims a stack trace so that the function which panics or calls // chopStack trims a stack trace so that the function which panics or calls
// Report is first. // Report is first.
func chopStack(s []byte, isPanic bool) string { func chopStack(s []byte) string {
var f []byte f := []byte("cloud.google.com/go/errorreporting.(*Client).Report")
if isPanic {
f = []byte("panic(")
} else {
f = []byte("cloud.google.com/go/errorreporting.(*Client).Report")
}
lfFirst := bytes.IndexByte(s, '\n') lfFirst := bytes.IndexByte(s, '\n')
if lfFirst == -1 { if lfFirst == -1 {
@@ -454,3 +223,8 @@ func chopStack(s []byte, isPanic bool) string {
} }
return string(s[:lfFirst+1]) + string(stack) return string(s[:lfFirst+1]) + string(stack)
} }
type client interface {
ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, opts ...gax.CallOption) (*erpb.ReportErrorEventResponse, error)
Close() error
}

View File

@@ -15,12 +15,12 @@
package errorreporting package errorreporting
import ( import (
"bytes"
"errors" "errors"
"log"
"strings" "strings"
"testing" "testing"
"cloud.google.com/go/internal/testutil"
gax "github.com/googleapis/gax-go" gax "github.com/googleapis/gax-go"
"golang.org/x/net/context" "golang.org/x/net/context"
@@ -28,14 +28,16 @@ import (
erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
) )
const testProjectID = "testproject"
type fakeReportErrorsClient struct { type fakeReportErrorsClient struct {
req *erpb.ReportErrorEventRequest req *erpb.ReportErrorEventRequest
fail bool fail bool
doneCh chan struct{}
} }
func (c *fakeReportErrorsClient) ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, _ ...gax.CallOption) (*erpb.ReportErrorEventResponse, error) { func (c *fakeReportErrorsClient) ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, _ ...gax.CallOption) (*erpb.ReportErrorEventResponse, error) {
defer func() {
close(c.doneCh)
}()
if c.fail { if c.fail {
return nil, errors.New("request failed") return nil, errors.New("request failed")
} }
@@ -47,166 +49,65 @@ func (c *fakeReportErrorsClient) Close() error {
return nil return nil
} }
func newFakeReportErrorsClient() *fakeReportErrorsClient {
c := &fakeReportErrorsClient{}
c.doneCh = make(chan struct{})
return c
}
func newTestClient(c *fakeReportErrorsClient) *Client { func newTestClient(c *fakeReportErrorsClient) *Client {
newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) { newClient = func(ctx context.Context, opts ...option.ClientOption) (client, error) {
return c, nil return c, nil
} }
t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", false) t, err := NewClient(context.Background(), testutil.ProjID(), Config{
ServiceName: "myservice",
ServiceVersion: "v1.0",
})
if err != nil { if err != nil {
panic(err) panic(err)
} }
t.RepanicDefault = false
return t return t
} }
var ctx context.Context func commonChecks(t *testing.T, req *erpb.ReportErrorEventRequest, fn string) {
func init() {
ctx = context.Background()
}
func TestCatchNothing(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx)
}
func commonChecks(t *testing.T, req *erpb.ReportErrorEventRequest, panickingFunction string) {
if req.Event.ServiceContext.Service != "myservice" { if req.Event.ServiceContext.Service != "myservice" {
t.Errorf("error report didn't contain service name") t.Errorf("error report didn't contain service name")
} }
if req.Event.ServiceContext.Version != "v1.000" { if req.Event.ServiceContext.Version != "v1.0" {
t.Errorf("error report didn't contain version name") t.Errorf("error report didn't contain version name")
} }
if !strings.Contains(req.Event.Message, "hello, error") { if !strings.Contains(req.Event.Message, "error") {
t.Errorf("error report didn't contain message") t.Errorf("error report didn't contain message")
} }
if !strings.Contains(req.Event.Message, panickingFunction) { if !strings.Contains(req.Event.Message, fn) {
t.Errorf("error report didn't contain stack trace") t.Errorf("error report didn't contain stack trace")
} }
} }
func TestCatchPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestCatchPanic")
if !strings.Contains(r.Event.Message, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchPanicNilClient(t *testing.T) {
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "TestCatchPanicNilClient") {
t.Errorf("error report didn't contain recovered value")
}
}()
var c *Client
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestLogFailedReports(t *testing.T) {
fc := &fakeReportErrorsClient{fail: true}
c := newTestClient(fc)
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "errorreporting.TestLogFailedReports") {
t.Errorf("error report didn't contain stack trace")
}
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchNilPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestCatchNilPanic")
if !strings.Contains(r.Event.Message, "nil") {
t.Errorf("error report didn't contain recovered value")
}
}()
b := true
defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b))
panic(nil)
}
func TestNotCatchNilPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
panic(nil)
}
func TestReport(t *testing.T) { func TestReport(t *testing.T) {
fc := &fakeReportErrorsClient{} fc := newFakeReportErrorsClient()
c := newTestClient(fc) c := newTestClient(fc)
c.Report(ctx, nil, "hello, ", "error") c.Report(Entry{Error: errors.New("error")})
<-fc.doneCh
r := fc.req r := fc.req
if r == nil { if r == nil {
t.Fatalf("got no error report, expected one") t.Fatalf("got no error report, expected one")
} }
commonChecks(t, r, "errorreporting.TestReport") commonChecks(t, r, "errorreporting.TestReport")
} }
func TestReportSync(t *testing.T) {
func TestReportf(t *testing.T) { ctx := context.Background()
fc := &fakeReportErrorsClient{} fc := newFakeReportErrorsClient()
c := newTestClient(fc) c := newTestClient(fc)
c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2) if err := c.ReportSync(ctx, Entry{Error: errors.New("error")}); err != nil {
t.Fatalf("cannot upload errors: %v", err)
}
<-fc.doneCh
r := fc.req r := fc.req
if r == nil { if r == nil {
t.Fatalf("got no error report, expected one") t.Fatalf("got no error report, expected one")
} }
commonChecks(t, r, "errorreporting.TestReportf") commonChecks(t, r, "errorreporting.TestReport")
if !strings.Contains(r.Event.Message, "2+2=4") {
t.Errorf("error report didn't contain formatted message")
}
} }

View File

@@ -0,0 +1,49 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errorreporting_test
import (
"errors"
"log"
"cloud.google.com/go/errorreporting"
"golang.org/x/net/context"
)
func Example() {
// Create the client.
ctx := context.Background()
ec, err := errorreporting.NewClient(ctx, "my-gcp-project", errorreporting.Config{
ServiceName: "myservice",
ServiceVersion: "v1.0",
})
defer func() {
if err := ec.Close(); err != nil {
log.Printf("failed to report errors to Stackdriver: %v", err)
}
}()
// Report an error.
err = doSomething()
if err != nil {
ec.Report(errorreporting.Entry{
Error: err,
})
}
}
func doSomething() error {
return errors.New("something went wrong")
}

View File

@@ -21,68 +21,7 @@ func TestChopStack(t *testing.T) {
name string name string
in []byte in []byte
expected string expected string
isPanic bool
}{ }{
{
name: "Catch",
in: []byte(`goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Catch()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:219 +0x6ed
panic()
/gopath/src/runtime/panic.go:458 +0x243
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`),
expected: `goroutine 20 [running]:
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`,
isPanic: true,
},
{
name: "function not found",
in: []byte(`goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Catch()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:219 +0x6ed
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`),
expected: `goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Catch()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:219 +0x6ed
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`,
isPanic: true,
},
{ {
name: "Report", name: "Report",
in: []byte(` goroutine 39 [running]: in: []byte(` goroutine 39 [running]:
@@ -107,12 +46,11 @@ testing.tRunner()
created by testing.(*T).Run created by testing.(*T).Run
/gopath/testing/testing.go:646 +0x2ec /gopath/testing/testing.go:646 +0x2ec
`, `,
isPanic: false,
}, },
} { } {
out := chopStack(test.in, test.isPanic) out := chopStack(test.in)
if out != test.expected { if out != test.expected {
t.Errorf("case %q: chopStack(%q, %t): got %q want %q", test.name, test.in, test.isPanic, out, test.expected) t.Errorf("case %q: chopStack(%q): got %q want %q", test.name, test.in, out, test.expected)
} }
} }
} }

View File

@@ -1,215 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errors
import (
"bytes"
"errors"
"log"
"strings"
"testing"
"cloud.google.com/go/logging"
"golang.org/x/net/context"
"google.golang.org/api/option"
)
type fakeLogger struct {
entry *logging.Entry
fail bool
}
func (c *fakeLogger) LogSync(ctx context.Context, e logging.Entry) error {
if c.fail {
return errors.New("request failed")
}
c.entry = &e
return nil
}
func (c *fakeLogger) Close() error {
return nil
}
func newTestClientUsingLogging(c *fakeLogger) *Client {
newLoggerInterface = func(ctx context.Context, project string, opts ...option.ClientOption) (loggerInterface, error) {
return c, nil
}
t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", true)
if err != nil {
panic(err)
}
t.RepanicDefault = false
return t
}
func TestCatchNothingUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx)
}
func entryMessage(e *logging.Entry) string {
return e.Payload.(map[string]interface{})["message"].(string)
}
func commonLoggingChecks(t *testing.T, e *logging.Entry, panickingFunction string) {
if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["service"] != "myservice" {
t.Errorf("error report didn't contain service name")
}
if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["version"] != "v1.000" {
t.Errorf("error report didn't contain version name")
}
if !strings.Contains(entryMessage(e), "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(entryMessage(e), panickingFunction) {
t.Errorf("error report didn't contain stack trace")
}
}
func TestCatchPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestCatchPanic")
if !strings.Contains(entryMessage(e), "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchPanicNilClientUsingLogging(t *testing.T) {
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "TestCatchPanicNilClient") {
t.Errorf("error report didn't contain recovered value")
}
}()
var c *Client
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestLogFailedReportsUsingLogging(t *testing.T) {
fl := &fakeLogger{fail: true}
c := newTestClientUsingLogging(fl)
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "errors.TestLogFailedReports") {
t.Errorf("error report didn't contain stack trace")
}
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchNilPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestCatchNilPanic")
if !strings.Contains(entryMessage(e), "nil") {
t.Errorf("error report didn't contain recovered value")
}
}()
b := true
defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b))
panic(nil)
}
func TestNotCatchNilPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
panic(nil)
}
func TestReportUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
c.Report(ctx, nil, "hello, ", "error")
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestReport")
}
func TestReportfUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2)
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestReportf")
if !strings.Contains(entryMessage(e), "2+2=4") {
t.Errorf("error report didn't contain formatted message")
}
}
func TestCloseUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
err := c.Close()
if err != nil {
t.Fatal(err)
}
}

View File

@@ -1,458 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package errors is a Google Stackdriver Error Reporting library.
//
// This package is still experimental and subject to change.
//
// See https://cloud.google.com/error-reporting/ for more information.
//
// To initialize a client, use the NewClient function.
//
// import "cloud.google.com/go/errors"
// ...
// errorsClient, err = errors.NewClient(ctx, projectID, "myservice", "v1.0", true)
//
// The client can recover panics in your program and report them as errors.
// To use this functionality, defer its Catch method, as you would any other
// function for recovering panics.
//
// func foo(ctx context.Context, ...) {
// defer errorsClient.Catch(ctx)
// ...
// }
//
// Catch writes an error report containing the recovered value and a stack trace
// to Stackdriver Error Reporting.
//
// There are various options you can add to the call to Catch that modify how
// panics are handled.
//
// WithMessage and WithMessagef add a custom message after the recovered value,
// using fmt.Sprint and fmt.Sprintf respectively.
//
// defer errorsClient.Catch(ctx, errors.WithMessagef("x=%d", x))
//
// WithRequest fills in various fields in the error report with information
// about an http.Request that's being handled.
//
// defer errorsClient.Catch(ctx, errors.WithRequest(httpReq))
//
// By default, after recovering a panic, Catch will panic again with the
// recovered value. You can turn off this behavior with the Repanic option.
//
// defer errorsClient.Catch(ctx, errors.Repanic(false))
//
// You can also change the default behavior for the client by changing the
// RepanicDefault field.
//
// errorsClient.RepanicDefault = false
//
// It is also possible to write an error report directly without recovering a
// panic, using Report or Reportf.
//
// if err != nil {
// errorsClient.Reportf(ctx, r, "unexpected error %v", err)
// }
//
// If you try to write an error report with a nil client, or if the client
// fails to write the report to the server, the error report is logged using
// log.Println.
//
// Deprecated: Use cloud.google.com/go/errorreporting instead.
package errors // import "cloud.google.com/go/errors"
import (
"bytes"
"fmt"
"log"
"net/http"
"runtime"
"strings"
"time"
api "cloud.google.com/go/errorreporting/apiv1beta1"
"cloud.google.com/go/internal/version"
"cloud.google.com/go/logging"
"github.com/golang/protobuf/ptypes/timestamp"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/option"
erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)
const (
userAgent = `gcloud-golang-errorreporting/20160701`
)
type apiInterface interface {
ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, opts ...gax.CallOption) (*erpb.ReportErrorEventResponse, error)
Close() error
}
var newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) {
client, err := api.NewReportErrorsClient(ctx, opts...)
if err != nil {
return nil, err
}
client.SetGoogleClientInfo("gccl", version.Repo)
return client, nil
}
type loggerInterface interface {
LogSync(ctx context.Context, e logging.Entry) error
Close() error
}
type logger struct {
*logging.Logger
c *logging.Client
}
func (l logger) Close() error {
return l.c.Close()
}
var newLoggerInterface = func(ctx context.Context, projectID string, opts ...option.ClientOption) (loggerInterface, error) {
lc, err := logging.NewClient(ctx, projectID, opts...)
if err != nil {
return nil, fmt.Errorf("creating Logging client: %v", err)
}
l := lc.Logger("errorreports")
return logger{l, lc}, nil
}
type sender interface {
send(ctx context.Context, r *http.Request, message string)
close() error
}
// errorApiSender sends error reports using the Stackdriver Error Reporting API.
type errorApiSender struct {
apiClient apiInterface
projectID string
serviceContext erpb.ServiceContext
}
// loggingSender sends error reports using the Stackdriver Logging API.
type loggingSender struct {
logger loggerInterface
projectID string
serviceContext map[string]string
}
// Client represents a Google Cloud Error Reporting client.
type Client struct {
sender
// RepanicDefault determines whether Catch will re-panic after recovering a
// panic. This behavior can be overridden for an individual call to Catch using
// the Repanic option.
RepanicDefault bool
}
// NewClient returns a new error reporting client. Generally you will want
// to create a client on program initialization and use it through the lifetime
// of the process.
//
// The service name and version string identify the running program, and are
// included in error reports. The version string can be left empty.
//
// Set useLogging to report errors also using Stackdriver Logging,
// which will result in errors appearing in both the logs and the error
// dashboard. This is useful if you are already a user of Stackdriver Logging.
func NewClient(ctx context.Context, projectID, serviceName, serviceVersion string, useLogging bool, opts ...option.ClientOption) (*Client, error) {
if useLogging {
l, err := newLoggerInterface(ctx, projectID, opts...)
if err != nil {
return nil, fmt.Errorf("creating Logging client: %v", err)
}
sender := &loggingSender{
logger: l,
projectID: projectID,
serviceContext: map[string]string{
"service": serviceName,
},
}
if serviceVersion != "" {
sender.serviceContext["version"] = serviceVersion
}
c := &Client{
sender: sender,
RepanicDefault: true,
}
return c, nil
} else {
a, err := newApiInterface(ctx, opts...)
if err != nil {
return nil, fmt.Errorf("creating Error Reporting client: %v", err)
}
c := &Client{
sender: &errorApiSender{
apiClient: a,
projectID: "projects/" + projectID,
serviceContext: erpb.ServiceContext{
Service: serviceName,
Version: serviceVersion,
},
},
RepanicDefault: true,
}
return c, nil
}
}
// Close closes any resources held by the client.
// Close should be called when the client is no longer needed.
// It need not be called at program exit.
func (c *Client) Close() error {
err := c.sender.close()
c.sender = nil
return err
}
// An Option is an optional argument to Catch.
type Option interface {
isOption()
}
// PanicFlag returns an Option that can inform Catch that a panic has occurred.
// If *p is true when Catch is called, an error report is made even if recover
// returns nil. This allows Catch to report an error for panic(nil).
// If p is nil, the option is ignored.
//
// Here is an example of how to use PanicFlag:
//
// func foo(ctx context.Context, ...) {
// hasPanicked := true
// defer errorsClient.Catch(ctx, errors.PanicFlag(&hasPanicked))
// ...
// ...
// // We have reached the end of the function, so we're not panicking.
// hasPanicked = false
// }
func PanicFlag(p *bool) Option { return panicFlag{p} }
type panicFlag struct {
*bool
}
func (h panicFlag) isOption() {}
// Repanic returns an Option that determines whether Catch will re-panic after
// it reports an error. This overrides the default in the client.
func Repanic(r bool) Option { return repanic(r) }
type repanic bool
func (r repanic) isOption() {}
// WithRequest returns an Option that informs Catch or Report of an http.Request
// that is being handled. Information from the Request is included in the error
// report, if one is made.
func WithRequest(r *http.Request) Option { return withRequest{r} }
type withRequest struct {
*http.Request
}
func (w withRequest) isOption() {}
// WithMessage returns an Option that sets a message to be included in the error
// report, if one is made. v is converted to a string with fmt.Sprint.
func WithMessage(v ...interface{}) Option { return message(v) }
type message []interface{}
func (m message) isOption() {}
// WithMessagef returns an Option that sets a message to be included in the error
// report, if one is made. format and v are converted to a string with fmt.Sprintf.
func WithMessagef(format string, v ...interface{}) Option { return messagef{format, v} }
type messagef struct {
format string
v []interface{}
}
func (m messagef) isOption() {}
// Catch tries to recover a panic; if it succeeds, it writes an error report.
// It should be called by deferring it, like any other function for recovering
// panics.
//
// Catch can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Catch(ctx context.Context, opt ...Option) {
panicked := false
for _, o := range opt {
switch o := o.(type) {
case panicFlag:
panicked = panicked || o.bool != nil && *o.bool
}
}
x := recover()
if x == nil && !panicked {
return
}
var (
r *http.Request
shouldRepanic = true
messages = []string{fmt.Sprint(x)}
)
if c != nil {
shouldRepanic = c.RepanicDefault
}
for _, o := range opt {
switch o := o.(type) {
case repanic:
shouldRepanic = bool(o)
case withRequest:
r = o.Request
case message:
messages = append(messages, fmt.Sprint(o...))
case messagef:
messages = append(messages, fmt.Sprintf(o.format, o.v...))
}
}
c.logInternal(ctx, r, true, strings.Join(messages, " "))
if shouldRepanic {
panic(x)
}
}
// Report writes an error report unconditionally, instead of only when a panic
// occurs.
// If r is non-nil, information from the Request is included in the error report.
//
// Report can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Report(ctx context.Context, r *http.Request, v ...interface{}) {
c.logInternal(ctx, r, false, fmt.Sprint(v...))
}
// Reportf writes an error report unconditionally, instead of only when a panic
// occurs.
// If r is non-nil, information from the Request is included in the error report.
//
// Reportf can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Reportf(ctx context.Context, r *http.Request, format string, v ...interface{}) {
c.logInternal(ctx, r, false, fmt.Sprintf(format, v...))
}
func (c *Client) logInternal(ctx context.Context, r *http.Request, isPanic bool, msg string) {
// limit the stack trace to 16k.
var buf [16384]byte
stack := buf[0:runtime.Stack(buf[:], false)]
message := msg + "\n" + chopStack(stack, isPanic)
if c == nil {
log.Println("Error report used nil client:", message)
return
}
c.send(ctx, r, message)
}
func (s *loggingSender) send(ctx context.Context, r *http.Request, message string) {
payload := map[string]interface{}{
"eventTime": time.Now().In(time.UTC).Format(time.RFC3339Nano),
"message": message,
"serviceContext": s.serviceContext,
}
if r != nil {
payload["context"] = map[string]interface{}{
"httpRequest": map[string]interface{}{
"method": r.Method,
"url": r.Host + r.RequestURI,
"userAgent": r.UserAgent(),
"referrer": r.Referer(),
"remoteIp": r.RemoteAddr,
},
}
}
e := logging.Entry{
Severity: logging.Error,
Payload: payload,
}
err := s.logger.LogSync(ctx, e)
if err != nil {
log.Println("Error writing error report:", err, "report:", payload)
}
}
func (s *loggingSender) close() error {
return s.logger.Close()
}
func (s *errorApiSender) send(ctx context.Context, r *http.Request, message string) {
time := time.Now()
var errorContext *erpb.ErrorContext
if r != nil {
errorContext = &erpb.ErrorContext{
HttpRequest: &erpb.HttpRequestContext{
Method: r.Method,
Url: r.Host + r.RequestURI,
UserAgent: r.UserAgent(),
Referrer: r.Referer(),
RemoteIp: r.RemoteAddr,
},
}
}
req := erpb.ReportErrorEventRequest{
ProjectName: s.projectID,
Event: &erpb.ReportedErrorEvent{
EventTime: &timestamp.Timestamp{
Seconds: time.Unix(),
Nanos: int32(time.Nanosecond()),
},
ServiceContext: &s.serviceContext,
Message: message,
Context: errorContext,
},
}
_, err := s.apiClient.ReportErrorEvent(ctx, &req)
if err != nil {
log.Println("Error writing error report:", err, "report:", message)
}
}
func (s *errorApiSender) close() error {
return s.apiClient.Close()
}
// chopStack trims a stack trace so that the function which panics or calls
// Report is first.
func chopStack(s []byte, isPanic bool) string {
var f []byte
if isPanic {
f = []byte("panic(")
} else {
f = []byte("cloud.google.com/go/errors.(*Client).Report")
}
lfFirst := bytes.IndexByte(s, '\n')
if lfFirst == -1 {
return string(s)
}
stack := s[lfFirst:]
panicLine := bytes.Index(stack, f)
if panicLine == -1 {
return string(s)
}
stack = stack[panicLine+1:]
for i := 0; i < 2; i++ {
nextLine := bytes.IndexByte(stack, '\n')
if nextLine == -1 {
return string(s)
}
stack = stack[nextLine+1:]
}
return string(s[:lfFirst+1]) + string(stack)
}

View File

@@ -1,212 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errors
import (
"bytes"
"errors"
"log"
"strings"
"testing"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/option"
erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)
const testProjectID = "testproject"
type fakeReportErrorsClient struct {
req *erpb.ReportErrorEventRequest
fail bool
}
func (c *fakeReportErrorsClient) ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, _ ...gax.CallOption) (*erpb.ReportErrorEventResponse, error) {
if c.fail {
return nil, errors.New("request failed")
}
c.req = req
return &erpb.ReportErrorEventResponse{}, nil
}
func (c *fakeReportErrorsClient) Close() error {
return nil
}
func newTestClient(c *fakeReportErrorsClient) *Client {
newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) {
return c, nil
}
t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", false)
if err != nil {
panic(err)
}
t.RepanicDefault = false
return t
}
var ctx context.Context
func init() {
ctx = context.Background()
}
func TestCatchNothing(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx)
}
func commonChecks(t *testing.T, req *erpb.ReportErrorEventRequest, panickingFunction string) {
if req.Event.ServiceContext.Service != "myservice" {
t.Errorf("error report didn't contain service name")
}
if req.Event.ServiceContext.Version != "v1.000" {
t.Errorf("error report didn't contain version name")
}
if !strings.Contains(req.Event.Message, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(req.Event.Message, panickingFunction) {
t.Errorf("error report didn't contain stack trace")
}
}
func TestCatchPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errors.TestCatchPanic")
if !strings.Contains(r.Event.Message, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchPanicNilClient(t *testing.T) {
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "TestCatchPanicNilClient") {
t.Errorf("error report didn't contain recovered value")
}
}()
var c *Client
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestLogFailedReports(t *testing.T) {
fc := &fakeReportErrorsClient{fail: true}
c := newTestClient(fc)
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "errors.TestLogFailedReports") {
t.Errorf("error report didn't contain stack trace")
}
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchNilPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errors.TestCatchNilPanic")
if !strings.Contains(r.Event.Message, "nil") {
t.Errorf("error report didn't contain recovered value")
}
}()
b := true
defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b))
panic(nil)
}
func TestNotCatchNilPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
panic(nil)
}
func TestReport(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
c.Report(ctx, nil, "hello, ", "error")
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errors.TestReport")
}
func TestReportf(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2)
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errors.TestReportf")
if !strings.Contains(r.Event.Message, "2+2=4") {
t.Errorf("error report didn't contain formatted message")
}
}

View File

@@ -1,118 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errors
import "testing"
func TestChopStack(t *testing.T) {
for _, test := range []struct {
name string
in []byte
expected string
isPanic bool
}{
{
name: "Catch",
in: []byte(`goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errors.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b
cloud.google.com/go/errors.(*Client).Catch()
/gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed
panic()
/gopath/src/runtime/panic.go:458 +0x243
cloud.google.com/go/errors_test.TestCatchPanic()
/gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`),
expected: `goroutine 20 [running]:
cloud.google.com/go/errors_test.TestCatchPanic()
/gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`,
isPanic: true,
},
{
name: "function not found",
in: []byte(`goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errors.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b
cloud.google.com/go/errors.(*Client).Catch()
/gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed
cloud.google.com/go/errors_test.TestCatchPanic()
/gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`),
expected: `goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errors.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b
cloud.google.com/go/errors.(*Client).Catch()
/gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed
cloud.google.com/go/errors_test.TestCatchPanic()
/gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`,
isPanic: true,
},
{
name: "Report",
in: []byte(` goroutine 39 [running]:
runtime/debug.Stack()
/gopath/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errors.(*Client).logInternal()
/gopath/cloud.google.com/go/errors/errors.go:259 +0x18b
cloud.google.com/go/errors.(*Client).Report()
/gopath/cloud.google.com/go/errors/errors.go:248 +0x4ed
cloud.google.com/go/errors_test.TestReport()
/gopath/cloud.google.com/go/errors/errors_test.go:137 +0x2a1
testing.tRunner()
/gopath/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/testing/testing.go:646 +0x2ec
`),
expected: ` goroutine 39 [running]:
cloud.google.com/go/errors_test.TestReport()
/gopath/cloud.google.com/go/errors/errors_test.go:137 +0x2a1
testing.tRunner()
/gopath/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/testing/testing.go:646 +0x2ec
`,
isPanic: false,
},
} {
out := chopStack(test.in, test.isPanic)
if out != test.expected {
t.Errorf("case %q: chopStack(%q, %t): got %q want %q", test.name, test.in, test.isPanic, out, test.expected)
}
}
}

48
vendor/cloud.google.com/go/firestore/apiv1beta1/doc.go generated vendored Normal file
View File

@@ -0,0 +1,48 @@
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
// Package firestore is an auto-generated package for the
// Google Cloud Firestore API.
//
// NOTE: This package is in beta. It is not stable, and may be subject to changes.
//
//
// Use the client at cloud.google.com/go/firestore in preference to this.
package firestore // import "cloud.google.com/go/firestore/apiv1beta1"
import (
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
)
// insertMetadata merges the given gRPC metadata into any outgoing metadata
// already attached to ctx and returns a context carrying the combined set.
// Values for duplicate keys are appended, not replaced.
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	merged, _ := metadata.FromOutgoingContext(ctx)
	// Copy first so the metadata stored on the incoming context is never mutated.
	merged = merged.Copy()
	for _, md := range mds {
		for key, vals := range md {
			merged[key] = append(merged[key], vals...)
		}
	}
	return metadata.NewOutgoingContext(ctx, merged)
}
// DefaultAuthScopes reports the default set of authentication scopes to use
// with this package.
func DefaultAuthScopes() []string {
	scopes := make([]string, 0, 2)
	scopes = append(scopes,
		"https://www.googleapis.com/auth/cloud-platform",
		"https://www.googleapis.com/auth/datastore",
	)
	return scopes
}

View File

@@ -0,0 +1,544 @@
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package firestore
import (
"math"
"time"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// CallOptions contains the retry settings for each method of Client.
// Each field holds the gax call options consulted by the corresponding
// Client method; callers may replace a field to change retry behavior,
// or pass per-call options which are appended after these defaults.
type CallOptions struct {
	GetDocument       []gax.CallOption
	ListDocuments     []gax.CallOption
	CreateDocument    []gax.CallOption
	UpdateDocument    []gax.CallOption
	DeleteDocument    []gax.CallOption
	BatchGetDocuments []gax.CallOption
	BeginTransaction  []gax.CallOption
	Commit            []gax.CallOption
	Rollback          []gax.CallOption
	RunQuery          []gax.CallOption
	Write             []gax.CallOption
	Listen            []gax.CallOption
	ListCollectionIds []gax.CallOption
}
// defaultClientOptions returns the base dial options for the Firestore
// service: the production endpoint plus the package's default auth scopes.
// Caller-supplied options are appended after these and so take precedence.
func defaultClientOptions() []option.ClientOption {
	opts := make([]option.ClientOption, 0, 2)
	opts = append(opts,
		option.WithEndpoint("firestore.googleapis.com:443"),
		option.WithScopes(DefaultAuthScopes()...),
	)
	return opts
}
// defaultCallOptions builds the per-method retry settings used when the
// caller does not override them. Idempotent methods retry on DeadlineExceeded
// and Unavailable with exponential backoff (100ms initial, 60s cap, 1.3x
// multiplier). Note that the "non_idempotent" keys looked up below are
// deliberately absent from the retry map: those lookups yield nil, so
// non-idempotent methods (Create/Update/Commit/Write) are never retried.
func defaultCallOptions() *CallOptions {
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
		// Same policy for streaming calls; kept as a separate key so the
		// generator can diverge the two without changing call sites.
		{"streaming", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	return &CallOptions{
		GetDocument:       retry[[2]string{"default", "idempotent"}],
		ListDocuments:     retry[[2]string{"default", "idempotent"}],
		CreateDocument:    retry[[2]string{"default", "non_idempotent"}],
		UpdateDocument:    retry[[2]string{"default", "non_idempotent"}],
		DeleteDocument:    retry[[2]string{"default", "idempotent"}],
		BatchGetDocuments: retry[[2]string{"streaming", "idempotent"}],
		BeginTransaction:  retry[[2]string{"default", "idempotent"}],
		Commit:            retry[[2]string{"default", "non_idempotent"}],
		Rollback:          retry[[2]string{"default", "idempotent"}],
		RunQuery:          retry[[2]string{"default", "idempotent"}],
		Write:             retry[[2]string{"streaming", "non_idempotent"}],
		Listen:            retry[[2]string{"streaming", "idempotent"}],
		ListCollectionIds: retry[[2]string{"default", "idempotent"}],
	}
}
// Client is a client for interacting with Google Cloud Firestore API.
// Construct with NewClient; the zero value is not usable.
type Client struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	client firestorepb.FirestoreClient

	// The call options for this service.
	CallOptions *CallOptions

	// The x-goog-* metadata to be sent with each request.
	// Populated by SetGoogleClientInfo and merged into every call's context.
	xGoogMetadata metadata.MD
}
// NewClient creates a new firestore client.
//
// The Cloud Firestore service.
//
// This service exposes several types of comparable timestamps:
//
//   create_time - The time at which a document was created. Changes only
//   when a document is deleted, then re-created. Increases in a strict
//   monotonic fashion.
//
//   update_time - The time at which a document was last updated. Changes
//   every time a document is modified. Does not change when a write results
//   in no modifications. Increases in a strict monotonic fashion.
//
//   read_time - The time at which a particular state was observed. Used
//   to denote a consistent snapshot of the database or the time at which a
//   Document was observed to not exist.
//
//   commit_time - The time at which the writes in a transaction were
//   committed. Any read with an equal or greater read_time is guaranteed
//   to see the effects of the transaction.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
	// Caller-supplied options are appended last so they override the defaults.
	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &Client{
		conn:        conn,
		CallOptions: defaultCallOptions(),

		client: firestorepb.NewFirestoreClient(conn),
	}
	// Initialize the x-goog-api-client header with this library's versions.
	c.SetGoogleClientInfo()
	return c, nil
}
// Connection returns the client's connection to the API service.
// The connection is shared by all RPCs made through this client.
func (c *Client) Connection() *grpc.ClientConn {
	return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required. Calls made after Close will fail.
func (c *Client) Close() error {
	return c.conn.Close()
}
// SetGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) SetGoogleClientInfo(keyval ...string) {
	// Header order: Go version, caller-supplied pairs, then library versions.
	kv := make([]string, 0, len(keyval)+8)
	kv = append(kv, "gl-go", version.Go())
	kv = append(kv, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// DatabaseRootPath returns the path for the database root resource,
// of the form "projects/{project}/databases/{database}".
func DatabaseRootPath(project, database string) string {
	return "projects/" + project + "/databases/" + database
}
// DocumentRootPath returns the path for the document root resource,
// of the form "projects/{project}/databases/{database}/documents".
func DocumentRootPath(project, database string) string {
	return "projects/" + project + "/databases/" + database + "/documents"
}
// DocumentPathPath returns the path for the document path resource,
// of the form "projects/{project}/databases/{database}/documents/{document_path}".
// documentPath may itself contain slashes and is inserted verbatim.
func DocumentPathPath(project, database, documentPath string) string {
	return "projects/" + project + "/databases/" + database + "/documents/" + documentPath
}
// AnyPathPath returns the path for the any path resource, of the form
// "projects/{project}/databases/{database}/documents/{document}/{any_path}".
// document and anyPath are inserted verbatim and may contain slashes.
func AnyPathPath(project, database, document, anyPath string) string {
	return "projects/" + project + "/databases/" + database + "/documents/" + document + "/" + anyPath
}
// GetDocument gets a single document.
func (c *Client) GetDocument(ctx context.Context, req *firestorepb.GetDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression so appending caller opts never mutates the defaults.
	defaults := c.CallOptions.GetDocument
	opts = append(defaults[:len(defaults):len(defaults)], opts...)
	var doc *firestorepb.Document
	if err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var callErr error
		doc, callErr = c.client.GetDocument(ctx, req, settings.GRPC...)
		return callErr
	}, opts...); err != nil {
		return nil, err
	}
	return doc, nil
}
// ListDocuments lists documents.
// The returned iterator fetches pages lazily; errors from the underlying
// RPC surface from the iterator's Next method.
func (c *Client) ListDocuments(ctx context.Context, req *firestorepb.ListDocumentsRequest, opts ...gax.CallOption) *DocumentIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListDocuments[0:len(c.CallOptions.ListDocuments):len(c.CallOptions.ListDocuments)], opts...)
	it := &DocumentIterator{}
	// InternalFetch retrieves one page. Note it writes the page token and
	// size into req, so req is mutated as iteration advances.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*firestorepb.Document, string, error) {
		var resp *firestorepb.ListDocumentsResponse
		req.PageToken = pageToken
		if pageSize > math.MaxInt32 {
			// Clamp: the proto field is int32.
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListDocuments(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.Documents, resp.NextPageToken, nil
	}
	// fetch adapts InternalFetch to the iterator package's paging protocol
	// by buffering the fetched items on the iterator.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// CreateDocument creates a new document.
func (c *Client) CreateDocument(ctx context.Context, req *firestorepb.CreateDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression so appending caller opts never mutates the defaults.
	defaults := c.CallOptions.CreateDocument
	opts = append(defaults[:len(defaults):len(defaults)], opts...)
	var created *firestorepb.Document
	if err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var callErr error
		created, callErr = c.client.CreateDocument(ctx, req, settings.GRPC...)
		return callErr
	}, opts...); err != nil {
		return nil, err
	}
	return created, nil
}
// UpdateDocument updates or inserts a document.
func (c *Client) UpdateDocument(ctx context.Context, req *firestorepb.UpdateDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression so appending caller opts never mutates the defaults.
	defaults := c.CallOptions.UpdateDocument
	opts = append(defaults[:len(defaults):len(defaults)], opts...)
	var updated *firestorepb.Document
	if err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var callErr error
		updated, callErr = c.client.UpdateDocument(ctx, req, settings.GRPC...)
		return callErr
	}, opts...); err != nil {
		return nil, err
	}
	return updated, nil
}
// DeleteDocument deletes a document.
func (c *Client) DeleteDocument(ctx context.Context, req *firestorepb.DeleteDocumentRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression so appending caller opts never mutates the defaults.
	defaults := c.CallOptions.DeleteDocument
	opts = append(defaults[:len(defaults):len(defaults)], opts...)
	return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		// The response is google.protobuf.Empty; only the error matters.
		_, callErr := c.client.DeleteDocument(ctx, req, settings.GRPC...)
		return callErr
	}, opts...)
}
// BatchGetDocuments gets multiple documents.
//
// Documents returned by this method are not guaranteed to be returned in the
// same order that they were requested.
//
// The retry options only cover establishing the server stream; once the
// stream client is returned, the caller reads from it directly.
func (c *Client) BatchGetDocuments(ctx context.Context, req *firestorepb.BatchGetDocumentsRequest, opts ...gax.CallOption) (firestorepb.Firestore_BatchGetDocumentsClient, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.BatchGetDocuments[0:len(c.CallOptions.BatchGetDocuments):len(c.CallOptions.BatchGetDocuments)], opts...)
	var resp firestorepb.Firestore_BatchGetDocumentsClient
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.BatchGetDocuments(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// BeginTransaction starts a new transaction.
func (c *Client) BeginTransaction(ctx context.Context, req *firestorepb.BeginTransactionRequest, opts ...gax.CallOption) (*firestorepb.BeginTransactionResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression so appending caller opts never mutates the defaults.
	defaults := c.CallOptions.BeginTransaction
	opts = append(defaults[:len(defaults):len(defaults)], opts...)
	var txn *firestorepb.BeginTransactionResponse
	if err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var callErr error
		txn, callErr = c.client.BeginTransaction(ctx, req, settings.GRPC...)
		return callErr
	}, opts...); err != nil {
		return nil, err
	}
	return txn, nil
}
// Commit commits a transaction, while optionally updating documents.
func (c *Client) Commit(ctx context.Context, req *firestorepb.CommitRequest, opts ...gax.CallOption) (*firestorepb.CommitResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression so appending caller opts never mutates the defaults.
	defaults := c.CallOptions.Commit
	opts = append(defaults[:len(defaults):len(defaults)], opts...)
	var committed *firestorepb.CommitResponse
	if err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var callErr error
		committed, callErr = c.client.Commit(ctx, req, settings.GRPC...)
		return callErr
	}, opts...); err != nil {
		return nil, err
	}
	return committed, nil
}
// Rollback rolls back a transaction.
func (c *Client) Rollback(ctx context.Context, req *firestorepb.RollbackRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression so appending caller opts never mutates the defaults.
	defaults := c.CallOptions.Rollback
	opts = append(defaults[:len(defaults):len(defaults)], opts...)
	return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		// The response is google.protobuf.Empty; only the error matters.
		_, callErr := c.client.Rollback(ctx, req, settings.GRPC...)
		return callErr
	}, opts...)
}
// RunQuery runs a query.
//
// The retry options only cover establishing the server stream; query results
// are read by the caller from the returned stream client.
func (c *Client) RunQuery(ctx context.Context, req *firestorepb.RunQueryRequest, opts ...gax.CallOption) (firestorepb.Firestore_RunQueryClient, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.RunQuery[0:len(c.CallOptions.RunQuery):len(c.CallOptions.RunQuery)], opts...)
	var resp firestorepb.Firestore_RunQueryClient
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.RunQuery(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// Write streams batches of document updates and deletes, in order.
//
// This is a bidirectional streaming RPC: no request message is passed here;
// the caller sends requests and receives responses on the returned stream.
// The call options only cover opening the stream.
func (c *Client) Write(ctx context.Context, opts ...gax.CallOption) (firestorepb.Firestore_WriteClient, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.Write[0:len(c.CallOptions.Write):len(c.CallOptions.Write)], opts...)
	var resp firestorepb.Firestore_WriteClient
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.Write(ctx, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// Listen listens to changes.
//
// This is a bidirectional streaming RPC: no request message is passed here;
// the caller sends listen requests and receives change notifications on the
// returned stream. The call options only cover opening the stream.
func (c *Client) Listen(ctx context.Context, opts ...gax.CallOption) (firestorepb.Firestore_ListenClient, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.Listen[0:len(c.CallOptions.Listen):len(c.CallOptions.Listen)], opts...)
	var resp firestorepb.Firestore_ListenClient
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.Listen(ctx, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ListCollectionIds lists all the collection IDs underneath a document.
// The returned iterator fetches pages lazily; errors from the underlying
// RPC surface from the iterator's Next method.
func (c *Client) ListCollectionIds(ctx context.Context, req *firestorepb.ListCollectionIdsRequest, opts ...gax.CallOption) *StringIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListCollectionIds[0:len(c.CallOptions.ListCollectionIds):len(c.CallOptions.ListCollectionIds)], opts...)
	it := &StringIterator{}
	// InternalFetch retrieves one page. Note it writes the page token and
	// size into req, so req is mutated as iteration advances.
	it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) {
		var resp *firestorepb.ListCollectionIdsResponse
		req.PageToken = pageToken
		if pageSize > math.MaxInt32 {
			// Clamp: the proto field is int32.
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListCollectionIds(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.CollectionIds, resp.NextPageToken, nil
	}
	// fetch adapts InternalFetch to the iterator package's paging protocol
	// by buffering the fetched items on the iterator.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// DocumentIterator manages a stream of *firestorepb.Document.
type DocumentIterator struct {
	items    []*firestorepb.Document // buffered results not yet returned by Next
	pageInfo *iterator.PageInfo
	nextFunc func() error // supplied by iterator.NewPageInfo; refills items

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*firestorepb.Document, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DocumentIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *DocumentIterator) Next() (*firestorepb.Document, error) {
	var item *firestorepb.Document
	// nextFunc refills it.items from the service when the buffer is empty;
	// it returns iterator.Done when no results remain.
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

// bufLen reports the number of buffered, unread results (iterator protocol hook).
func (it *DocumentIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered results to the iterator machinery and clears
// the buffer (iterator protocol hook).
func (it *DocumentIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
// StringIterator manages a stream of string.
type StringIterator struct {
	items    []string // buffered results not yet returned by Next
	pageInfo *iterator.PageInfo
	nextFunc func() error // supplied by iterator.NewPageInfo; refills items

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *StringIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *StringIterator) Next() (string, error) {
	var item string
	// nextFunc refills it.items from the service when the buffer is empty;
	// it returns iterator.Done when no results remain.
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

// bufLen reports the number of buffered, unread results (iterator protocol hook).
func (it *StringIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered results to the iterator machinery and clears
// the buffer (iterator protocol hook).
func (it *StringIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}

Some files were not shown because too many files have changed in this diff Show More