mirror of https://github.com/rclone/rclone.git synced 2025-12-31 15:43:53 +00:00

vendor: update all dependencies to latest versions

This commit is contained in:
Nick Craig-Wood
2017-09-30 15:27:27 +01:00
parent 911d121bb9
commit b017fcfe9a
3048 changed files with 537057 additions and 189681 deletions


@@ -660,6 +660,36 @@ func (s *DatasetReference) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
type EncryptionConfiguration struct {
// KmsKeyName: [Optional] Describes the Cloud KMS encryption key that
// will be used to protect destination BigQuery table. The BigQuery
// Service Account associated with your project requires access to this
// encryption key.
KmsKeyName string `json:"kmsKeyName,omitempty"`
// ForceSendFields is a list of field names (e.g. "KmsKeyName") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "KmsKeyName") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *EncryptionConfiguration) MarshalJSON() ([]byte, error) {
type noMethod EncryptionConfiguration
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
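
The comments above describe the ForceSendFields/NullFields machinery in the abstract; here is a minimal standalone sketch of how NullFields changes the marshalled output of the new type (the expected output is assumed from the gensupport.MarshalJSON helper used here):

package main

import (
	"fmt"

	"google.golang.org/api/bigquery/v2"
)

// Demonstrates how NullFields affects the generated JSON: an empty
// KmsKeyName is normally omitted, but listing it in NullFields should
// serialise it as an explicit null (useful in Patch requests).
func main() {
	enc := &bigquery.EncryptionConfiguration{
		NullFields: []string{"KmsKeyName"},
	}
	b, err := enc.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // expected to print: {"kmsKeyName":null}
}
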
type ErrorProto struct {
// DebugInfo: Debugging information. This property is internal to Google
// and should not be used.
@@ -698,6 +728,14 @@ func (s *ErrorProto) MarshalJSON() ([]byte, error) {
}
type ExplainQueryStage struct {
// ComputeMsAvg: Milliseconds the average shard spent on CPU-bound
// tasks.
ComputeMsAvg int64 `json:"computeMsAvg,omitempty,string"`
// ComputeMsMax: Milliseconds the slowest shard spent on CPU-bound
// tasks.
ComputeMsMax int64 `json:"computeMsMax,omitempty,string"`
// ComputeRatioAvg: Relative amount of time the average shard spent on
// CPU-bound tasks.
ComputeRatioAvg float64 `json:"computeRatioAvg,omitempty"`
@@ -712,6 +750,12 @@ type ExplainQueryStage struct {
// Name: Human-readable name for stage.
Name string `json:"name,omitempty"`
// ReadMsAvg: Milliseconds the average shard spent reading input.
ReadMsAvg int64 `json:"readMsAvg,omitempty,string"`
// ReadMsMax: Milliseconds the slowest shard spent reading input.
ReadMsMax int64 `json:"readMsMax,omitempty,string"`
// ReadRatioAvg: Relative amount of time the average shard spent reading
// input.
ReadRatioAvg float64 `json:"readRatioAvg,omitempty"`
@@ -726,6 +770,13 @@ type ExplainQueryStage struct {
// RecordsWritten: Number of records written by the stage.
RecordsWritten int64 `json:"recordsWritten,omitempty,string"`
// ShuffleOutputBytes: Total number of bytes written to shuffle.
ShuffleOutputBytes int64 `json:"shuffleOutputBytes,omitempty,string"`
// ShuffleOutputBytesSpilled: Total number of bytes written to shuffle
// and spilled to disk.
ShuffleOutputBytesSpilled int64 `json:"shuffleOutputBytesSpilled,omitempty,string"`
// Status: Current status for the stage.
Status string `json:"status,omitempty"`
@@ -733,6 +784,14 @@ type ExplainQueryStage struct {
// (approximately chronological).
Steps []*ExplainQueryStep `json:"steps,omitempty"`
// WaitMsAvg: Milliseconds the average shard spent waiting to be
// scheduled.
WaitMsAvg int64 `json:"waitMsAvg,omitempty,string"`
// WaitMsMax: Milliseconds the slowest shard spent waiting to be
// scheduled.
WaitMsMax int64 `json:"waitMsMax,omitempty,string"`
// WaitRatioAvg: Relative amount of time the average shard spent waiting
// to be scheduled.
WaitRatioAvg float64 `json:"waitRatioAvg,omitempty"`
@@ -741,6 +800,12 @@ type ExplainQueryStage struct {
// to be scheduled.
WaitRatioMax float64 `json:"waitRatioMax,omitempty"`
// WriteMsAvg: Milliseconds the average shard spent on writing output.
WriteMsAvg int64 `json:"writeMsAvg,omitempty,string"`
// WriteMsMax: Milliseconds the slowest shard spent on writing output.
WriteMsMax int64 `json:"writeMsMax,omitempty,string"`
// WriteRatioAvg: Relative amount of time the average shard spent on
// writing output.
WriteRatioAvg float64 `json:"writeRatioAvg,omitempty"`
@@ -749,7 +814,7 @@ type ExplainQueryStage struct {
// writing output.
WriteRatioMax float64 `json:"writeRatioMax,omitempty"`
// ForceSendFields is a list of field names (e.g. "ComputeRatioAvg") to
// ForceSendFields is a list of field names (e.g. "ComputeMsAvg") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
@@ -757,13 +822,12 @@ type ExplainQueryStage struct {
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ComputeRatioAvg") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
// NullFields is a list of field names (e.g. "ComputeMsAvg") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
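
These per-stage millisecond counters complement the existing ratio fields; a short sketch of reading them off a completed query job's plan (assuming the stages come from JobStatistics2.QueryPlan):

package main

import (
	"fmt"

	"google.golang.org/api/bigquery/v2"
)

// printQueryPlan summarises the new per-stage millisecond timings and
// shuffle counters. The stages would normally be taken from
// JobStatistics2.QueryPlan on a finished query job.
func printQueryPlan(stages []*bigquery.ExplainQueryStage) {
	for _, s := range stages {
		fmt.Printf("%s [%s]: wait %dms, read %dms, compute %dms, write %dms, shuffle %d bytes (%d spilled)\n",
			s.Name, s.Status, s.WaitMsAvg, s.ReadMsAvg, s.ComputeMsAvg, s.WriteMsAvg,
			s.ShuffleOutputBytes, s.ShuffleOutputBytesSpilled)
	}
}
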
@@ -894,8 +958,8 @@ type ExternalDataConfiguration struct {
// data sources. For Google Cloud Bigtable URIs: Exactly one URI can be
// specified and it has to be a fully specified and valid HTTPS URL for a
// Google Cloud Bigtable table. For Google Cloud Datastore backups,
// exactly one URI can be specified, and it must end with
// '.backup_info'. Also, the '*' wildcard character is not allowed.
// exactly one URI can be specified. Also, the '*' wildcard character is
// not allowed.
SourceUris []string `json:"sourceUris,omitempty"`
// ForceSendFields is a list of field names (e.g. "Autodetect") to
@@ -1004,6 +1068,40 @@ func (s *GetQueryResultsResponse) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
type GetServiceAccountResponse struct {
// Email: The service account email address.
Email string `json:"email,omitempty"`
// Kind: The resource type of the response.
Kind string `json:"kind,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Email") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Email") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GetServiceAccountResponse) MarshalJSON() ([]byte, error) {
type noMethod GetServiceAccountResponse
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
type GoogleSheetsOptions struct {
// SkipLeadingRows: [Optional] The number of rows at the top of a sheet
// that BigQuery will skip when reading the data. The default value is
@@ -1271,6 +1369,10 @@ type JobConfigurationLoad struct {
// one atomic update upon job completion.
CreateDisposition string `json:"createDisposition,omitempty"`
// DestinationEncryptionConfiguration: [Experimental] Custom encryption
// configuration (e.g., Cloud KMS keys).
DestinationEncryptionConfiguration *EncryptionConfiguration `json:"destinationEncryptionConfiguration,omitempty"`
// DestinationTable: [Required] The destination table to load the data
// into.
DestinationTable *TableReference `json:"destinationTable,omitempty"`
@@ -1350,15 +1452,16 @@ type JobConfigurationLoad struct {
SchemaInlineFormat string `json:"schemaInlineFormat,omitempty"`
// SchemaUpdateOptions: [Experimental] Allows the schema of the
// destination table to be updated as a side effect of the load job.
// Schema update options are supported in two cases: when
// writeDisposition is WRITE_APPEND; when writeDisposition is
// WRITE_TRUNCATE and the destination table is a partition of a table,
// specified by partition decorators. For normal tables, WRITE_TRUNCATE
// will always overwrite the schema. One or more of the following values
// are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to
// the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field
// in the original schema to nullable.
// destination table to be updated as a side effect of the load job if
// a schema is autodetected or supplied in the job configuration. Schema
// update options are supported in two cases: when writeDisposition is
// WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the
// destination table is a partition of a table, specified by partition
// decorators. For normal tables, WRITE_TRUNCATE will always overwrite
// the schema. One or more of the following values are specified:
// ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema.
// ALLOW_FIELD_RELAXATION: allow relaxing a required field in the
// original schema to nullable.
SchemaUpdateOptions []string `json:"schemaUpdateOptions,omitempty"`
// SkipLeadingRows: [Optional] The number of rows at the top of a CSV
@@ -1374,10 +1477,20 @@ type JobConfigurationLoad struct {
SourceFormat string `json:"sourceFormat,omitempty"`
// SourceUris: [Required] The fully-qualified URIs that point to your
// data in Google Cloud Storage. Each URI can contain one '*' wildcard
// character and it must come after the 'bucket' name.
// data in Google Cloud. For Google Cloud Storage URIs: Each URI can
// contain one '*' wildcard character and it must come after the
// 'bucket' name. Size limits related to load jobs apply to external
// data sources. For Google Cloud Bigtable URIs: Exactly one URI can be
// specified and it has to be a fully specified and valid HTTPS URL for a
// Google Cloud Bigtable table. For Google Cloud Datastore backups:
// Exactly one URI can be specified. Also, the '*' wildcard character is
// not allowed.
SourceUris []string `json:"sourceUris,omitempty"`
// TimePartitioning: [Experimental] If specified, configures time-based
// partitioning for the destination table.
TimePartitioning *TimePartitioning `json:"timePartitioning,omitempty"`
// WriteDisposition: [Optional] Specifies the action that occurs if the
// destination table already exists. The following values are supported:
// WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
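
Taken together, the new load-job fields combine like this; a minimal sketch assuming an existing *bigquery.Service and placeholder project, dataset, table and KMS key names:

package main

import "google.golang.org/api/bigquery/v2"

// insertLoadJob sketches a load job that uses the fields added above:
// a Cloud KMS destination key, schema update options and time-based
// partitioning. All identifiers are placeholders.
func insertLoadJob(svc *bigquery.Service) (*bigquery.Job, error) {
	job := &bigquery.Job{
		Configuration: &bigquery.JobConfiguration{
			Load: &bigquery.JobConfigurationLoad{
				SourceUris:   []string{"gs://my-bucket/data-*.csv"},
				SourceFormat: "CSV",
				DestinationTable: &bigquery.TableReference{
					ProjectId: "my-project",
					DatasetId: "my_dataset",
					TableId:   "my_table",
				},
				DestinationEncryptionConfiguration: &bigquery.EncryptionConfiguration{
					KmsKeyName: "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key",
				},
				SchemaUpdateOptions: []string{"ALLOW_FIELD_ADDITION", "ALLOW_FIELD_RELAXATION"},
				TimePartitioning:    &bigquery.TimePartitioning{Type: "DAY"},
				WriteDisposition:    "WRITE_APPEND",
			},
		},
	}
	return svc.Jobs.Insert("my-project", job).Do()
}
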
@@ -1436,6 +1549,10 @@ type JobConfigurationQuery struct {
// unqualified table names in the query.
DefaultDataset *DatasetReference `json:"defaultDataset,omitempty"`
// DestinationEncryptionConfiguration: [Experimental] Custom encryption
// configuration (e.g., Cloud KMS keys).
DestinationEncryptionConfiguration *EncryptionConfiguration `json:"destinationEncryptionConfiguration,omitempty"`
// DestinationTable: [Optional] Describes the table where the query
// results should be stored. If not present, a new table will be created
// to store the results. This property must be set for large results
@@ -1503,13 +1620,16 @@ type JobConfigurationQuery struct {
// source can then be queried as if it were a standard BigQuery table.
TableDefinitions map[string]ExternalDataConfiguration `json:"tableDefinitions,omitempty"`
// TimePartitioning: [Experimental] If specified, configures time-based
// partitioning for the destination table.
TimePartitioning *TimePartitioning `json:"timePartitioning,omitempty"`
// UseLegacySql: Specifies whether to use BigQuery's legacy SQL dialect
// for this query. The default value is true. If set to false, the query
// will use BigQuery's standard SQL:
// https://cloud.google.com/bigquery/sql-reference/ When useLegacySql is
// set to false, the values of allowLargeResults and flattenResults are
// ignored; query will be run as if allowLargeResults is true and
// flattenResults is false.
// set to false, the value of flattenResults is ignored; query will be
// run as if flattenResults is false.
UseLegacySql bool `json:"useLegacySql,omitempty"`
// UseQueryCache: [Optional] Whether to look for the result in the query
@@ -1528,13 +1648,13 @@ type JobConfigurationQuery struct {
// WriteDisposition: [Optional] Specifies the action that occurs if the
// destination table already exists. The following values are supported:
// WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
// table data. WRITE_APPEND: If the table already exists, BigQuery
// appends the data to the table. WRITE_EMPTY: If the table already
// exists and contains data, a 'duplicate' error is returned in the job
// result. The default value is WRITE_EMPTY. Each action is atomic and
// only occurs if BigQuery is able to complete the job successfully.
// Creation, truncation and append actions occur as one atomic update
// upon job completion.
// table data and uses the schema from the query result. WRITE_APPEND:
// If the table already exists, BigQuery appends the data to the table.
// WRITE_EMPTY: If the table already exists and contains data, a
// 'duplicate' error is returned in the job result. The default value is
// WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is
// able to complete the job successfully. Creation, truncation and
// append actions occur as one atomic update upon job completion.
WriteDisposition string `json:"writeDisposition,omitempty"`
// ForceSendFields is a list of field names (e.g. "AllowLargeResults")
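
For query jobs the same destination encryption and partitioning fields apply; a sketch that also forces standard SQL by listing UseLegacySql in ForceSendFields, since an explicit false would otherwise be dropped by omitempty (all identifiers are placeholders):

package main

import "google.golang.org/api/bigquery/v2"

// insertQueryJob sketches a standard-SQL query job writing to a
// KMS-encrypted, day-partitioned destination table.
func insertQueryJob(svc *bigquery.Service) (*bigquery.Job, error) {
	job := &bigquery.Job{
		Configuration: &bigquery.JobConfiguration{
			Query: &bigquery.JobConfigurationQuery{
				Query: "SELECT name, COUNT(*) AS c FROM `my-project.my_dataset.src` GROUP BY name",
				DestinationTable: &bigquery.TableReference{
					ProjectId: "my-project",
					DatasetId: "my_dataset",
					TableId:   "query_results",
				},
				DestinationEncryptionConfiguration: &bigquery.EncryptionConfiguration{
					KmsKeyName: "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key",
				},
				TimePartitioning: &bigquery.TimePartitioning{Type: "DAY"},
				WriteDisposition: "WRITE_TRUNCATE",
				// UseLegacySql is a plain bool with omitempty, so the
				// explicit false is only sent because it is force-sent.
				UseLegacySql:    false,
				ForceSendFields: []string{"UseLegacySql"},
			},
		},
	}
	return svc.Jobs.Insert("my-project", job).Do()
}
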
@@ -1571,6 +1691,10 @@ type JobConfigurationTableCopy struct {
// one atomic update upon job completion.
CreateDisposition string `json:"createDisposition,omitempty"`
// DestinationEncryptionConfiguration: [Experimental] Custom encryption
// configuration (e.g., Cloud KMS keys).
DestinationEncryptionConfiguration *EncryptionConfiguration `json:"destinationEncryptionConfiguration,omitempty"`
// DestinationTable: [Required] The destination table
DestinationTable *TableReference `json:"destinationTable,omitempty"`
@@ -1830,6 +1954,9 @@ type JobStatistics2 struct {
// TotalBytesProcessed: [Output-only] Total bytes processed for the job.
TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"`
// TotalSlotMs: [Output-only] Slot-milliseconds for the job.
TotalSlotMs int64 `json:"totalSlotMs,omitempty,string"`
// UndeclaredQueryParameters: [Output-only, Experimental] Standard SQL
// only: list of undeclared query parameters detected during a dry run
// validation.
@@ -1859,6 +1986,13 @@ func (s *JobStatistics2) MarshalJSON() ([]byte, error) {
}
type JobStatistics3 struct {
// BadRecords: [Output-only] The number of bad records encountered. Note
// that if the job has failed because of more bad records encountered
// than the maximum allowed in the load job configuration, then this
// number can be less than the total number of bad records present in
// the input data.
BadRecords int64 `json:"badRecords,omitempty,string"`
// InputFileBytes: [Output-only] Number of bytes of source data in a
// load job.
InputFileBytes int64 `json:"inputFileBytes,omitempty,string"`
@@ -1875,7 +2009,7 @@ type JobStatistics3 struct {
// change.
OutputRows int64 `json:"outputRows,omitempty,string"`
// ForceSendFields is a list of field names (e.g. "InputFileBytes") to
// ForceSendFields is a list of field names (e.g. "BadRecords") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
@@ -1883,13 +2017,12 @@ type JobStatistics3 struct {
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "InputFileBytes") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
// NullFields is a list of field names (e.g. "BadRecords") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
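
The new TotalSlotMs and BadRecords counters can be read back once a job has finished; a sketch assuming the usual JobStatistics layout (Statistics.Query for query jobs, Statistics.Load for load jobs):

package main

import (
	"fmt"

	"google.golang.org/api/bigquery/v2"
)

// reportJobStats prints the new statistics fields for a completed job.
func reportJobStats(svc *bigquery.Service, projectID, jobID string) error {
	job, err := svc.Jobs.Get(projectID, jobID).Do()
	if err != nil {
		return err
	}
	if job.Statistics == nil {
		return nil
	}
	if q := job.Statistics.Query; q != nil {
		fmt.Printf("slot-milliseconds consumed: %d\n", q.TotalSlotMs)
	}
	if l := job.Statistics.Load; l != nil {
		fmt.Printf("bad records skipped: %d\n", l.BadRecords)
	}
	return nil
}
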
@@ -2269,9 +2402,8 @@ type QueryRequest struct {
// for this query. The default value is true. If set to false, the query
// will use BigQuery's standard SQL:
// https://cloud.google.com/bigquery/sql-reference/ When useLegacySql is
// set to false, the values of allowLargeResults and flattenResults are
// ignored; query will be run as if allowLargeResults is true and
// flattenResults is false.
// set to false, the value of flattenResults is ignored; query will be
// run as if flattenResults is false.
//
// Default: true
UseLegacySql *bool `json:"useLegacySql,omitempty"`
@@ -2433,6 +2565,10 @@ type Table struct {
// Description: [Optional] A user-friendly description of this table.
Description string `json:"description,omitempty"`
// EncryptionConfiguration: [Experimental] Custom encryption
// configuration (e.g., Cloud KMS keys).
EncryptionConfiguration *EncryptionConfiguration `json:"encryptionConfiguration,omitempty"`
// Etag: [Output-only] A hash of this resource.
Etag string `json:"etag,omitempty"`
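
Tables themselves can now carry the same KMS settings; a minimal sketch of creating an encrypted table with Tables.Insert (identifiers and key path are placeholders):

package main

import "google.golang.org/api/bigquery/v2"

// createEncryptedTable sketches Tables.Insert with the new
// EncryptionConfiguration field on Table.
func createEncryptedTable(svc *bigquery.Service) (*bigquery.Table, error) {
	tbl := &bigquery.Table{
		TableReference: &bigquery.TableReference{
			ProjectId: "my-project",
			DatasetId: "my_dataset",
			TableId:   "encrypted_table",
		},
		EncryptionConfiguration: &bigquery.EncryptionConfiguration{
			KmsKeyName: "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key",
		},
		Schema: &bigquery.TableSchema{
			Fields: []*bigquery.TableFieldSchema{
				{Name: "name", Type: "STRING"},
				{Name: "event_time", Type: "TIMESTAMP"},
			},
		},
	}
	return svc.Tables.Insert("my-project", "my_dataset", tbl).Do()
}
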
@@ -3028,8 +3164,14 @@ type TimePartitioning struct {
// storage for a partition.
ExpirationMs int64 `json:"expirationMs,omitempty,string"`
// Field: [Experimental] [Optional] If not set, the table is partitioned
// by pseudo column '_PARTITIONTIME'; if set, the table is partitioned
// by this field. The field must be a top-level TIMESTAMP or DATE field.
// Its mode must be NULLABLE or REQUIRED.
Field string `json:"field,omitempty"`
// Type: [Required] The only type supported is DAY, which will generate
// one partition per day based on data loading time.
// one partition per day.
Type string `json:"type,omitempty"`
// ForceSendFields is a list of field names (e.g. "ExpirationMs") to
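
With the new Field option, partitioning can key off a TIMESTAMP or DATE column instead of the _PARTITIONTIME pseudo column; a small sketch (the column name is a placeholder):

package main

import "google.golang.org/api/bigquery/v2"

// fieldPartitioning returns a daily partitioning spec keyed on a
// TIMESTAMP/DATE column, expiring partitions after 90 days.
func fieldPartitioning() *bigquery.TimePartitioning {
	return &bigquery.TimePartitioning{
		Type:         "DAY",
		Field:        "event_time",
		ExpirationMs: 90 * 24 * 60 * 60 * 1000,
	}
}
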
@@ -4552,17 +4694,13 @@ func (c *JobsGetQueryResultsCall) Pages(ctx context.Context, f func(*GetQueryRes
// method id "bigquery.jobs.insert":
type JobsInsertCall struct {
s *Service
projectId string
job *Job
urlParams_ gensupport.URLParams
media_ io.Reader
mediaBuffer_ *gensupport.MediaBuffer
mediaType_ string
mediaSize_ int64 // mediaSize, if known. Used only for calls to progressUpdater_.
progressUpdater_ googleapi.ProgressUpdater
ctx_ context.Context
header_ http.Header
s *Service
projectId string
job *Job
urlParams_ gensupport.URLParams
mediaInfo_ *gensupport.MediaInfo
ctx_ context.Context
header_ http.Header
}
// Insert: Starts a new asynchronous job. Requires the Can View project
@@ -4583,12 +4721,7 @@ func (r *JobsService) Insert(projectId string, job *Job) *JobsInsertCall {
// supplied.
// At most one of Media and ResumableMedia may be set.
func (c *JobsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *JobsInsertCall {
opts := googleapi.ProcessMediaOptions(options)
chunkSize := opts.ChunkSize
if !opts.ForceEmptyContentType {
r, c.mediaType_ = gensupport.DetermineContentType(r, opts.ContentType)
}
c.media_, c.mediaBuffer_ = gensupport.PrepareUpload(r, chunkSize)
c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options)
return c
}
@@ -4603,11 +4736,7 @@ func (c *JobsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *J
// supersede any context previously provided to the Context method.
func (c *JobsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *JobsInsertCall {
c.ctx_ = ctx
rdr := gensupport.ReaderAtToReader(r, size)
rdr, c.mediaType_ = gensupport.DetermineContentType(rdr, mediaType)
c.mediaBuffer_ = gensupport.NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize)
c.media_ = nil
c.mediaSize_ = size
c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType)
return c
}
@@ -4616,7 +4745,7 @@ func (c *JobsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size
// not slow down the upload operation. This should only be called when
// using ResumableMedia (as opposed to Media).
func (c *JobsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *JobsInsertCall {
c.progressUpdater_ = pu
c.mediaInfo_.SetProgressUpdater(pu)
return c
}
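
The media plumbing above is consolidated into gensupport.MediaInfo, but the exported Media/ResumableMedia surface is unchanged; a sketch of attaching upload data to a job insert either way (project ID, path and content types are placeholders):

package main

import (
	"context"
	"os"

	"google.golang.org/api/bigquery/v2"
	"google.golang.org/api/googleapi"
)

// insertWithMedia uploads local data alongside a load job, using the
// simple multipart path for small files and the resumable path otherwise.
// job is assumed to already have Configuration.Load populated.
func insertWithMedia(ctx context.Context, svc *bigquery.Service, job *bigquery.Job, path string, resumable bool) (*bigquery.Job, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	call := svc.Jobs.Insert("my-project", job)
	if resumable {
		fi, err := f.Stat()
		if err != nil {
			return nil, err
		}
		call = call.ResumableMedia(ctx, f, fi.Size(), "application/octet-stream")
	} else {
		call = call.Media(f, googleapi.ContentType("text/csv"))
	}
	return call.Context(ctx).Do()
}
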
@@ -4661,27 +4790,16 @@ func (c *JobsInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/jobs")
if c.media_ != nil || c.mediaBuffer_ != nil {
if c.mediaInfo_ != nil {
urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1)
protocol := "multipart"
if c.mediaBuffer_ != nil {
protocol = "resumable"
}
c.urlParams_.Set("uploadType", protocol)
c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType())
}
if body == nil {
body = new(bytes.Buffer)
reqHeaders.Set("Content-Type", "application/json")
}
if c.media_ != nil {
combined, ctype := gensupport.CombineBodyMedia(body, "application/json", c.media_, c.mediaType_)
defer combined.Close()
reqHeaders.Set("Content-Type", ctype)
body = combined
}
if c.mediaBuffer_ != nil && c.mediaType_ != "" {
reqHeaders.Set("X-Upload-Content-Type", c.mediaType_)
}
body, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body)
defer cleanup()
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
req.Header = reqHeaders
@@ -4717,20 +4835,10 @@ func (c *JobsInsertCall) Do(opts ...googleapi.CallOption) (*Job, error) {
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
if c.mediaBuffer_ != nil {
loc := res.Header.Get("Location")
rx := &gensupport.ResumableUpload{
Client: c.s.client,
UserAgent: c.s.userAgent(),
URI: loc,
Media: c.mediaBuffer_,
MediaType: c.mediaType_,
Callback: func(curr int64) {
if c.progressUpdater_ != nil {
c.progressUpdater_(curr, c.mediaSize_)
}
},
}
rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location"))
if rx != nil {
rx.Client = c.s.client
rx.UserAgent = c.s.userAgent()
ctx := c.ctx_
if ctx == nil {
ctx = context.TODO()
@@ -5193,6 +5301,146 @@ func (c *JobsQueryCall) Do(opts ...googleapi.CallOption) (*QueryResponse, error)
}
// method id "bigquery.projects.getServiceAccount":
type ProjectsGetServiceAccountCall struct {
s *Service
projectId string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// GetServiceAccount: Returns the email address of the service account
// for your project used for interactions with Google Cloud KMS.
func (r *ProjectsService) GetServiceAccount(projectId string) *ProjectsGetServiceAccountCall {
c := &ProjectsGetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.projectId = projectId
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsGetServiceAccountCall) Fields(s ...googleapi.Field) *ProjectsGetServiceAccountCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsGetServiceAccountCall) IfNoneMatch(entityTag string) *ProjectsGetServiceAccountCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsGetServiceAccountCall) Context(ctx context.Context) *ProjectsGetServiceAccountCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsGetServiceAccountCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsGetServiceAccountCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/serviceAccount")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"projectId": c.projectId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "bigquery.projects.getServiceAccount" call.
// Exactly one of *GetServiceAccountResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *GetServiceAccountResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsGetServiceAccountCall) Do(opts ...googleapi.CallOption) (*GetServiceAccountResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &GetServiceAccountResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Returns the email address of the service account for your project used for interactions with Google Cloud KMS.",
// "httpMethod": "GET",
// "id": "bigquery.projects.getServiceAccount",
// "parameterOrder": [
// "projectId"
// ],
// "parameters": {
// "projectId": {
// "description": "Project ID for which the service account is requested.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "projects/{projectId}/serviceAccount",
// "response": {
// "$ref": "GetServiceAccountResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/bigquery",
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only"
// ]
// }
}
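
The new call is invoked like any other generated method; a minimal sketch of fetching the per-project KMS service account (the project ID is a placeholder and building the authenticated HTTP client is out of scope):

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"google.golang.org/api/bigquery/v2"
)

// printKMSServiceAccount prints the service account BigQuery uses for
// Cloud KMS access; grant this account access to your KMS keys before
// using the encryption fields above.
func printKMSServiceAccount(ctx context.Context, client *http.Client) {
	svc, err := bigquery.New(client)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := svc.Projects.GetServiceAccount("my-project").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("BigQuery KMS service account: %s\n", resp.Email)
}
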
// method id "bigquery.projects.list":
type ProjectsListCall struct {