diff --git a/cmd/stackdriver-prometheus-sidecar/main.go b/cmd/stackdriver-prometheus-sidecar/main.go
index cc74df8c..8b3e9b5e 100644
--- a/cmd/stackdriver-prometheus-sidecar/main.go
+++ b/cmd/stackdriver-prometheus-sidecar/main.go
@@ -32,6 +32,7 @@ import (
"time"
md "cloud.google.com/go/compute/metadata"
+ oc_stackdriver "contrib.go.opencensus.io/exporter/stackdriver"
"github.com/Stackdriver/stackdriver-prometheus-sidecar/metadata"
"github.com/Stackdriver/stackdriver-prometheus-sidecar/retrieval"
"github.com/Stackdriver/stackdriver-prometheus-sidecar/stackdriver"
@@ -184,6 +185,7 @@ func main() {
filters []string
metricRenames map[string]string
staticMetadata []scrape.MetricMetadata
+ monitoringBackends []string
logLevel promlog.AllowedLevel
}{}
@@ -228,6 +230,9 @@ func main() {
a.Flag("prometheus.api-address", "Address to listen on for UI, API, and telemetry.").
Default("http://127.0.0.1:9090/").URLVar(&cfg.prometheusURL)
+ a.Flag("monitoring.backend", "Monitoring backend(s) for internal metrics").Default("prometheus").
+ EnumsVar(&cfg.monitoringBackends, "prometheus", "stackdriver")
+
a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry.").
Default("0.0.0.0:9091").StringVar(&cfg.listenAddress)
@@ -258,20 +263,38 @@ func main() {
level.Info(logger).Log("host_details", Uname())
level.Info(logger).Log("fd_limits", FdLimits())
- promExporter, err := oc_prometheus.NewExporter(oc_prometheus.Options{
- Registry: prometheus.DefaultRegisterer.(*prometheus.Registry),
- })
- if err != nil {
- level.Error(logger).Log("msg", "Creating Prometheus exporter failed", "err", err)
- os.Exit(1)
- }
- view.RegisterExporter(promExporter)
-
httpClient := &http.Client{Transport: &ochttp.Transport{}}
if *projectId == "" {
*projectId = getGCEProjectID()
}
+
+ for _, backend := range cfg.monitoringBackends {
+ switch backend {
+ case "prometheus":
+ promExporter, err := oc_prometheus.NewExporter(oc_prometheus.Options{
+ Registry: prometheus.DefaultRegisterer.(*prometheus.Registry),
+ })
+ if err != nil {
+ level.Error(logger).Log("msg", "Creating Prometheus exporter failed", "err", err)
+ os.Exit(1)
+ }
+ view.RegisterExporter(promExporter)
+ case "stackdriver":
+ sd, err := oc_stackdriver.NewExporter(oc_stackdriver.Options{ProjectID: *projectId})
+ if err != nil {
+ level.Error(logger).Log("msg", "Creating Stackdriver exporter failed", "err", err)
+ os.Exit(1)
+ }
+ defer sd.Flush()
+ view.RegisterExporter(sd)
+ view.SetReportingPeriod(60 * time.Second)
+ default:
+ level.Error(logger).Log("msg", "Unknown monitoring backend", "backend", backend)
+ os.Exit(1)
+ }
+ }
+
var staticLabels = map[string]string{
retrieval.ProjectIDLabel: *projectId,
retrieval.KubernetesLocationLabel: cfg.kubernetesLabels.location,
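The new flag is declared with kingpin's EnumsVar, so it may be repeated on the command line (for example, --monitoring.backend=prometheus --monitoring.backend=stackdriver enables both backends). Below is a minimal, self-contained sketch of the same exporter wiring; the helper name and project ID are placeholders, not part of the change.

package main

import (
	"log"
	"time"

	oc_prometheus "contrib.go.opencensus.io/exporter/prometheus"
	oc_stackdriver "contrib.go.opencensus.io/exporter/stackdriver"
	"github.com/prometheus/client_golang/prometheus"
	"go.opencensus.io/stats/view"
)

// registerSelfMetrics mirrors the switch added above: it wires the sidecar's
// own OpenCensus views to each requested backend. The function name and the
// project ID passed in are illustrative only.
func registerSelfMetrics(backends []string, projectID string) error {
	for _, backend := range backends {
		switch backend {
		case "prometheus":
			// Expose internal metrics from the default Prometheus registry.
			exp, err := oc_prometheus.NewExporter(oc_prometheus.Options{
				Registry: prometheus.DefaultRegisterer.(*prometheus.Registry),
			})
			if err != nil {
				return err
			}
			view.RegisterExporter(exp)
		case "stackdriver":
			// Push the same views to Stackdriver once a minute.
			sd, err := oc_stackdriver.NewExporter(oc_stackdriver.Options{ProjectID: projectID})
			if err != nil {
				return err
			}
			view.RegisterExporter(sd)
			view.SetReportingPeriod(60 * time.Second)
		}
	}
	return nil
}

func main() {
	if err := registerSelfMetrics([]string{"prometheus", "stackdriver"}, "my-project"); err != nil {
		log.Fatal(err)
	}
}
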
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
index 9d0660be..125b7033 100644
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -20,6 +20,7 @@
package metadata // import "cloud.google.com/go/compute/metadata"
import (
+ "context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -31,9 +32,6 @@ import (
"strings"
"sync"
"time"
-
- "golang.org/x/net/context"
- "golang.org/x/net/context/ctxhttp"
)
const (
@@ -139,11 +137,11 @@ func testOnGCE() bool {
resc := make(chan bool, 2)
// Try two strategies in parallel.
- // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
+ // See https://github.com/googleapis/google-cloud-go/issues/194
go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent)
- res, err := ctxhttp.Do(ctx, defaultClient.hc, req)
+ res, err := defaultClient.hc.Do(req.WithContext(ctx))
if err != nil {
resc <- false
return
@@ -302,8 +300,8 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
// being stable anyway.
host = metadataIP
}
- url := "http://" + host + "/computeMetadata/v1/" + suffix
- req, _ := http.NewRequest("GET", url, nil)
+ u := "http://" + host + "/computeMetadata/v1/" + suffix
+ req, _ := http.NewRequest("GET", u, nil)
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
res, err := c.hc.Do(req)
@@ -314,13 +312,13 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
- if res.StatusCode != 200 {
- return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
- }
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
+ if res.StatusCode != 200 {
+ return "", "", &Error{Code: res.StatusCode, Message: string(all)}
+ }
return string(all), res.Header.Get("Etag"), nil
}
@@ -501,3 +499,15 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro
}
}
}
+
+// Error contains an error response from the server.
+type Error struct {
+ // Code is the HTTP response status code.
+ Code int
+ // Message is the server response message.
+ Message string
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
+}
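With the new Error type, callers can distinguish a missing metadata key from a non-200 response, which now carries the status code and server message. A small illustrative sketch, only meaningful on GCE/GKE where the metadata server is reachable:

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	id, err := metadata.ProjectID()
	switch e := err.(type) {
	case nil:
		fmt.Println("project:", id)
	case metadata.NotDefinedError:
		// 404: the requested metadata key is not defined.
		log.Printf("metadata key not defined: %v", e)
	case *metadata.Error:
		// Other non-200 responses now report the status code and body
		// instead of a bare fmt.Errorf string.
		log.Printf("metadata server returned %d: %s", e.Code, e.Message)
	default:
		log.Printf("metadata lookup failed: %v", err)
	}
}
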
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go
new file mode 100644
index 00000000..1d56e649
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go
@@ -0,0 +1,284 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapic-generator. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// AlertPolicyCallOptions contains the retry settings for each method of AlertPolicyClient.
+type AlertPolicyCallOptions struct {
+ ListAlertPolicies []gax.CallOption
+ GetAlertPolicy []gax.CallOption
+ CreateAlertPolicy []gax.CallOption
+ DeleteAlertPolicy []gax.CallOption
+ UpdateAlertPolicy []gax.CallOption
+}
+
+func defaultAlertPolicyClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("monitoring.googleapis.com:443"),
+ option.WithScopes(DefaultAuthScopes()...),
+ }
+}
+
+func defaultAlertPolicyCallOptions() *AlertPolicyCallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ }
+ return &AlertPolicyCallOptions{
+ ListAlertPolicies: retry[[2]string{"default", "idempotent"}],
+ GetAlertPolicy: retry[[2]string{"default", "idempotent"}],
+ CreateAlertPolicy: retry[[2]string{"default", "non_idempotent"}],
+ DeleteAlertPolicy: retry[[2]string{"default", "idempotent"}],
+ UpdateAlertPolicy: retry[[2]string{"default", "non_idempotent"}],
+ }
+}
+
+// AlertPolicyClient is a client for interacting with Stackdriver Monitoring API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type AlertPolicyClient struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ alertPolicyClient monitoringpb.AlertPolicyServiceClient
+
+ // The call options for this service.
+ CallOptions *AlertPolicyCallOptions
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogMetadata metadata.MD
+}
+
+// NewAlertPolicyClient creates a new alert policy service client.
+//
+// The AlertPolicyService API is used to manage (list, create, delete,
+// edit) alert policies in Stackdriver Monitoring. An alerting policy is
+// a description of the conditions under which some aspect of your
+// system is considered to be "unhealthy" and the ways to notify
+// people or services about this state. In addition to using this API, alert
+// policies can also be managed through
+// Stackdriver Monitoring (at https://cloud.google.com/monitoring/docs/),
+// which can be reached by clicking the "Monitoring" tab in
+// Cloud Console (at https://console.cloud.google.com/).
+func NewAlertPolicyClient(ctx context.Context, opts ...option.ClientOption) (*AlertPolicyClient, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultAlertPolicyClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &AlertPolicyClient{
+ conn: conn,
+ CallOptions: defaultAlertPolicyCallOptions(),
+
+ alertPolicyClient: monitoringpb.NewAlertPolicyServiceClient(conn),
+ }
+ c.setGoogleClientInfo()
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *AlertPolicyClient) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *AlertPolicyClient) Close() error {
+ return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *AlertPolicyClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", versionGo()}, keyval...)
+ kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// ListAlertPolicies lists the existing alerting policies for the project.
+func (c *AlertPolicyClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.ListAlertPolicies[0:len(c.CallOptions.ListAlertPolicies):len(c.CallOptions.ListAlertPolicies)], opts...)
+ it := &AlertPolicyIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListAlertPoliciesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.AlertPolicy, string, error) {
+ var resp *monitoringpb.ListAlertPoliciesResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.alertPolicyClient.ListAlertPolicies(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.AlertPolicies, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// GetAlertPolicy gets a single alerting policy.
+func (c *AlertPolicyClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.GetAlertPolicy[0:len(c.CallOptions.GetAlertPolicy):len(c.CallOptions.GetAlertPolicy)], opts...)
+ var resp *monitoringpb.AlertPolicy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.alertPolicyClient.GetAlertPolicy(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CreateAlertPolicy creates a new alerting policy.
+func (c *AlertPolicyClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.CreateAlertPolicy[0:len(c.CallOptions.CreateAlertPolicy):len(c.CallOptions.CreateAlertPolicy)], opts...)
+ var resp *monitoringpb.AlertPolicy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.alertPolicyClient.CreateAlertPolicy(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DeleteAlertPolicy deletes an alerting policy.
+func (c *AlertPolicyClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.DeleteAlertPolicy[0:len(c.CallOptions.DeleteAlertPolicy):len(c.CallOptions.DeleteAlertPolicy)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.alertPolicyClient.DeleteAlertPolicy(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// UpdateAlertPolicy updates an alerting policy. You can either replace the entire policy with
+// a new one or replace only certain fields in the current alerting policy by
+// specifying the fields to be updated via updateMask. Returns the
+// updated alerting policy.
+func (c *AlertPolicyClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "alert_policy.name", req.GetAlertPolicy().GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.UpdateAlertPolicy[0:len(c.CallOptions.UpdateAlertPolicy):len(c.CallOptions.UpdateAlertPolicy)], opts...)
+ var resp *monitoringpb.AlertPolicy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.alertPolicyClient.UpdateAlertPolicy(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// AlertPolicyIterator manages a stream of *monitoringpb.AlertPolicy.
+type AlertPolicyIterator struct {
+ items []*monitoringpb.AlertPolicy
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.AlertPolicy, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *AlertPolicyIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *AlertPolicyIterator) Next() (*monitoringpb.AlertPolicy, error) {
+ var item *monitoringpb.AlertPolicy
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *AlertPolicyIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *AlertPolicyIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
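For reference, a minimal sketch of using this generated client, listing policies with the iterator.Done sentinel; the project ID is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	monitoring "cloud.google.com/go/monitoring/apiv3"
	"google.golang.org/api/iterator"
	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
)

func main() {
	ctx := context.Background()
	client, err := monitoring.NewAlertPolicyClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// "my-project" is a placeholder project ID.
	it := client.ListAlertPolicies(ctx, &monitoringpb.ListAlertPoliciesRequest{
		Name: "projects/my-project",
	})
	for {
		policy, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(policy.GetDisplayName())
	}
}
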
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/doc.go b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go
new file mode 100644
index 00000000..692fc29c
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go
@@ -0,0 +1,105 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapic-generator. DO NOT EDIT.
+
+// Package monitoring is an auto-generated package for the
+// Stackdriver Monitoring API.
+//
+// NOTE: This package is in alpha. It is not stable, and is likely to change.
+//
+// Manages your Stackdriver Monitoring data and configurations. Most projects
+// must be associated with a Stackdriver account, with a few exceptions as
+// noted on the individual method pages.
+//
+// Use of Context
+//
+// The ctx passed to NewClient is used for authentication requests and
+// for creating the underlying connection, but is not used for subsequent calls.
+// Individual methods on the client use the ctx given to them.
+//
+// To close the open connection, use the Close() method.
+//
+// For information about setting deadlines, reusing contexts, and more
+// please visit godoc.org/cloud.google.com/go.
+package monitoring // import "cloud.google.com/go/monitoring/apiv3"
+
+import (
+ "context"
+ "runtime"
+ "strings"
+ "unicode"
+
+ "google.golang.org/grpc/metadata"
+)
+
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+ out, _ := metadata.FromOutgoingContext(ctx)
+ out = out.Copy()
+ for _, md := range mds {
+ for k, v := range md {
+ out[k] = append(out[k], v...)
+ }
+ }
+ return metadata.NewOutgoingContext(ctx, out)
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+ return []string{
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/monitoring",
+ "https://www.googleapis.com/auth/monitoring.read",
+ "https://www.googleapis.com/auth/monitoring.write",
+ }
+}
+
+// versionGo returns the Go runtime version. The returned string
+// has no whitespace, suitable for reporting in header.
+func versionGo() string {
+ const develPrefix = "devel +"
+
+ s := runtime.Version()
+ if strings.HasPrefix(s, develPrefix) {
+ s = s[len(develPrefix):]
+ if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
+ s = s[:p]
+ }
+ return s
+ }
+
+ notSemverRune := func(r rune) bool {
+ return strings.IndexRune("0123456789.", r) < 0
+ }
+
+ if strings.HasPrefix(s, "go1") {
+ s = s[2:]
+ var prerelease string
+ if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
+ s, prerelease = s[:p], s[p:]
+ }
+ if strings.HasSuffix(s, ".") {
+ s += "0"
+ } else if strings.Count(s, ".") < 2 {
+ s += ".0"
+ }
+ if prerelease != "" {
+ s += "-" + prerelease
+ }
+ return s
+ }
+ return "UNKNOWN"
+}
+
+const versionClient = "20190404"
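The insertMetadata helper above merges the per-call x-goog-* headers with whatever outgoing gRPC metadata is already attached to the context. A standalone sketch of the same merge behaviour; the function name here is illustrative:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// mergeOutgoing copies any metadata already attached to the outgoing context
// and appends the per-call pairs, exactly as insertMetadata does.
func mergeOutgoing(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}

func main() {
	ctx := metadata.AppendToOutgoingContext(context.Background(), "x-custom", "a")
	ctx = mergeOutgoing(ctx, metadata.Pairs("x-goog-request-params", "name=projects/my-project"))
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md["x-custom"], md["x-goog-request-params"]) // [a] [name=projects/my-project]
}
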
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go
new file mode 100644
index 00000000..84f4ec6e
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go
@@ -0,0 +1,368 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapic-generator. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// GroupCallOptions contains the retry settings for each method of GroupClient.
+type GroupCallOptions struct {
+ ListGroups []gax.CallOption
+ GetGroup []gax.CallOption
+ CreateGroup []gax.CallOption
+ UpdateGroup []gax.CallOption
+ DeleteGroup []gax.CallOption
+ ListGroupMembers []gax.CallOption
+}
+
+func defaultGroupClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("monitoring.googleapis.com:443"),
+ option.WithScopes(DefaultAuthScopes()...),
+ }
+}
+
+func defaultGroupCallOptions() *GroupCallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ }
+ return &GroupCallOptions{
+ ListGroups: retry[[2]string{"default", "idempotent"}],
+ GetGroup: retry[[2]string{"default", "idempotent"}],
+ CreateGroup: retry[[2]string{"default", "non_idempotent"}],
+ UpdateGroup: retry[[2]string{"default", "idempotent"}],
+ DeleteGroup: retry[[2]string{"default", "idempotent"}],
+ ListGroupMembers: retry[[2]string{"default", "idempotent"}],
+ }
+}
+
+// GroupClient is a client for interacting with Stackdriver Monitoring API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type GroupClient struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ groupClient monitoringpb.GroupServiceClient
+
+ // The call options for this service.
+ CallOptions *GroupCallOptions
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogMetadata metadata.MD
+}
+
+// NewGroupClient creates a new group service client.
+//
+// The Group API lets you inspect and manage your
+// groups (at #google.monitoring.v3.Group).
+//
+// A group is a named filter that is used to identify
+// a collection of monitored resources. Groups are typically used to
+// mirror the physical and/or logical topology of the environment.
+// Because group membership is computed dynamically, monitored
+// resources that are started in the future are automatically placed
+// in matching groups. By using a group to name monitored resources in,
+// for example, an alert policy, the target of that alert policy is
+// updated automatically as monitored resources are added and removed
+// from the infrastructure.
+func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupClient, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultGroupClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &GroupClient{
+ conn: conn,
+ CallOptions: defaultGroupCallOptions(),
+
+ groupClient: monitoringpb.NewGroupServiceClient(conn),
+ }
+ c.setGoogleClientInfo()
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *GroupClient) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *GroupClient) Close() error {
+ return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *GroupClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", versionGo()}, keyval...)
+ kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// ListGroups lists the existing groups.
+func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.ListGroups[0:len(c.CallOptions.ListGroups):len(c.CallOptions.ListGroups)], opts...)
+ it := &GroupIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListGroupsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) {
+ var resp *monitoringpb.ListGroupsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.groupClient.ListGroups(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.Group, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// GetGroup gets a single group.
+func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...)
+ var resp *monitoringpb.Group
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.groupClient.GetGroup(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CreateGroup creates a new group.
+func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.CreateGroup[0:len(c.CallOptions.CreateGroup):len(c.CallOptions.CreateGroup)], opts...)
+ var resp *monitoringpb.Group
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.groupClient.CreateGroup(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// UpdateGroup updates an existing group.
+// You can change any group attributes except name.
+func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "group.name", req.GetGroup().GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...)
+ var resp *monitoringpb.Group
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.groupClient.UpdateGroup(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DeleteGroup deletes an existing group.
+func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.DeleteGroup[0:len(c.CallOptions.DeleteGroup):len(c.CallOptions.DeleteGroup)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.groupClient.DeleteGroup(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// ListGroupMembers lists the monitored resources that are members of a group.
+func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.ListGroupMembers[0:len(c.CallOptions.ListGroupMembers):len(c.CallOptions.ListGroupMembers)], opts...)
+ it := &MonitoredResourceIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListGroupMembersRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) {
+ var resp *monitoringpb.ListGroupMembersResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.groupClient.ListGroupMembers(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.Members, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// GroupIterator manages a stream of *monitoringpb.Group.
+type GroupIterator struct {
+ items []*monitoringpb.Group
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Group, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *GroupIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *GroupIterator) Next() (*monitoringpb.Group, error) {
+ var item *monitoringpb.Group
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *GroupIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *GroupIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// MonitoredResourceIterator manages a stream of *monitoredrespb.MonitoredResource.
+type MonitoredResourceIterator struct {
+ items []*monitoredrespb.MonitoredResource
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResource, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *MonitoredResourceIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *MonitoredResourceIterator) Next() (*monitoredrespb.MonitoredResource, error) {
+ var item *monitoredrespb.MonitoredResource
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *MonitoredResourceIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *MonitoredResourceIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
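The PageInfo support on these iterators also allows explicit page-by-page access through iterator.NewPager. A minimal sketch with a placeholder project ID and page size:

package main

import (
	"context"
	"fmt"
	"log"

	monitoring "cloud.google.com/go/monitoring/apiv3"
	"google.golang.org/api/iterator"
	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
)

func main() {
	ctx := context.Background()
	client, err := monitoring.NewGroupClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// "my-project" is a placeholder project ID.
	it := client.ListGroups(ctx, &monitoringpb.ListGroupsRequest{Name: "projects/my-project"})

	// Walk the results 50 at a time via the iterator's PageInfo support.
	pager := iterator.NewPager(it, 50, "")
	for {
		var groups []*monitoringpb.Group
		nextToken, err := pager.NextPage(&groups)
		if err != nil {
			log.Fatal(err)
		}
		for _, g := range groups {
			fmt.Println(g.GetName(), g.GetDisplayName())
		}
		if nextToken == "" {
			break
		}
	}
}
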
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go
new file mode 100644
index 00000000..167c168a
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go
@@ -0,0 +1,466 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapic-generator. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ metricpb "google.golang.org/genproto/googleapis/api/metric"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// MetricCallOptions contains the retry settings for each method of MetricClient.
+type MetricCallOptions struct {
+ ListMonitoredResourceDescriptors []gax.CallOption
+ GetMonitoredResourceDescriptor []gax.CallOption
+ ListMetricDescriptors []gax.CallOption
+ GetMetricDescriptor []gax.CallOption
+ CreateMetricDescriptor []gax.CallOption
+ DeleteMetricDescriptor []gax.CallOption
+ ListTimeSeries []gax.CallOption
+ CreateTimeSeries []gax.CallOption
+}
+
+func defaultMetricClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("monitoring.googleapis.com:443"),
+ option.WithScopes(DefaultAuthScopes()...),
+ }
+}
+
+func defaultMetricCallOptions() *MetricCallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ }
+ return &MetricCallOptions{
+ ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}],
+ GetMonitoredResourceDescriptor: retry[[2]string{"default", "idempotent"}],
+ ListMetricDescriptors: retry[[2]string{"default", "idempotent"}],
+ GetMetricDescriptor: retry[[2]string{"default", "idempotent"}],
+ CreateMetricDescriptor: retry[[2]string{"default", "non_idempotent"}],
+ DeleteMetricDescriptor: retry[[2]string{"default", "idempotent"}],
+ ListTimeSeries: retry[[2]string{"default", "idempotent"}],
+ CreateTimeSeries: retry[[2]string{"default", "non_idempotent"}],
+ }
+}
+
+// MetricClient is a client for interacting with Stackdriver Monitoring API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type MetricClient struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ metricClient monitoringpb.MetricServiceClient
+
+ // The call options for this service.
+ CallOptions *MetricCallOptions
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogMetadata metadata.MD
+}
+
+// NewMetricClient creates a new metric service client.
+//
+// Manages metric descriptors, monitored resource descriptors, and
+// time series data.
+func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricClient, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultMetricClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &MetricClient{
+ conn: conn,
+ CallOptions: defaultMetricCallOptions(),
+
+ metricClient: monitoringpb.NewMetricServiceClient(conn),
+ }
+ c.setGoogleClientInfo()
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *MetricClient) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *MetricClient) Close() error {
+ return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *MetricClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", versionGo()}, keyval...)
+ kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. This method does
+// not require a Stackdriver account.
+func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...)
+ it := &MonitoredResourceDescriptorIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListMonitoredResourceDescriptorsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) {
+ var resp *monitoringpb.ListMonitoredResourceDescriptorsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.metricClient.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.ResourceDescriptors, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// GetMonitoredResourceDescriptor gets a single monitored resource descriptor. This method does not require a
+// Stackdriver account.
+func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.GetMonitoredResourceDescriptor[0:len(c.CallOptions.GetMonitoredResourceDescriptor):len(c.CallOptions.GetMonitoredResourceDescriptor)], opts...)
+ var resp *monitoredrespb.MonitoredResourceDescriptor
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.metricClient.GetMonitoredResourceDescriptor(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// ListMetricDescriptors lists metric descriptors that match a filter. This method does not require
+// a Stackdriver account.
+func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.ListMetricDescriptors[0:len(c.CallOptions.ListMetricDescriptors):len(c.CallOptions.ListMetricDescriptors)], opts...)
+ it := &MetricDescriptorIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListMetricDescriptorsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) {
+ var resp *monitoringpb.ListMetricDescriptorsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.metricClient.ListMetricDescriptors(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.MetricDescriptors, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// GetMetricDescriptor gets a single metric descriptor. This method does not require a Stackdriver
+// account.
+func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.GetMetricDescriptor[0:len(c.CallOptions.GetMetricDescriptor):len(c.CallOptions.GetMetricDescriptor)], opts...)
+ var resp *metricpb.MetricDescriptor
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.metricClient.GetMetricDescriptor(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CreateMetricDescriptor creates a new metric descriptor.
+// User-created metric descriptors define
+// custom metrics (at /monitoring/custom-metrics).
+func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.CreateMetricDescriptor[0:len(c.CallOptions.CreateMetricDescriptor):len(c.CallOptions.CreateMetricDescriptor)], opts...)
+ var resp *metricpb.MetricDescriptor
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.metricClient.CreateMetricDescriptor(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DeleteMetricDescriptor deletes a metric descriptor. Only user-created
+// custom metrics (at /monitoring/custom-metrics) can be deleted.
+func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.DeleteMetricDescriptor[0:len(c.CallOptions.DeleteMetricDescriptor):len(c.CallOptions.DeleteMetricDescriptor)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.metricClient.DeleteMetricDescriptor(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// ListTimeSeries lists time series that match a filter. This method does not require a
+// Stackdriver account.
+func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.ListTimeSeries[0:len(c.CallOptions.ListTimeSeries):len(c.CallOptions.ListTimeSeries)], opts...)
+ it := &TimeSeriesIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListTimeSeriesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) {
+ var resp *monitoringpb.ListTimeSeriesResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.metricClient.ListTimeSeries(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.TimeSeries, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// CreateTimeSeries creates or adds data to one or more time series.
+// The response is empty if all time series in the request were written.
+// If any time series could not be written, a corresponding failure message is
+// included in the error response.
+func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.CreateTimeSeries[0:len(c.CallOptions.CreateTimeSeries):len(c.CallOptions.CreateTimeSeries)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.metricClient.CreateTimeSeries(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor.
+type MetricDescriptorIterator struct {
+ items []*metricpb.MetricDescriptor
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*metricpb.MetricDescriptor, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *MetricDescriptorIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) {
+ var item *metricpb.MetricDescriptor
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *MetricDescriptorIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *MetricDescriptorIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor.
+type MonitoredResourceDescriptorIterator struct {
+ items []*monitoredrespb.MonitoredResourceDescriptor
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) {
+ var item *monitoredrespb.MonitoredResourceDescriptor
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *MonitoredResourceDescriptorIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// TimeSeriesIterator manages a stream of *monitoringpb.TimeSeries.
+type TimeSeriesIterator struct {
+ items []*monitoringpb.TimeSeries
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeries, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *TimeSeriesIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) {
+ var item *monitoringpb.TimeSeries
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *TimeSeriesIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *TimeSeriesIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
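CreateTimeSeries is the same RPC the sidecar relies on to push translated samples. A minimal sketch of writing a single point with this generated client; the project ID and metric type are placeholders, and it assumes the custom metric descriptor may be auto-created on first write:

package main

import (
	"context"
	"log"
	"time"

	monitoring "cloud.google.com/go/monitoring/apiv3"
	"github.com/golang/protobuf/ptypes/timestamp"
	metricpb "google.golang.org/genproto/googleapis/api/metric"
	monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
)

func main() {
	ctx := context.Background()
	client, err := monitoring.NewMetricClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	now := &timestamp.Timestamp{Seconds: time.Now().Unix()}
	// "my-project" and the metric type are placeholders.
	req := &monitoringpb.CreateTimeSeriesRequest{
		Name: "projects/my-project",
		TimeSeries: []*monitoringpb.TimeSeries{{
			Metric: &metricpb.Metric{
				Type: "custom.googleapis.com/example/queue_depth",
			},
			Resource: &monitoredrespb.MonitoredResource{
				Type:   "global",
				Labels: map[string]string{"project_id": "my-project"},
			},
			Points: []*monitoringpb.Point{{
				Interval: &monitoringpb.TimeInterval{EndTime: now},
				Value: &monitoringpb.TypedValue{
					Value: &monitoringpb.TypedValue_Int64Value{Int64Value: 42},
				},
			}},
		}},
	}
	if err := client.CreateTimeSeries(ctx, req); err != nil {
		log.Fatal(err)
	}
}
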
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go
new file mode 100644
index 00000000..cc7527c6
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go
@@ -0,0 +1,383 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapic-generator. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// NotificationChannelCallOptions contains the retry settings for each method of NotificationChannelClient.
+type NotificationChannelCallOptions struct {
+ ListNotificationChannelDescriptors []gax.CallOption
+ GetNotificationChannelDescriptor []gax.CallOption
+ ListNotificationChannels []gax.CallOption
+ GetNotificationChannel []gax.CallOption
+ CreateNotificationChannel []gax.CallOption
+ UpdateNotificationChannel []gax.CallOption
+ DeleteNotificationChannel []gax.CallOption
+}
+
+func defaultNotificationChannelClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("monitoring.googleapis.com:443"),
+ option.WithScopes(DefaultAuthScopes()...),
+ }
+}
+
+func defaultNotificationChannelCallOptions() *NotificationChannelCallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ }
+ return &NotificationChannelCallOptions{
+ ListNotificationChannelDescriptors: retry[[2]string{"default", "idempotent"}],
+ GetNotificationChannelDescriptor: retry[[2]string{"default", "idempotent"}],
+ ListNotificationChannels: retry[[2]string{"default", "idempotent"}],
+ GetNotificationChannel: retry[[2]string{"default", "idempotent"}],
+ CreateNotificationChannel: retry[[2]string{"default", "non_idempotent"}],
+ UpdateNotificationChannel: retry[[2]string{"default", "non_idempotent"}],
+ DeleteNotificationChannel: retry[[2]string{"default", "idempotent"}],
+ }
+}
+
+// NotificationChannelClient is a client for interacting with Stackdriver Monitoring API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type NotificationChannelClient struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ notificationChannelClient monitoringpb.NotificationChannelServiceClient
+
+ // The call options for this service.
+ CallOptions *NotificationChannelCallOptions
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogMetadata metadata.MD
+}
+
+// NewNotificationChannelClient creates a new notification channel service client.
+//
+// The Notification Channel API provides access to configuration that
+// controls how messages related to incidents are sent.
+func NewNotificationChannelClient(ctx context.Context, opts ...option.ClientOption) (*NotificationChannelClient, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultNotificationChannelClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &NotificationChannelClient{
+ conn: conn,
+ CallOptions: defaultNotificationChannelCallOptions(),
+
+ notificationChannelClient: monitoringpb.NewNotificationChannelServiceClient(conn),
+ }
+ c.setGoogleClientInfo()
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *NotificationChannelClient) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *NotificationChannelClient) Close() error {
+ return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *NotificationChannelClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", versionGo()}, keyval...)
+ kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// ListNotificationChannelDescriptors lists the descriptors for supported channel types. The use of descriptors
+// makes it possible for new channel types to be dynamically added.
+func (c *NotificationChannelClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.ListNotificationChannelDescriptors[0:len(c.CallOptions.ListNotificationChannelDescriptors):len(c.CallOptions.ListNotificationChannelDescriptors)], opts...)
+ it := &NotificationChannelDescriptorIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListNotificationChannelDescriptorsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannelDescriptor, string, error) {
+ var resp *monitoringpb.ListNotificationChannelDescriptorsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.notificationChannelClient.ListNotificationChannelDescriptors(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.ChannelDescriptors, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// GetNotificationChannelDescriptor gets a single channel descriptor. The descriptor indicates which fields
+// are expected / permitted for a notification channel of the given type.
+func (c *NotificationChannelClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.GetNotificationChannelDescriptor[0:len(c.CallOptions.GetNotificationChannelDescriptor):len(c.CallOptions.GetNotificationChannelDescriptor)], opts...)
+ var resp *monitoringpb.NotificationChannelDescriptor
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.notificationChannelClient.GetNotificationChannelDescriptor(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// ListNotificationChannels lists the notification channels that have been created for the project.
+func (c *NotificationChannelClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.ListNotificationChannels[0:len(c.CallOptions.ListNotificationChannels):len(c.CallOptions.ListNotificationChannels)], opts...)
+ it := &NotificationChannelIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListNotificationChannelsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannel, string, error) {
+ var resp *monitoringpb.ListNotificationChannelsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.notificationChannelClient.ListNotificationChannels(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.NotificationChannels, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// GetNotificationChannel gets a single notification channel. The channel includes the relevant
+// configuration details with which the channel was created. However, the
+// response may truncate or omit passwords, API keys, or other private key
+// matter and thus the response may not be 100% identical to the information
+// that was supplied in the call to the create method.
+func (c *NotificationChannelClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.GetNotificationChannel[0:len(c.CallOptions.GetNotificationChannel):len(c.CallOptions.GetNotificationChannel)], opts...)
+ var resp *monitoringpb.NotificationChannel
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.notificationChannelClient.GetNotificationChannel(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CreateNotificationChannel creates a new notification channel, representing a single notification
+// endpoint such as an email address, SMS number, or PagerDuty service.
+func (c *NotificationChannelClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.CreateNotificationChannel[0:len(c.CallOptions.CreateNotificationChannel):len(c.CallOptions.CreateNotificationChannel)], opts...)
+ var resp *monitoringpb.NotificationChannel
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.notificationChannelClient.CreateNotificationChannel(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// UpdateNotificationChannel updates a notification channel. Fields not specified in the field mask
+// remain unchanged.
+func (c *NotificationChannelClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "notification_channel.name", req.GetNotificationChannel().GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.UpdateNotificationChannel[0:len(c.CallOptions.UpdateNotificationChannel):len(c.CallOptions.UpdateNotificationChannel)], opts...)
+ var resp *monitoringpb.NotificationChannel
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.notificationChannelClient.UpdateNotificationChannel(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DeleteNotificationChannel deletes a notification channel.
+func (c *NotificationChannelClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.DeleteNotificationChannel[0:len(c.CallOptions.DeleteNotificationChannel):len(c.CallOptions.DeleteNotificationChannel)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.notificationChannelClient.DeleteNotificationChannel(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// NotificationChannelDescriptorIterator manages a stream of *monitoringpb.NotificationChannelDescriptor.
+type NotificationChannelDescriptorIterator struct {
+ items []*monitoringpb.NotificationChannelDescriptor
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannelDescriptor, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *NotificationChannelDescriptorIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *NotificationChannelDescriptorIterator) Next() (*monitoringpb.NotificationChannelDescriptor, error) {
+ var item *monitoringpb.NotificationChannelDescriptor
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *NotificationChannelDescriptorIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *NotificationChannelDescriptorIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// NotificationChannelIterator manages a stream of *monitoringpb.NotificationChannel.
+type NotificationChannelIterator struct {
+ items []*monitoringpb.NotificationChannel
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannel, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *NotificationChannelIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *NotificationChannelIterator) Next() (*monitoringpb.NotificationChannel, error) {
+ var item *monitoringpb.NotificationChannel
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *NotificationChannelIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *NotificationChannelIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
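
For orientation, a minimal usage sketch of the generated notification channel client above. It is illustrative only: it assumes application-default credentials are available and uses a placeholder project ID ("my-project").

package main

import (
	"context"
	"fmt"
	"log"

	monitoring "cloud.google.com/go/monitoring/apiv3"
	"google.golang.org/api/iterator"
	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
)

func main() {
	ctx := context.Background()

	// NewNotificationChannelClient picks up application-default credentials.
	client, err := monitoring.NewNotificationChannelClient(ctx)
	if err != nil {
		log.Fatalf("creating notification channel client: %v", err)
	}
	defer client.Close()

	// "my-project" is a placeholder project ID.
	it := client.ListNotificationChannels(ctx, &monitoringpb.ListNotificationChannelsRequest{
		Name: "projects/my-project",
	})
	for {
		ch, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatalf("listing notification channels: %v", err)
		}
		fmt.Println(ch.GetName())
	}
}
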
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go b/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go
new file mode 100644
index 00000000..b2b514ba
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go
@@ -0,0 +1,107 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package monitoring
+
+// GroupProjectPath returns the path for the project resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s", project)
+// instead.
+func GroupProjectPath(project string) string {
+ return "" +
+ "projects/" +
+ project +
+ ""
+}
+
+// GroupGroupPath returns the path for the group resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s/groups/%s", project, group)
+// instead.
+func GroupGroupPath(project, group string) string {
+ return "" +
+ "projects/" +
+ project +
+ "/groups/" +
+ group +
+ ""
+}
+
+// MetricProjectPath returns the path for the project resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s", project)
+// instead.
+func MetricProjectPath(project string) string {
+ return "" +
+ "projects/" +
+ project +
+ ""
+}
+
+// MetricMetricDescriptorPath returns the path for the metric descriptor resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s/metricDescriptors/%s", project, metricDescriptor)
+// instead.
+func MetricMetricDescriptorPath(project, metricDescriptor string) string {
+ return "" +
+ "projects/" +
+ project +
+ "/metricDescriptors/" +
+ metricDescriptor +
+ ""
+}
+
+// MetricMonitoredResourceDescriptorPath returns the path for the monitored resource descriptor resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s/monitoredResourceDescriptors/%s", project, monitoredResourceDescriptor)
+// instead.
+func MetricMonitoredResourceDescriptorPath(project, monitoredResourceDescriptor string) string {
+ return "" +
+ "projects/" +
+ project +
+ "/monitoredResourceDescriptors/" +
+ monitoredResourceDescriptor +
+ ""
+}
+
+// UptimeCheckProjectPath returns the path for the project resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s", project)
+// instead.
+func UptimeCheckProjectPath(project string) string {
+ return "" +
+ "projects/" +
+ project +
+ ""
+}
+
+// UptimeCheckUptimeCheckConfigPath returns the path for the uptime check config resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s/uptimeCheckConfigs/%s", project, uptimeCheckConfig)
+// instead.
+func UptimeCheckUptimeCheckConfigPath(project, uptimeCheckConfig string) string {
+ return "" +
+ "projects/" +
+ project +
+ "/uptimeCheckConfigs/" +
+ uptimeCheckConfig +
+ ""
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go
new file mode 100644
index 00000000..9eed7b17
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go
@@ -0,0 +1,367 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapic-generator. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// UptimeCheckCallOptions contains the retry settings for each method of UptimeCheckClient.
+type UptimeCheckCallOptions struct {
+ ListUptimeCheckConfigs []gax.CallOption
+ GetUptimeCheckConfig []gax.CallOption
+ CreateUptimeCheckConfig []gax.CallOption
+ UpdateUptimeCheckConfig []gax.CallOption
+ DeleteUptimeCheckConfig []gax.CallOption
+ ListUptimeCheckIps []gax.CallOption
+}
+
+func defaultUptimeCheckClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("monitoring.googleapis.com:443"),
+ option.WithScopes(DefaultAuthScopes()...),
+ }
+}
+
+func defaultUptimeCheckCallOptions() *UptimeCheckCallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ }
+ return &UptimeCheckCallOptions{
+ ListUptimeCheckConfigs: retry[[2]string{"default", "idempotent"}],
+ GetUptimeCheckConfig: retry[[2]string{"default", "idempotent"}],
+ CreateUptimeCheckConfig: retry[[2]string{"default", "non_idempotent"}],
+ UpdateUptimeCheckConfig: retry[[2]string{"default", "non_idempotent"}],
+ DeleteUptimeCheckConfig: retry[[2]string{"default", "idempotent"}],
+ ListUptimeCheckIps: retry[[2]string{"default", "idempotent"}],
+ }
+}
+
+// UptimeCheckClient is a client for interacting with Stackdriver Monitoring API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type UptimeCheckClient struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ uptimeCheckClient monitoringpb.UptimeCheckServiceClient
+
+ // The call options for this service.
+ CallOptions *UptimeCheckCallOptions
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogMetadata metadata.MD
+}
+
+// NewUptimeCheckClient creates a new uptime check service client.
+//
+// The UptimeCheckService API is used to manage (list, create, delete, edit)
+// uptime check configurations in the Stackdriver Monitoring product. An uptime
+// check is a piece of configuration that determines which resources and
+// services to monitor for availability. These configurations can also be
+// configured interactively by navigating to the [Cloud Console]
+// (http://console.cloud.google.com), selecting the appropriate project,
+// clicking on "Monitoring" on the left-hand side to navigate to Stackdriver,
+// and then clicking on "Uptime".
+func NewUptimeCheckClient(ctx context.Context, opts ...option.ClientOption) (*UptimeCheckClient, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultUptimeCheckClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &UptimeCheckClient{
+ conn: conn,
+ CallOptions: defaultUptimeCheckCallOptions(),
+
+ uptimeCheckClient: monitoringpb.NewUptimeCheckServiceClient(conn),
+ }
+ c.setGoogleClientInfo()
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *UptimeCheckClient) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *UptimeCheckClient) Close() error {
+ return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *UptimeCheckClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", versionGo()}, keyval...)
+ kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// ListUptimeCheckConfigs lists the existing valid uptime check configurations for the project,
+// leaving out any invalid configurations.
+func (c *UptimeCheckClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.ListUptimeCheckConfigs[0:len(c.CallOptions.ListUptimeCheckConfigs):len(c.CallOptions.ListUptimeCheckConfigs)], opts...)
+ it := &UptimeCheckConfigIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListUptimeCheckConfigsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckConfig, string, error) {
+ var resp *monitoringpb.ListUptimeCheckConfigsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.uptimeCheckClient.ListUptimeCheckConfigs(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.UptimeCheckConfigs, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// GetUptimeCheckConfig gets a single uptime check configuration.
+func (c *UptimeCheckClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.GetUptimeCheckConfig[0:len(c.CallOptions.GetUptimeCheckConfig):len(c.CallOptions.GetUptimeCheckConfig)], opts...)
+ var resp *monitoringpb.UptimeCheckConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.uptimeCheckClient.GetUptimeCheckConfig(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CreateUptimeCheckConfig creates a new uptime check configuration.
+func (c *UptimeCheckClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.CreateUptimeCheckConfig[0:len(c.CallOptions.CreateUptimeCheckConfig):len(c.CallOptions.CreateUptimeCheckConfig)], opts...)
+ var resp *monitoringpb.UptimeCheckConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.uptimeCheckClient.CreateUptimeCheckConfig(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// UpdateUptimeCheckConfig updates an uptime check configuration. You can either replace the entire
+// configuration with a new one or replace only certain fields in the current
+// configuration by specifying the fields to be updated via "updateMask".
+// Returns the updated configuration.
+func (c *UptimeCheckClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "uptime_check_config.name", req.GetUptimeCheckConfig().GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.UpdateUptimeCheckConfig[0:len(c.CallOptions.UpdateUptimeCheckConfig):len(c.CallOptions.UpdateUptimeCheckConfig)], opts...)
+ var resp *monitoringpb.UptimeCheckConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.uptimeCheckClient.UpdateUptimeCheckConfig(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DeleteUptimeCheckConfig deletes an uptime check configuration. Note that this method will fail
+// if the uptime check configuration is referenced by an alert policy or
+// other dependent configs that would be rendered invalid by the deletion.
+func (c *UptimeCheckClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.DeleteUptimeCheckConfig[0:len(c.CallOptions.DeleteUptimeCheckConfig):len(c.CallOptions.DeleteUptimeCheckConfig)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.uptimeCheckClient.DeleteUptimeCheckConfig(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// ListUptimeCheckIps returns the list of IPs that checkers run from
+func (c *UptimeCheckClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListUptimeCheckIps[0:len(c.CallOptions.ListUptimeCheckIps):len(c.CallOptions.ListUptimeCheckIps)], opts...)
+ it := &UptimeCheckIpIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListUptimeCheckIpsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckIp, string, error) {
+ var resp *monitoringpb.ListUptimeCheckIpsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.uptimeCheckClient.ListUptimeCheckIps(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.UptimeCheckIps, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// UptimeCheckConfigIterator manages a stream of *monitoringpb.UptimeCheckConfig.
+type UptimeCheckConfigIterator struct {
+ items []*monitoringpb.UptimeCheckConfig
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckConfig, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *UptimeCheckConfigIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *UptimeCheckConfigIterator) Next() (*monitoringpb.UptimeCheckConfig, error) {
+ var item *monitoringpb.UptimeCheckConfig
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *UptimeCheckConfigIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *UptimeCheckConfigIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// UptimeCheckIpIterator manages a stream of *monitoringpb.UptimeCheckIp.
+type UptimeCheckIpIterator struct {
+ items []*monitoringpb.UptimeCheckIp
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckIp, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *UptimeCheckIpIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *UptimeCheckIpIterator) Next() (*monitoringpb.UptimeCheckIp, error) {
+ var item *monitoringpb.UptimeCheckIp
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *UptimeCheckIpIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *UptimeCheckIpIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
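
Likewise, a minimal sketch of exercising the generated uptime check client above, under the same assumptions (application-default credentials, placeholder project ID "my-project"):

package main

import (
	"context"
	"fmt"
	"log"

	monitoring "cloud.google.com/go/monitoring/apiv3"
	"google.golang.org/api/iterator"
	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
)

func main() {
	ctx := context.Background()

	client, err := monitoring.NewUptimeCheckClient(ctx)
	if err != nil {
		log.Fatalf("creating uptime check client: %v", err)
	}
	defer client.Close()

	// "my-project" is a placeholder project ID.
	it := client.ListUptimeCheckConfigs(ctx, &monitoringpb.ListUptimeCheckConfigsRequest{
		Parent: "projects/my-project",
	})
	for {
		cfg, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatalf("listing uptime check configs: %v", err)
		}
		fmt.Println(cfg.GetName())
	}
}
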
diff --git a/vendor/cloud.google.com/go/trace/apiv2/doc.go b/vendor/cloud.google.com/go/trace/apiv2/doc.go
new file mode 100644
index 00000000..09753f4f
--- /dev/null
+++ b/vendor/cloud.google.com/go/trace/apiv2/doc.go
@@ -0,0 +1,105 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapic-generator. DO NOT EDIT.
+
+// Package trace is an auto-generated package for the
+// Stackdriver Trace API.
+//
+// NOTE: This package is in alpha. It is not stable, and is likely to change.
+//
+// Sends application trace data to Stackdriver Trace for viewing. Trace
+// data is collected for all App Engine applications by default. Trace
+// data from other applications can be provided using this API.
+//
+// Use of Context
+//
+// The ctx passed to NewClient is used for authentication requests and
+// for creating the underlying connection, but is not used for subsequent calls.
+// Individual methods on the client use the ctx given to them.
+//
+// To close the open connection, use the Close() method.
+//
+// For information about setting deadlines, reusing contexts, and more
+// please visit godoc.org/cloud.google.com/go.
+package trace // import "cloud.google.com/go/trace/apiv2"
+
+import (
+ "context"
+ "runtime"
+ "strings"
+ "unicode"
+
+ "google.golang.org/grpc/metadata"
+)
+
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+ out, _ := metadata.FromOutgoingContext(ctx)
+ out = out.Copy()
+ for _, md := range mds {
+ for k, v := range md {
+ out[k] = append(out[k], v...)
+ }
+ }
+ return metadata.NewOutgoingContext(ctx, out)
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+ return []string{
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/trace.append",
+ }
+}
+
+// versionGo returns the Go runtime version. The returned string
+// has no whitespace, suitable for reporting in header.
+func versionGo() string {
+ const develPrefix = "devel +"
+
+ s := runtime.Version()
+ if strings.HasPrefix(s, develPrefix) {
+ s = s[len(develPrefix):]
+ if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
+ s = s[:p]
+ }
+ return s
+ }
+
+ notSemverRune := func(r rune) bool {
+ return strings.IndexRune("0123456789.", r) < 0
+ }
+
+ if strings.HasPrefix(s, "go1") {
+ s = s[2:]
+ var prerelease string
+ if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
+ s, prerelease = s[:p], s[p:]
+ }
+ if strings.HasSuffix(s, ".") {
+ s += "0"
+ } else if strings.Count(s, ".") < 2 {
+ s += ".0"
+ }
+ if prerelease != "" {
+ s += "-" + prerelease
+ }
+ return s
+ }
+ return "UNKNOWN"
+}
+
+const versionClient = "20190404"
diff --git a/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go b/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go
new file mode 100644
index 00000000..80b8d40b
--- /dev/null
+++ b/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go
@@ -0,0 +1,43 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+// ProjectPath returns the path for the project resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s", project)
+// instead.
+func ProjectPath(project string) string {
+ return "" +
+ "projects/" +
+ project +
+ ""
+}
+
+// SpanPath returns the path for the span resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s/traces/%s/spans/%s", project, trace, span)
+// instead.
+func SpanPath(project, trace, span string) string {
+ return "" +
+ "projects/" +
+ project +
+ "/traces/" +
+ trace +
+ "/spans/" +
+ span +
+ ""
+}
diff --git a/vendor/cloud.google.com/go/trace/apiv2/trace_client.go b/vendor/cloud.google.com/go/trace/apiv2/trace_client.go
new file mode 100644
index 00000000..a9948d24
--- /dev/null
+++ b/vendor/cloud.google.com/go/trace/apiv2/trace_client.go
@@ -0,0 +1,155 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapic-generator. DO NOT EDIT.
+
+package trace
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// CallOptions contains the retry settings for each method of Client.
+type CallOptions struct {
+ BatchWriteSpans []gax.CallOption
+ CreateSpan []gax.CallOption
+}
+
+func defaultClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("cloudtrace.googleapis.com:443"),
+ option.WithScopes(DefaultAuthScopes()...),
+ }
+}
+
+func defaultCallOptions() *CallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 1000 * time.Millisecond,
+ Multiplier: 1.2,
+ })
+ }),
+ },
+ }
+ return &CallOptions{
+ BatchWriteSpans: retry[[2]string{"default", "non_idempotent"}],
+ CreateSpan: retry[[2]string{"default", "idempotent"}],
+ }
+}
+
+// Client is a client for interacting with Stackdriver Trace API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type Client struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ client cloudtracepb.TraceServiceClient
+
+ // The call options for this service.
+ CallOptions *CallOptions
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogMetadata metadata.MD
+}
+
+// NewClient creates a new trace service client.
+//
+// This file describes an API for collecting and viewing traces and spans
+// within a trace. A Trace is a collection of spans corresponding to a single
+// operation or set of operations for an application. A span is an individual
+// timed event which forms a node of the trace tree. A single trace may
+// contain span(s) from multiple services.
+func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &Client{
+ conn: conn,
+ CallOptions: defaultCallOptions(),
+
+ client: cloudtracepb.NewTraceServiceClient(conn),
+ }
+ c.setGoogleClientInfo()
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *Client) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *Client) Close() error {
+ return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *Client) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", versionGo()}, keyval...)
+ kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// BatchWriteSpans sends new spans to new or existing traces. You cannot update
+// existing spans.
+func (c *Client) BatchWriteSpans(ctx context.Context, req *cloudtracepb.BatchWriteSpansRequest, opts ...gax.CallOption) error {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.BatchWriteSpans[0:len(c.CallOptions.BatchWriteSpans):len(c.CallOptions.BatchWriteSpans)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.client.BatchWriteSpans(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// CreateSpan creates a new span.
+func (c *Client) CreateSpan(ctx context.Context, req *cloudtracepb.Span, opts ...gax.CallOption) (*cloudtracepb.Span, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.CreateSpan[0:len(c.CallOptions.CreateSpan):len(c.CallOptions.CreateSpan)], opts...)
+ var resp *cloudtracepb.Span
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.client.CreateSpan(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
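
And a minimal sketch of writing a span through the generated trace client above. The project, trace, and span IDs are placeholders, and application-default credentials are assumed.

package main

import (
	"context"
	"log"
	"time"

	trace "cloud.google.com/go/trace/apiv2"
	"github.com/golang/protobuf/ptypes"
	cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2"
)

func main() {
	ctx := context.Background()

	client, err := trace.NewClient(ctx)
	if err != nil {
		log.Fatalf("creating trace client: %v", err)
	}
	defer client.Close()

	// Placeholder project, trace, and span IDs; real callers generate a
	// 32-hex-character trace ID and a 16-hex-character span ID.
	const (
		project = "my-project"
		traceID = "0123456789abcdef0123456789abcdef"
		spanID  = "0123456789abcdef"
	)

	start := ptypes.TimestampNow()
	end, err := ptypes.TimestampProto(time.Now().Add(5 * time.Millisecond))
	if err != nil {
		log.Fatalf("converting timestamp: %v", err)
	}

	span := &cloudtracepb.Span{
		Name:        "projects/" + project + "/traces/" + traceID + "/spans/" + spanID,
		SpanId:      spanID,
		DisplayName: &cloudtracepb.TruncatableString{Value: "example-span"},
		StartTime:   start,
		EndTime:     end,
	}

	if err := client.BatchWriteSpans(ctx, &cloudtracepb.BatchWriteSpansRequest{
		Name:  "projects/" + project,
		Spans: []*cloudtracepb.Span{span},
	}); err != nil {
		log.Fatalf("writing spans: %v", err)
	}
}
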
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS b/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS
new file mode 100644
index 00000000..e491a9e7
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS
@@ -0,0 +1 @@
+Google Inc.
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/CONTRIBUTING.md b/vendor/contrib.go.opencensus.io/exporter/stackdriver/CONTRIBUTING.md
new file mode 100644
index 00000000..0786fdf4
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/CONTRIBUTING.md
@@ -0,0 +1,24 @@
+# How to contribute
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult [GitHub Help] for more
+information on using pull requests.
+
+[GitHub Help]: https://help.github.com/articles/about-pull-requests/
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/Gopkg.lock b/vendor/contrib.go.opencensus.io/exporter/stackdriver/Gopkg.lock
new file mode 100644
index 00000000..8bf6c6ce
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/Gopkg.lock
@@ -0,0 +1,81 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ name = "cloud.google.com/go"
+ packages = ["compute/metadata","internal/version","monitoring/apiv3","trace/apiv2"]
+ revision = "0fd7230b2a7505833d5f69b75cbd6c9582401479"
+ version = "v0.23.0"
+
+[[projects]]
+ name = "github.com/golang/protobuf"
+ packages = ["proto","protoc-gen-go/descriptor","ptypes","ptypes/any","ptypes/duration","ptypes/empty","ptypes/struct","ptypes/timestamp","ptypes/wrappers"]
+ revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
+ version = "v1.1.0"
+
+[[projects]]
+ name = "github.com/googleapis/gax-go"
+ packages = ["."]
+ revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f"
+ version = "v2.0.0"
+
+[[projects]]
+ name = "go.opencensus.io"
+ packages = [".","exporter/stackdriver/propagation","exporterutil","internal","internal/tagencoding","plugin/ocgrpc","plugin/ochttp","plugin/ochttp/propagation/b3","stats","stats/internal","stats/view","tag","trace","trace/internal","trace/propagation"]
+ revision = "5897c5ce32247fc8af19c7710abd96e3304fb43c"
+ version = "v0.12.0"
+
+[[projects]]
+ branch = "master"
+ name = "golang.org/x/net"
+ packages = ["context","context/ctxhttp","http/httpguts","http2","http2/hpack","idna","internal/timeseries","trace"]
+ revision = "1e491301e022f8f977054da4c2d852decd59571f"
+
+[[projects]]
+ branch = "master"
+ name = "golang.org/x/oauth2"
+ packages = [".","google","internal","jws","jwt"]
+ revision = "1e0a3fa8ba9a5c9eb35c271780101fdaf1b205d7"
+
+[[projects]]
+ branch = "master"
+ name = "golang.org/x/sync"
+ packages = ["semaphore"]
+ revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
+
+[[projects]]
+ name = "golang.org/x/text"
+ packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
+ revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
+ version = "v0.3.0"
+
+[[projects]]
+ branch = "master"
+ name = "google.golang.org/api"
+ packages = ["googleapi/transport","internal","iterator","option","support/bundler","transport","transport/grpc","transport/http"]
+ revision = "8e296ef260056b6323d10727db40512dac6d92d5"
+
+[[projects]]
+ name = "google.golang.org/appengine"
+ packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/socket","internal/urlfetch","socket","urlfetch"]
+ revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
+ version = "v1.0.0"
+
+[[projects]]
+ branch = "master"
+ name = "google.golang.org/genproto"
+ packages = ["googleapis/api/annotations","googleapis/api/distribution","googleapis/api/label","googleapis/api/metric","googleapis/api/monitoredres","googleapis/devtools/cloudtrace/v2","googleapis/monitoring/v3","googleapis/rpc/code","googleapis/rpc/status","protobuf/field_mask"]
+ revision = "81158efcc9f219c511e4d3c0d61a0e6e49c01a24"
+
+[[projects]]
+ name = "google.golang.org/grpc"
+ packages = [".","balancer","balancer/base","balancer/roundrobin","channelz","codes","connectivity","credentials","credentials/oauth","encoding","encoding/proto","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
+ revision = "41344da2231b913fa3d983840a57a6b1b7b631a1"
+ version = "v1.12.0"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "d587e278f7302f82cb7f5c14e5e7ce831c84f198c05ede6c16a8afa4d6112f9e"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/Gopkg.toml b/vendor/contrib.go.opencensus.io/exporter/stackdriver/Gopkg.toml
new file mode 100644
index 00000000..9b6c523d
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/Gopkg.toml
@@ -0,0 +1,62 @@
+# Gopkg.toml example
+#
+# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+#
+# [prune]
+# non-go = false
+# go-tests = true
+# unused-packages = true
+
+
+[[constraint]]
+ name = "cloud.google.com/go"
+ version = ">=0.23.0"
+
+[[constraint]]
+ name = "github.com/golang/protobuf"
+ version = "1.1.0"
+
+[[constraint]]
+ name = "go.opencensus.io"
+ version = ">=0.12.0"
+
+[[constraint]]
+ branch = "master"
+ name = "golang.org/x/net"
+
+[[constraint]]
+ branch = "master"
+ name = "golang.org/x/oauth2"
+
+[[constraint]]
+ branch = "master"
+ name = "google.golang.org/api"
+
+[[constraint]]
+ branch = "master"
+ name = "google.golang.org/genproto"
+
+[[constraint]]
+ name = "google.golang.org/grpc"
+ version = "1.12.0"
+
+[prune]
+ go-tests = true
+ unused-packages = true
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE b/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/README.md b/vendor/contrib.go.opencensus.io/exporter/stackdriver/README.md
new file mode 100644
index 00000000..24a6e11f
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/README.md
@@ -0,0 +1,14 @@
+# OpenCensus Go Stackdriver
+
+[![Build Status](https://travis-ci.org/census-ecosystem/opencensus-go-exporter-stackdriver.svg?branch=master)](https://travis-ci.org/census-ecosystem/opencensus-go-exporter-stackdriver) [![GoDoc][godoc-image]][godoc-url]
+
+Provides OpenCensus exporter support for Stackdriver Monitoring and Stackdriver Trace.
+
+## Installation
+
+```
+$ go get -u contrib.go.opencensus.io/exporter/stackdriver
+```
+
+[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver?status.svg
+[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver
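
For orientation, a minimal sketch of how this vendored exporter is typically wired into a Go program; the project ID and reporting period below are placeholders, not values taken from this change:

```go
package main

import (
	"log"
	"time"

	"contrib.go.opencensus.io/exporter/stackdriver"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/trace"
)

func main() {
	// With Application Default Credentials, ProjectID may be left empty and is
	// resolved automatically; "my-project" is a placeholder.
	exporter, err := stackdriver.NewExporter(stackdriver.Options{ProjectID: "my-project"})
	if err != nil {
		log.Fatalf("creating Stackdriver exporter: %v", err)
	}
	defer exporter.Flush() // push any buffered stats and spans before exiting

	// The same Exporter value acts as both a stats and a trace exporter.
	view.RegisterExporter(exporter)
	trace.RegisterExporter(exporter)
	view.SetReportingPeriod(60 * time.Second)
}
```
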
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/go.mod b/vendor/contrib.go.opencensus.io/exporter/stackdriver/go.mod
new file mode 100644
index 00000000..64ac3c7b
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/go.mod
@@ -0,0 +1,17 @@
+module contrib.go.opencensus.io/exporter/stackdriver
+
+require (
+ cloud.google.com/go v0.23.0
+ github.com/aws/aws-sdk-go v1.15.31
+ github.com/golang/protobuf v1.1.0
+ github.com/googleapis/gax-go v2.0.0+incompatible
+ go.opencensus.io v0.15.0
+ golang.org/x/net v0.0.0-20180530234432-1e491301e022
+ golang.org/x/oauth2 v0.0.0-20180603041954-1e0a3fa8ba9a
+ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f
+ golang.org/x/text v0.3.0
+ google.golang.org/api v0.0.0-20180603000442-8e296ef26005
+ google.golang.org/appengine v1.0.0
+ google.golang.org/genproto v0.0.0-20180601223552-81158efcc9f2
+ google.golang.org/grpc v1.12.0
+)
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/go.sum b/vendor/contrib.go.opencensus.io/exporter/stackdriver/go.sum
new file mode 100644
index 00000000..6f164a93
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/go.sum
@@ -0,0 +1,31 @@
+cloud.google.com/go v0.23.0 h1:w1svupRqvZnfjN9+KksMiggoIRQuMzWkVzpxcR96xDs=
+cloud.google.com/go v0.23.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/aws/aws-sdk-go v1.15.31 h1:ExgD8W8QDeD8Y4CPVlcP/laumxvikDbkVWB+VCHgXxA=
+github.com/aws/aws-sdk-go v1.15.31/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/go-ini/ini v1.25.4 h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo=
+github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc=
+github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
+github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+go.opencensus.io v0.12.0 h1:b4qzedsGI32kquU8ld+R0Wo3Rkk5BYtTk0NJCWlrS5I=
+go.opencensus.io v0.12.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
+go.opencensus.io v0.15.0 h1:r1SzcjSm4ybA0qZs3B4QYX072f8gK61Kh0qtwyFpfdk=
+go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
+golang.org/x/net v0.0.0-20180530234432-1e491301e022 h1:MVYFTUmVD3/+ERcvRRI+P/C2+WOUimXh+Pd8LVsklZ4=
+golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/oauth2 v0.0.0-20180603041954-1e0a3fa8ba9a h1:1Fy38jwe/QZhQfFQBy6dMj9F/WU1C+jo3/zLNr/WhW4=
+golang.org/x/oauth2 v0.0.0-20180603041954-1e0a3fa8ba9a/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+google.golang.org/api v0.0.0-20180603000442-8e296ef26005 h1:m1a5vhNDkQFbbYz9mbJE7dQ3LbA7O/h3J+1o+0Vf1aQ=
+google.golang.org/api v0.0.0-20180603000442-8e296ef26005/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/genproto v0.0.0-20180601223552-81158efcc9f2 h1:sMABGloYWbbW983h5ILd81SriMtAKs09jn9CmSjNW3c=
+google.golang.org/genproto v0.0.0-20180601223552-81158efcc9f2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/grpc v1.12.0 h1:Mm8atZtkT+P6R43n/dqNDWkPPu5BwRVu/1rJnJCeZH8=
+google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go
new file mode 100644
index 00000000..88835cc0
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go
@@ -0,0 +1,33 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stackdriver
+
+// Labels represents a set of Stackdriver Monitoring labels.
+type Labels struct {
+ m map[string]labelValue
+}
+
+type labelValue struct {
+ val, desc string
+}
+
+// Set stores a label with the given key, value and description,
+// overwriting any previous values with the given key.
+func (labels *Labels) Set(key, value, description string) {
+ if labels.m == nil {
+ labels.m = make(map[string]labelValue)
+ }
+ labels.m[key] = labelValue{value, description}
+}
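
The Labels type above is consumed through the exporter's DefaultMonitoringLabels option, defined later in this diff. A small sketch, using an illustrative "service" label, of how a custom label set replaces the default "opencensus_task" label:

```go
package main

import (
	"log"

	"contrib.go.opencensus.io/exporter/stackdriver"
)

func main() {
	// Key, value, and description below are illustrative.
	var labels stackdriver.Labels
	labels.Set("service", "prometheus-sidecar", "Name of the exporting process")

	// Supplying DefaultMonitoringLabels suppresses the default "opencensus_task"
	// label, so the chosen labels must still uniquely identify this process.
	exporter, err := stackdriver.NewExporter(stackdriver.Options{
		ProjectID:               "my-project", // placeholder
		DefaultMonitoringLabels: &labels,
	})
	if err != nil {
		log.Fatalf("creating Stackdriver exporter: %v", err)
	}
	defer exporter.Flush()
}
```
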
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go
new file mode 100644
index 00000000..d6a23a8c
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go
@@ -0,0 +1,53 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package monitoredresource
+
+import (
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/session"
+)
+
+// awsIdentityDocument is used to store parsed AWS Identity Document.
+type awsIdentityDocument struct {
+ // accountID is the AWS account number for the VM.
+ accountID string
+
+ // instanceID is the instance id of the instance.
+ instanceID string
+
+ // Region is the AWS region for the VM.
+ region string
+}
+
+// retrieveAWSIdentityDocument attempts to retrieve the AWS identity document.
+// If the environment is an AWS EC2 instance, a valid document is retrieved and
+// its relevant attributes are stored in the returned awsIdentityDocument;
+// otherwise nil is returned. This is only done once.
+func retrieveAWSIdentityDocument() *awsIdentityDocument {
+ awsIdentityDoc := awsIdentityDocument{}
+ c := ec2metadata.New(session.New())
+ if !c.Available() {
+ return nil
+ }
+ ec2InstanceIdentifyDocument, err := c.GetInstanceIdentityDocument()
+ if err != nil {
+ return nil
+ }
+ awsIdentityDoc.region = ec2InstanceIdentifyDocument.Region
+ awsIdentityDoc.instanceID = ec2InstanceIdentifyDocument.InstanceID
+ awsIdentityDoc.accountID = ec2InstanceIdentifyDocument.AccountID
+
+ return &awsIdentityDoc
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go
new file mode 100644
index 00000000..ceb754e5
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go
@@ -0,0 +1,90 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package monitoredresource
+
+import (
+ "log"
+ "os"
+ "strings"
+
+ "cloud.google.com/go/compute/metadata"
+)
+
+// gcpMetadata represents metadata retrieved from GCP (GKE and GCE) environment.
+type gcpMetadata struct {
+
+ // projectID is the identifier of the GCP project associated with this resource, such as "my-project".
+ projectID string
+
+ // instanceID is the numeric VM instance identifier assigned by Compute Engine.
+ instanceID string
+
+ // clusterName is the name for the cluster the container is running in.
+ clusterName string
+
+ // containerName is the name of the container.
+ containerName string
+
+ // namespaceID is the identifier for the cluster namespace the container is running in
+ namespaceID string
+
+ // podID is the identifier for the pod the container is running in.
+ podID string
+
+ // zone is the Compute Engine zone in which the VM is running.
+ zone string
+}
+
+// retrieveGCPMetadata retrieves the value of each attribute from the metadata
+// server in GKE container and GCE instance environments.
+// Some attributes are retrieved from the system environment instead.
+// This is only executed once, guarded by detectOnce.
+func retrieveGCPMetadata() *gcpMetadata {
+ gcpMetadata := gcpMetadata{}
+ var err error
+ gcpMetadata.instanceID, err = metadata.InstanceID()
+ if err != nil {
+ // Not a GCP environment
+ return &gcpMetadata
+ }
+
+ gcpMetadata.projectID, err = metadata.ProjectID()
+ logError(err)
+
+ gcpMetadata.zone, err = metadata.Zone()
+ logError(err)
+
+ clusterName, err := metadata.InstanceAttributeValue("cluster-name")
+ logError(err)
+ gcpMetadata.clusterName = strings.TrimSpace(clusterName)
+
+ // Following attributes are derived from environment variables. They are configured
+ // via yaml file. For details refer to:
+ // https://cloud.google.com/kubernetes-engine/docs/tutorials/custom-metrics-autoscaling#exporting_metrics_from_the_application
+ gcpMetadata.namespaceID = os.Getenv("NAMESPACE")
+ gcpMetadata.containerName = os.Getenv("CONTAINER_NAME")
+ gcpMetadata.podID = os.Getenv("HOSTNAME")
+
+ return &gcpMetadata
+}
+
+// logError logs the error only if it is present and does not contain 'not defined'.
+func logError(err error) {
+ if err != nil {
+ if !strings.Contains(err.Error(), "not defined") {
+ log.Printf("Error retrieving gcp metadata: %v", err)
+ }
+ }
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go
new file mode 100644
index 00000000..c07e55ce
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go
@@ -0,0 +1,217 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package monitoredresource
+
+import (
+ "fmt"
+ "os"
+ "sync"
+)
+
+// Interface is implemented by types that represent a monitored resource and can
+// report its Stackdriver resource type and resource labels.
+type Interface interface {
+
+ // MonitoredResource returns the resource type and resource labels.
+ MonitoredResource() (resType string, labels map[string]string)
+}
+
+// GKEContainer represents gke_container type monitored resource.
+// For definition refer to
+// https://cloud.google.com/monitoring/api/resources#tag_gke_container
+type GKEContainer struct {
+
+ // ProjectID is the identifier of the GCP project associated with this resource, such as "my-project".
+ ProjectID string
+
+ // InstanceID is the numeric VM instance identifier assigned by Compute Engine.
+ InstanceID string
+
+ // ClusterName is the name for the cluster the container is running in.
+ ClusterName string
+
+ // ContainerName is the name of the container.
+ ContainerName string
+
+ // NamespaceID is the identifier for the cluster namespace the container is running in
+ NamespaceID string
+
+ // PodID is the identifier for the pod the container is running in.
+ PodID string
+
+ // Zone is the Compute Engine zone in which the VM is running.
+ Zone string
+}
+
+// MonitoredResource returns resource type and resource labels for GKEContainer
+func (gke *GKEContainer) MonitoredResource() (resType string, labels map[string]string) {
+ labels = map[string]string{
+ "project_id": gke.ProjectID,
+ "instance_id": gke.InstanceID,
+ "zone": gke.Zone,
+ "cluster_name": gke.ClusterName,
+ "container_name": gke.ContainerName,
+ "namespace_id": gke.NamespaceID,
+ "pod_id": gke.PodID,
+ }
+ return "gke_container", labels
+}
+
+// GCEInstance represents gce_instance type monitored resource.
+// For definition refer to
+// https://cloud.google.com/monitoring/api/resources#tag_gce_instance
+type GCEInstance struct {
+
+ // ProjectID is the identifier of the GCP project associated with this resource, such as "my-project".
+ ProjectID string
+
+ // InstanceID is the numeric VM instance identifier assigned by Compute Engine.
+ InstanceID string
+
+ // Zone is the Compute Engine zone in which the VM is running.
+ Zone string
+}
+
+// MonitoredResource returns resource type and resource labels for GCEInstance
+func (gce *GCEInstance) MonitoredResource() (resType string, labels map[string]string) {
+ labels = map[string]string{
+ "project_id": gce.ProjectID,
+ "instance_id": gce.InstanceID,
+ "zone": gce.Zone,
+ }
+ return "gce_instance", labels
+}
+
+// AWSEC2Instance represents aws_ec2_instance type monitored resource.
+// For definition refer to
+// https://cloud.google.com/monitoring/api/resources#tag_aws_ec2_instance
+type AWSEC2Instance struct {
+
+ // AWSAccount is the AWS account number for the VM.
+ AWSAccount string
+
+ // InstanceID is the instance id of the instance.
+ InstanceID string
+
+ // Region is the AWS region for the VM. The format of this field is "aws:{region}",
+ // where supported values for {region} are listed at
+ // http://docs.aws.amazon.com/general/latest/gr/rande.html.
+ Region string
+}
+
+// MonitoredResource returns resource type and resource labels for AWSEC2Instance
+func (aws *AWSEC2Instance) MonitoredResource() (resType string, labels map[string]string) {
+ labels = map[string]string{
+ "aws_account": aws.AWSAccount,
+ "instance_id": aws.InstanceID,
+ "region": aws.Region,
+ }
+ return "aws_ec2_instance", labels
+}
+
+// Autodetect detects the monitored resource based on the environment in which
+// the application is running.
+// It supports detection of the following resource types:
+// 1. gke_container
+// 2. gce_instance
+// 3. aws_ec2_instance
+//
+// It returns an Interface whose MonitoredResource method reports the detected
+// resource type and labels, or nil if no supported environment is detected.
+// For resource definitions see https://cloud.google.com/monitoring/api/resources
+func Autodetect() Interface {
+ return func() Interface {
+ var autoDetected Interface
+ var awsIdentityDoc *awsIdentityDocument
+ var gcpMetadata *gcpMetadata
+ detectOnce.Do(func() {
+
+ // First attempt to retrieve the AWS identity document and the GCP metadata
+ // in parallel, then determine the resource type from the results.
+ // In GCP and AWS environments both lookups finish quickly. In any other
+ // environment (e.g. a local laptop) the GCP lookup takes about 2 seconds
+ // and the AWS lookup about 5-6 seconds.
+ var wg sync.WaitGroup
+ wg.Add(2)
+
+ go func() {
+ defer wg.Done()
+ awsIdentityDoc = retrieveAWSIdentityDocument()
+ }()
+ go func() {
+ defer wg.Done()
+ gcpMetadata = retrieveGCPMetadata()
+ }()
+
+ wg.Wait()
+ autoDetected = detectResourceType(awsIdentityDoc, gcpMetadata)
+ })
+ return autoDetected
+ }()
+
+}
+
+// createAWSEC2InstanceMonitoredResource creates a aws_ec2_instance monitored resource
+// awsIdentityDoc contains AWS EC2 specific attributes.
+func createAWSEC2InstanceMonitoredResource(awsIdentityDoc *awsIdentityDocument) *AWSEC2Instance {
+ awsInstance := AWSEC2Instance{
+ AWSAccount: awsIdentityDoc.accountID,
+ InstanceID: awsIdentityDoc.instanceID,
+ Region: fmt.Sprintf("aws:%s", awsIdentityDoc.region),
+ }
+ return &awsInstance
+}
+
+// createGCEInstanceMonitoredResource creates a gce_instance monitored resource
+// gcpMetadata contains GCP (GKE or GCE) specific attributes.
+func createGCEInstanceMonitoredResource(gcpMetadata *gcpMetadata) *GCEInstance {
+ gceInstance := GCEInstance{
+ ProjectID: gcpMetadata.projectID,
+ InstanceID: gcpMetadata.instanceID,
+ Zone: gcpMetadata.zone,
+ }
+ return &gceInstance
+}
+
+// createGKEContainerMonitoredResource creates a gke_container monitored resource
+// gcpMetadata contains GCP (GKE or GCE) specific attributes.
+func createGKEContainerMonitoredResource(gcpMetadata *gcpMetadata) *GKEContainer {
+ gkeContainer := GKEContainer{
+ ProjectID: gcpMetadata.projectID,
+ InstanceID: gcpMetadata.instanceID,
+ Zone: gcpMetadata.zone,
+ ContainerName: gcpMetadata.containerName,
+ ClusterName: gcpMetadata.clusterName,
+ NamespaceID: gcpMetadata.namespaceID,
+ PodID: gcpMetadata.podID,
+ }
+ return &gkeContainer
+}
+
+// detectOnce is used to make sure GCP and AWS metadata detect function executes only once.
+var detectOnce sync.Once
+
+// detectResourceType determines the resource type.
+// awsIdentityDoc contains AWS EC2 attributes. nil if it is not AWS EC2 environment
+// gcpMetadata contains GCP (GKE or GCE) specific attributes.
+func detectResourceType(awsIdentityDoc *awsIdentityDocument, gcpMetadata *gcpMetadata) Interface {
+ if os.Getenv("KUBERNETES_SERVICE_HOST") != "" &&
+ gcpMetadata != nil && gcpMetadata.instanceID != "" {
+ return createGKEContainerMonitoredResource(gcpMetadata)
+ } else if gcpMetadata != nil && gcpMetadata.instanceID != "" {
+ return createGCEInstanceMonitoredResource(gcpMetadata)
+ } else if awsIdentityDoc != nil {
+ return createAWSEC2InstanceMonitoredResource(awsIdentityDoc)
+ }
+ return nil
+}
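
Autodetect above is the usual way to populate the exporter's MonitoredResource option. A sketch, assuming a placeholder project ID, of falling back to the default "global" resource when no supported environment is detected:

```go
package main

import (
	"log"

	"contrib.go.opencensus.io/exporter/stackdriver"
	"contrib.go.opencensus.io/exporter/stackdriver/monitoredresource"
)

func main() {
	opts := stackdriver.Options{ProjectID: "my-project"} // placeholder

	// Autodetect returns nil outside GKE, GCE, and EC2; leaving the field unset
	// makes the exporter fall back to the default "global" monitored resource.
	if mr := monitoredresource.Autodetect(); mr != nil {
		opts.MonitoredResource = mr
	}

	exporter, err := stackdriver.NewExporter(opts)
	if err != nil {
		log.Fatalf("creating Stackdriver exporter: %v", err)
	}
	defer exporter.Flush()
}
```
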
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go
new file mode 100644
index 00000000..184bb1d4
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go
@@ -0,0 +1,50 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stackdriver
+
+import (
+ "strings"
+ "unicode"
+)
+
+const labelKeySizeLimit = 100
+
+// sanitize returns a string that is truncated to 100 characters if it's too
+// long, and replaces non-alphanumeric characters with underscores.
+func sanitize(s string) string {
+ if len(s) == 0 {
+ return s
+ }
+ if len(s) > labelKeySizeLimit {
+ s = s[:labelKeySizeLimit]
+ }
+ s = strings.Map(sanitizeRune, s)
+ if unicode.IsDigit(rune(s[0])) {
+ s = "key_" + s
+ }
+ if s[0] == '_' {
+ s = "key" + s
+ }
+ return s
+}
+
+// converts anything that is not a letter or digit to an underscore
+func sanitizeRune(r rune) rune {
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ return r
+ }
+ // Everything else turns into an underscore
+ return '_'
+}
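
To make the rules above concrete, a hypothetical in-package test; the cases are illustrative and not taken from the upstream test suite:

```go
package stackdriver

import "testing"

// TestSanitizeSketch illustrates the rules implemented by sanitize:
// non-alphanumeric runes become underscores, and keys that would start
// with a digit or an underscore get a "key_"/"key" prefix.
func TestSanitizeSketch(t *testing.T) {
	cases := map[string]string{
		"method":         "method",
		"http.status":    "http_status",
		"9th_percentile": "key_9th_percentile",
		"_internal":      "key_internal",
	}
	for in, want := range cases {
		if got := sanitize(in); got != want {
			t.Errorf("sanitize(%q) = %q, want %q", in, got, want)
		}
	}
}
```
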
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go
new file mode 100644
index 00000000..8b07faf1
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go
@@ -0,0 +1,337 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package stackdriver contains the OpenCensus exporters for
+// Stackdriver Monitoring and Stackdriver Tracing.
+//
+// This exporter can be used to send metrics to Stackdriver Monitoring and traces
+// to Stackdriver Trace.
+//
+// The package uses Application Default Credentials to authenticate by default.
+// See: https://developers.google.com/identity/protocols/application-default-credentials
+//
+// Alternatively, pass the authentication options in both the MonitoringClientOptions
+// and the TraceClientOptions fields of Options.
+//
+// Stackdriver Monitoring
+//
+// This exporter supports exporting OpenCensus views to Stackdriver Monitoring.
+// Each registered view becomes a metric in Stackdriver Monitoring, with the
+// tags becoming labels.
+//
+// The aggregation function determines the metric kind: LastValue aggregations
+// generate Gauge metrics and all other aggregations generate Cumulative metrics.
+//
+// In order to be able to push your stats to Stackdriver Monitoring, you must:
+//
+// 1. Create a Cloud project: https://support.google.com/cloud/answer/6251787?hl=en
+// 2. Enable billing: https://support.google.com/cloud/answer/6288653#new-billing
+// 3. Enable the Stackdriver Monitoring API: https://console.cloud.google.com/apis/dashboard
+//
+// These steps enable the API but don't require that your app is hosted on Google Cloud Platform.
+//
+// Stackdriver Trace
+//
+// This exporter supports exporting Trace Spans to Stackdriver Trace. It also
+// supports the Google "Cloud Trace" propagation format header.
+package stackdriver // import "contrib.go.opencensus.io/exporter/stackdriver"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log"
+ "time"
+
+ traceapi "cloud.google.com/go/trace/apiv2"
+ "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource"
+ "go.opencensus.io/stats/view"
+ "go.opencensus.io/tag"
+ "go.opencensus.io/trace"
+ "golang.org/x/oauth2/google"
+ "google.golang.org/api/option"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+)
+
+// Options contains options for configuring the exporter.
+type Options struct {
+ // ProjectID is the identifier of the Stackdriver
+ // project the user is uploading the stats data to.
+ // If not set, this will default to your "Application Default Credentials".
+ // For details see: https://developers.google.com/accounts/docs/application-default-credentials
+ ProjectID string
+
+ // OnError is the hook to be called when there is
+ // an error uploading the stats or tracing data.
+ // If no custom hook is set, errors are logged.
+ // Optional.
+ OnError func(err error)
+
+ // MonitoringClientOptions are additional options to be passed
+ // to the underlying Stackdriver Monitoring API client.
+ // Optional.
+ MonitoringClientOptions []option.ClientOption
+
+ // TraceClientOptions are additional options to be passed
+ // to the underlying Stackdriver Trace API client.
+ // Optional.
+ TraceClientOptions []option.ClientOption
+
+ // BundleDelayThreshold determines the max amount of time
+ // the exporter can wait before uploading view data or trace spans to
+ // the backend.
+ // Optional.
+ BundleDelayThreshold time.Duration
+
+ // BundleCountThreshold determines how many view data events or trace spans
+ // can be buffered before batch uploading them to the backend.
+ // Optional.
+ BundleCountThreshold int
+
+ // TraceSpansBufferMaxBytes is the maximum size (in bytes) of spans that
+ // will be buffered in memory before being dropped.
+ //
+ // If unset, a default of 8MB will be used.
+ TraceSpansBufferMaxBytes int
+
+ // Resource sets the MonitoredResource against which all views will be
+ // recorded by this exporter.
+ //
+ // All Stackdriver metrics created by this exporter are custom metrics,
+ // so only a limited number of MonitoredResource types are supported, see:
+ // https://cloud.google.com/monitoring/custom-metrics/creating-metrics#which-resource
+ //
+ // An important consideration when setting the Resource here is that
+ // Stackdriver Monitoring only allows a single writer per
+ // TimeSeries, see: https://cloud.google.com/monitoring/api/v3/metrics-details#intro-time-series
+ // A TimeSeries is uniquely defined by the metric type name
+ // (constructed from the view name and the MetricPrefix), the Resource field,
+ // and the set of label key/value pairs (in OpenCensus terminology: tag).
+ //
+ // If no custom Resource is set, a default MonitoredResource
+ // with type global and no resource labels will be used. If you explicitly
+ // set this field, you may also want to set custom DefaultMonitoringLabels.
+ //
+ // Deprecated: Use MonitoredResource instead.
+ Resource *monitoredrespb.MonitoredResource
+
+ // MonitoredResource sets the MonitoredResource against which all views will be
+ // recorded by this exporter.
+ //
+ // All Stackdriver metrics created by this exporter are custom metrics,
+ // so only a limited number of MonitoredResource types are supported, see:
+ // https://cloud.google.com/monitoring/custom-metrics/creating-metrics#which-resource
+ //
+ // An important consideration when setting the MonitoredResource here is that
+ // Stackdriver Monitoring only allows a single writer per
+ // TimeSeries, see: https://cloud.google.com/monitoring/api/v3/metrics-details#intro-time-series
+ // A TimeSeries is uniquely defined by the metric type name
+ // (constructed from the view name and the MetricPrefix), the MonitoredResource field,
+ // and the set of label key/value pairs (in OpenCensus terminology: tag).
+ //
+ // If no custom MonitoredResource is set AND if Resource is also not set then
+ // a default MonitoredResource with type global and no resource labels will be used.
+ // If you explicitly set this field, you may also want to set custom DefaultMonitoringLabels.
+ //
+ // This field replaces Resource field. If this is set then it will override the
+ // Resource field.
+ // Optional, but encouraged.
+ MonitoredResource monitoredresource.Interface
+
+ // MetricPrefix overrides the prefix of Stackdriver metric display names.
+ // Optional. If unset defaults to "OpenCensus/".
+ // Deprecated: Provide GetMetricDisplayName to change the display name of
+ // the metric.
+ // If GetMetricDisplayName is non-nil, this option is ignored.
+ MetricPrefix string
+
+ // GetMetricDisplayName allows customizing the display name for the metric
+ // associated with the given view. By default it will be:
+ // MetricPrefix + view.Name
+ GetMetricDisplayName func(view *view.View) string
+
+ // GetMetricType allows customizing the metric type for the given view.
+ // By default, it will be:
+ // "custom.googleapis.com/opencensus/" + view.Name
+ //
+ // See: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricDescriptor
+ GetMetricType func(view *view.View) string
+
+ // DefaultTraceAttributes will be appended to every span that is exported to
+ // Stackdriver Trace.
+ DefaultTraceAttributes map[string]interface{}
+
+ // DefaultMonitoringLabels are labels added to every metric created by this
+ // exporter in Stackdriver Monitoring.
+ //
+ // If unset, this defaults to a single label with key "opencensus_task" and
+ // value "go-@". This default ensures that the set of labels
+ // together with the default Resource (global) are unique to this
+ // process, as required by Stackdriver Monitoring.
+ //
+ // If you set DefaultMonitoringLabels, make sure that the Resource field
+ // together with these labels is unique to the
+ // current process. This is to ensure that there is only a single writer to
+ // each TimeSeries in Stackdriver.
+ //
+ // Set this to &Labels{} (a pointer to an empty Labels) to avoid getting the
+ // default "opencensus_task" label. You should only do this if you know that
+ // the Resource you set uniquely identifies this Go process.
+ DefaultMonitoringLabels *Labels
+
+ // Context allows you to provide a custom context for API calls.
+ //
+ // This context will be used several times: first, to create Stackdriver
+ // trace and metric clients, and then every time a new batch of traces or
+ // stats needs to be uploaded.
+ //
+ // Do not set a timeout on this context. Instead, set the Timeout option.
+ //
+ // If unset, context.Background() will be used.
+ Context context.Context
+
+ // Timeout for all API calls. If not set, defaults to 5 seconds.
+ Timeout time.Duration
+
+ // GetMonitoredResource may be provided to supply the details of the
+ // monitored resource dynamically based on the tags associated with each
+ // data point. Most users will not need to set this, but should instead
+ // set the MonitoredResource field.
+ //
+ // GetMonitoredResource may add or remove tags by returning a new set of
+ // tags. It is safe for the function to mutate its argument and return it.
+ //
+ // See the documentation on the MonitoredResource field for guidance on the
+ // interaction between monitored resources and labels.
+ //
+ // The MonitoredResource field is ignored if this field is set to a non-nil
+ // value.
+ GetMonitoredResource func(*view.View, []tag.Tag) ([]tag.Tag, monitoredresource.Interface)
+}
+
+const defaultTimeout = 5 * time.Second
+
+// Exporter is a stats and trace exporter that uploads data to Stackdriver.
+//
+// You can create a single Exporter and register it as both a trace exporter
+// (to export to Stackdriver Trace) and a stats exporter (to integrate with
+// Stackdriver Monitoring).
+type Exporter struct {
+ traceExporter *traceExporter
+ statsExporter *statsExporter
+}
+
+// NewExporter creates a new Exporter that implements both stats.Exporter and
+// trace.Exporter.
+func NewExporter(o Options) (*Exporter, error) {
+ if o.ProjectID == "" {
+ ctx := o.Context
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ creds, err := google.FindDefaultCredentials(ctx, traceapi.DefaultAuthScopes()...)
+ if err != nil {
+ return nil, fmt.Errorf("stackdriver: %v", err)
+ }
+ if creds.ProjectID == "" {
+ return nil, errors.New("stackdriver: no project found with application default credentials")
+ }
+ o.ProjectID = creds.ProjectID
+ }
+
+ if o.MonitoredResource != nil {
+ o.Resource = convertMonitoredResourceToPB(o.MonitoredResource)
+ }
+
+ se, err := newStatsExporter(o)
+ if err != nil {
+ return nil, err
+ }
+ te, err := newTraceExporter(o)
+ if err != nil {
+ return nil, err
+ }
+ return &Exporter{
+ statsExporter: se,
+ traceExporter: te,
+ }, nil
+}
+
+// ExportView exports to the Stackdriver Monitoring if view data
+// has one or more rows.
+func (e *Exporter) ExportView(vd *view.Data) {
+ e.statsExporter.ExportView(vd)
+}
+
+// ExportSpan exports a SpanData to Stackdriver Trace.
+func (e *Exporter) ExportSpan(sd *trace.SpanData) {
+ if len(e.traceExporter.o.DefaultTraceAttributes) > 0 {
+ sd = e.sdWithDefaultTraceAttributes(sd)
+ }
+ e.traceExporter.ExportSpan(sd)
+}
+
+func (e *Exporter) sdWithDefaultTraceAttributes(sd *trace.SpanData) *trace.SpanData {
+ newSD := *sd
+ newSD.Attributes = make(map[string]interface{})
+ for k, v := range e.traceExporter.o.DefaultTraceAttributes {
+ newSD.Attributes[k] = v
+ }
+ for k, v := range sd.Attributes {
+ newSD.Attributes[k] = v
+ }
+ return &newSD
+}
+
+// Flush waits for exported data to be uploaded.
+//
+// This is useful if your program is ending and you do not
+// want to lose recent stats or spans.
+func (e *Exporter) Flush() {
+ e.statsExporter.Flush()
+ e.traceExporter.Flush()
+}
+
+func (o Options) handleError(err error) {
+ if o.OnError != nil {
+ o.OnError(err)
+ return
+ }
+ log.Printf("Failed to export to Stackdriver: %v", err)
+}
+
+func (o Options) newContextWithTimeout() (context.Context, func()) {
+ ctx := o.Context
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ timeout := o.Timeout
+ if timeout <= 0 {
+ timeout = defaultTimeout
+ }
+ return context.WithTimeout(ctx, timeout)
+}
+
+// convertMonitoredResourceToPB converts MonitoredResource data in to
+// protocol buffer.
+func convertMonitoredResourceToPB(mr monitoredresource.Interface) *monitoredrespb.MonitoredResource {
+ mrpb := new(monitoredrespb.MonitoredResource)
+ var labels map[string]string
+ mrpb.Type, labels = mr.MonitoredResource()
+ mrpb.Labels = make(map[string]string)
+ for k, v := range labels {
+ mrpb.Labels[k] = v
+ }
+ return mrpb
+}
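
The error-handling, batching, and deadline knobs documented in Options can be combined as below; every value shown is illustrative rather than a recommended setting:

```go
package main

import (
	"context"
	"log"
	"time"

	"contrib.go.opencensus.io/exporter/stackdriver"
)

func main() {
	exporter, err := stackdriver.NewExporter(stackdriver.Options{
		ProjectID: "my-project",         // placeholder
		Context:   context.Background(), // base context for client creation and uploads
		Timeout:   10 * time.Second,     // per-API-call deadline (default is 5s)
		OnError: func(err error) {
			// Without this hook the exporter only logs upload errors.
			log.Printf("stackdriver export error: %v", err)
		},
		BundleDelayThreshold: 30 * time.Second, // upload buffered data at least this often
		BundleCountThreshold: 100,              // or once this many items are buffered
	})
	if err != nil {
		log.Fatalf("creating Stackdriver exporter: %v", err)
	}
	defer exporter.Flush()
}
```
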
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go
new file mode 100644
index 00000000..937606ba
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go
@@ -0,0 +1,496 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stackdriver
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "go.opencensus.io"
+ "go.opencensus.io/stats"
+ "go.opencensus.io/stats/view"
+ "go.opencensus.io/tag"
+ "go.opencensus.io/trace"
+
+ "cloud.google.com/go/monitoring/apiv3"
+ "github.com/golang/protobuf/ptypes/timestamp"
+ "google.golang.org/api/option"
+ "google.golang.org/api/support/bundler"
+ distributionpb "google.golang.org/genproto/googleapis/api/distribution"
+ labelpb "google.golang.org/genproto/googleapis/api/label"
+ "google.golang.org/genproto/googleapis/api/metric"
+ metricpb "google.golang.org/genproto/googleapis/api/metric"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
+)
+
+const (
+ maxTimeSeriesPerUpload = 200
+ opencensusTaskKey = "opencensus_task"
+ opencensusTaskDescription = "Opencensus task identifier"
+ defaultDisplayNamePrefix = "OpenCensus"
+ version = "0.8.0"
+)
+
+var userAgent = fmt.Sprintf("opencensus-go %s; stackdriver-exporter %s", opencensus.Version(), version)
+
+// statsExporter exports stats to the Stackdriver Monitoring.
+type statsExporter struct {
+ bundler *bundler.Bundler
+ o Options
+
+ createdViewsMu sync.Mutex
+ createdViews map[string]*metricpb.MetricDescriptor // Views already created remotely
+
+ c *monitoring.MetricClient
+ defaultLabels map[string]labelValue
+}
+
+var (
+ errBlankProjectID = errors.New("expecting a non-blank ProjectID")
+)
+
+// newStatsExporter returns an exporter that uploads stats data to Stackdriver Monitoring.
+// Only one Stackdriver exporter should be created per ProjectID per process; any subsequent
+// invocations of NewExporter with the same ProjectID will return an error.
+func newStatsExporter(o Options) (*statsExporter, error) {
+ if strings.TrimSpace(o.ProjectID) == "" {
+ return nil, errBlankProjectID
+ }
+
+ opts := append(o.MonitoringClientOptions, option.WithUserAgent(userAgent))
+ ctx, cancel := o.newContextWithTimeout()
+ defer cancel()
+ client, err := monitoring.NewMetricClient(ctx, opts...)
+ if err != nil {
+ return nil, err
+ }
+ e := &statsExporter{
+ c: client,
+ o: o,
+ createdViews: make(map[string]*metricpb.MetricDescriptor),
+ }
+
+ if o.DefaultMonitoringLabels != nil {
+ e.defaultLabels = o.DefaultMonitoringLabels.m
+ } else {
+ e.defaultLabels = map[string]labelValue{
+ opencensusTaskKey: {val: getTaskValue(), desc: opencensusTaskDescription},
+ }
+ }
+ e.bundler = bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) {
+ vds := bundle.([]*view.Data)
+ e.handleUpload(vds...)
+ })
+ if e.o.BundleDelayThreshold > 0 {
+ e.bundler.DelayThreshold = e.o.BundleDelayThreshold
+ }
+ if e.o.BundleCountThreshold > 0 {
+ e.bundler.BundleCountThreshold = e.o.BundleCountThreshold
+ }
+ return e, nil
+}
+
+func (e *statsExporter) getMonitoredResource(v *view.View, tags []tag.Tag) ([]tag.Tag, *monitoredrespb.MonitoredResource) {
+ if get := e.o.GetMonitoredResource; get != nil {
+ newTags, mr := get(v, tags)
+ return newTags, convertMonitoredResourceToPB(mr)
+ } else {
+ resource := e.o.Resource
+ if resource == nil {
+ resource = &monitoredrespb.MonitoredResource{
+ Type: "global",
+ }
+ }
+ return tags, resource
+ }
+}
+
+// ExportView exports to the Stackdriver Monitoring if view data
+// has one or more rows.
+func (e *statsExporter) ExportView(vd *view.Data) {
+ if len(vd.Rows) == 0 {
+ return
+ }
+ err := e.bundler.Add(vd, 1)
+ switch err {
+ case nil:
+ return
+ case bundler.ErrOverflow:
+ e.o.handleError(errors.New("failed to upload: buffer full"))
+ default:
+ e.o.handleError(err)
+ }
+}
+
+// getTaskValue returns a task label value in the format of
+// "go-<pid>@<hostname>".
+func getTaskValue() string {
+ hostname, err := os.Hostname()
+ if err != nil {
+ hostname = "localhost"
+ }
+ return "go-" + strconv.Itoa(os.Getpid()) + "@" + hostname
+}
+
+// handleUpload handles uploading a slice
+// of Data, as well as error handling.
+func (e *statsExporter) handleUpload(vds ...*view.Data) {
+ if err := e.uploadStats(vds); err != nil {
+ e.o.handleError(err)
+ }
+}
+
+// Flush waits for exported view data to be uploaded.
+//
+// This is useful if your program is ending and you do not
+// want to lose recent spans.
+func (e *statsExporter) Flush() {
+ e.bundler.Flush()
+}
+
+func (e *statsExporter) uploadStats(vds []*view.Data) error {
+ ctx, cancel := e.o.newContextWithTimeout()
+ defer cancel()
+ ctx, span := trace.StartSpan(
+ ctx,
+ "contrib.go.opencensus.io/exporter/stackdriver.uploadStats",
+ trace.WithSampler(trace.NeverSample()),
+ )
+ defer span.End()
+
+ for _, vd := range vds {
+ if err := e.createMeasure(ctx, vd.View); err != nil {
+ span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
+ return err
+ }
+ }
+ for _, req := range e.makeReq(vds, maxTimeSeriesPerUpload) {
+ if err := createTimeSeries(ctx, e.c, req); err != nil {
+ span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
+ // TODO(jbd): Don't fail fast here, batch errors?
+ return err
+ }
+ }
+ return nil
+}
+
+func (e *statsExporter) makeReq(vds []*view.Data, limit int) []*monitoringpb.CreateTimeSeriesRequest {
+ var reqs []*monitoringpb.CreateTimeSeriesRequest
+ var timeSeries []*monitoringpb.TimeSeries
+ for _, vd := range vds {
+ for _, row := range vd.Rows {
+ tags, resource := e.getMonitoredResource(vd.View, append([]tag.Tag(nil), row.Tags...))
+ ts := &monitoringpb.TimeSeries{
+ Metric: &metricpb.Metric{
+ Type: e.metricType(vd.View),
+ Labels: newLabels(e.defaultLabels, tags),
+ },
+ Resource: resource,
+ Points: []*monitoringpb.Point{newPoint(vd.View, row, vd.Start, vd.End)},
+ }
+ timeSeries = append(timeSeries, ts)
+ if len(timeSeries) == limit {
+ reqs = append(reqs, &monitoringpb.CreateTimeSeriesRequest{
+ Name: monitoring.MetricProjectPath(e.o.ProjectID),
+ TimeSeries: timeSeries,
+ })
+ timeSeries = []*monitoringpb.TimeSeries{}
+ }
+ }
+ }
+ if len(timeSeries) > 0 {
+ reqs = append(reqs, &monitoringpb.CreateTimeSeriesRequest{
+ Name: monitoring.MetricProjectPath(e.o.ProjectID),
+ TimeSeries: timeSeries,
+ })
+ }
+ return reqs
+}
+
+// createMeasure creates a MetricDescriptor for the given view data in Stackdriver Monitoring.
+// An error will be returned if there is already a metric descriptor created with the same name
+// but it has a different aggregation or keys.
+func (e *statsExporter) createMeasure(ctx context.Context, v *view.View) error {
+ e.createdViewsMu.Lock()
+ defer e.createdViewsMu.Unlock()
+
+ m := v.Measure
+ agg := v.Aggregation
+ tagKeys := v.TagKeys
+ viewName := v.Name
+
+ if md, ok := e.createdViews[viewName]; ok {
+ return e.equalMeasureAggTagKeys(md, m, agg, tagKeys)
+ }
+
+ metricType := e.metricType(v)
+ var valueType metricpb.MetricDescriptor_ValueType
+ unit := m.Unit()
+ // Default metric Kind
+ metricKind := metricpb.MetricDescriptor_CUMULATIVE
+
+ switch agg.Type {
+ case view.AggTypeCount:
+ valueType = metricpb.MetricDescriptor_INT64
+ // If the aggregation type is count, which counts the number of recorded measurements, the unit must be "1",
+ // because this view does not apply to the recorded values.
+ unit = stats.UnitDimensionless
+ case view.AggTypeSum:
+ switch m.(type) {
+ case *stats.Int64Measure:
+ valueType = metricpb.MetricDescriptor_INT64
+ case *stats.Float64Measure:
+ valueType = metricpb.MetricDescriptor_DOUBLE
+ }
+ case view.AggTypeDistribution:
+ valueType = metricpb.MetricDescriptor_DISTRIBUTION
+ case view.AggTypeLastValue:
+ metricKind = metricpb.MetricDescriptor_GAUGE
+ switch m.(type) {
+ case *stats.Int64Measure:
+ valueType = metricpb.MetricDescriptor_INT64
+ case *stats.Float64Measure:
+ valueType = metricpb.MetricDescriptor_DOUBLE
+ }
+ default:
+ return fmt.Errorf("unsupported aggregation type: %s", agg.Type.String())
+ }
+
+ var displayName string
+ if e.o.GetMetricDisplayName == nil {
+ displayNamePrefix := defaultDisplayNamePrefix
+ if e.o.MetricPrefix != "" {
+ displayNamePrefix = e.o.MetricPrefix
+ }
+ displayName = path.Join(displayNamePrefix, viewName)
+ } else {
+ displayName = e.o.GetMetricDisplayName(v)
+ }
+
+ md, err := createMetricDescriptor(ctx, e.c, &monitoringpb.CreateMetricDescriptorRequest{
+ Name: fmt.Sprintf("projects/%s", e.o.ProjectID),
+ MetricDescriptor: &metricpb.MetricDescriptor{
+ Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", e.o.ProjectID, metricType),
+ DisplayName: displayName,
+ Description: v.Description,
+ Unit: unit,
+ Type: metricType,
+ MetricKind: metricKind,
+ ValueType: valueType,
+ Labels: newLabelDescriptors(e.defaultLabels, v.TagKeys),
+ },
+ })
+ if err != nil {
+ return err
+ }
+
+ e.createdViews[viewName] = md
+ return nil
+}
+
+func newPoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point {
+ switch v.Aggregation.Type {
+ case view.AggTypeLastValue:
+ return newGaugePoint(v, row, end)
+ default:
+ return newCumulativePoint(v, row, start, end)
+ }
+}
+
+func newCumulativePoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point {
+ return &monitoringpb.Point{
+ Interval: &monitoringpb.TimeInterval{
+ StartTime: &timestamp.Timestamp{
+ Seconds: start.Unix(),
+ Nanos: int32(start.Nanosecond()),
+ },
+ EndTime: &timestamp.Timestamp{
+ Seconds: end.Unix(),
+ Nanos: int32(end.Nanosecond()),
+ },
+ },
+ Value: newTypedValue(v, row),
+ }
+}
+
+func newGaugePoint(v *view.View, row *view.Row, end time.Time) *monitoringpb.Point {
+ gaugeTime := &timestamp.Timestamp{
+ Seconds: end.Unix(),
+ Nanos: int32(end.Nanosecond()),
+ }
+ return &monitoringpb.Point{
+ Interval: &monitoringpb.TimeInterval{
+ EndTime: gaugeTime,
+ },
+ Value: newTypedValue(v, row),
+ }
+}
+
+func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue {
+ switch v := r.Data.(type) {
+ case *view.CountData:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
+ Int64Value: v.Value,
+ }}
+ case *view.SumData:
+ switch vd.Measure.(type) {
+ case *stats.Int64Measure:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
+ Int64Value: int64(v.Value),
+ }}
+ case *stats.Float64Measure:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
+ DoubleValue: v.Value,
+ }}
+ }
+ case *view.DistributionData:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
+ DistributionValue: &distributionpb.Distribution{
+ Count: v.Count,
+ Mean: v.Mean,
+ SumOfSquaredDeviation: v.SumOfSquaredDev,
+ // TODO(songya): uncomment this once Stackdriver supports min/max.
+ // Range: &distributionpb.Distribution_Range{
+ // Min: v.Min,
+ // Max: v.Max,
+ // },
+ BucketOptions: &distributionpb.Distribution_BucketOptions{
+ Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
+ ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
+ Bounds: vd.Aggregation.Buckets,
+ },
+ },
+ },
+ BucketCounts: v.CountPerBucket,
+ },
+ }}
+ case *view.LastValueData:
+ switch vd.Measure.(type) {
+ case *stats.Int64Measure:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
+ Int64Value: int64(v.Value),
+ }}
+ case *stats.Float64Measure:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
+ DoubleValue: v.Value,
+ }}
+ }
+ }
+ return nil
+}
+
+func (e *statsExporter) metricType(v *view.View) string {
+ if formatter := e.o.GetMetricType; formatter != nil {
+ return formatter(v)
+ } else {
+ return path.Join("custom.googleapis.com", "opencensus", v.Name)
+ }
+}
+
+func newLabels(defaults map[string]labelValue, tags []tag.Tag) map[string]string {
+ labels := make(map[string]string)
+ for k, lbl := range defaults {
+ labels[sanitize(k)] = lbl.val
+ }
+ for _, tag := range tags {
+ labels[sanitize(tag.Key.Name())] = tag.Value
+ }
+ return labels
+}
+
+func newLabelDescriptors(defaults map[string]labelValue, keys []tag.Key) []*labelpb.LabelDescriptor {
+ labelDescriptors := make([]*labelpb.LabelDescriptor, 0, len(keys)+len(defaults))
+ for key, lbl := range defaults {
+ labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{
+ Key: sanitize(key),
+ Description: lbl.desc,
+ ValueType: labelpb.LabelDescriptor_STRING,
+ })
+ }
+ for _, key := range keys {
+ labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{
+ Key: sanitize(key.Name()),
+ ValueType: labelpb.LabelDescriptor_STRING, // We only use string tags
+ })
+ }
+ return labelDescriptors
+}
+
+func (e *statsExporter) equalMeasureAggTagKeys(md *metricpb.MetricDescriptor, m stats.Measure, agg *view.Aggregation, keys []tag.Key) error {
+ var aggTypeMatch bool
+ switch md.ValueType {
+ case metricpb.MetricDescriptor_INT64:
+ if _, ok := m.(*stats.Int64Measure); !(ok || agg.Type == view.AggTypeCount) {
+ return fmt.Errorf("stackdriver metric descriptor was not created as int64")
+ }
+ aggTypeMatch = agg.Type == view.AggTypeCount || agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue
+ case metricpb.MetricDescriptor_DOUBLE:
+ if _, ok := m.(*stats.Float64Measure); !ok {
+ return fmt.Errorf("stackdriver metric descriptor was not created as double")
+ }
+ aggTypeMatch = agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue
+ case metricpb.MetricDescriptor_DISTRIBUTION:
+ aggTypeMatch = agg.Type == view.AggTypeDistribution
+ }
+
+ if !aggTypeMatch {
+ return fmt.Errorf("stackdriver metric descriptor was not created with aggregation type %T", agg.Type)
+ }
+
+ labels := make(map[string]struct{}, len(keys)+len(e.defaultLabels))
+ for _, k := range keys {
+ labels[sanitize(k.Name())] = struct{}{}
+ }
+ for k := range e.defaultLabels {
+ labels[sanitize(k)] = struct{}{}
+ }
+
+ for _, k := range md.Labels {
+ if _, ok := labels[k.Key]; !ok {
+ return fmt.Errorf("stackdriver metric descriptor %q was not created with label %q", md.Type, k)
+ }
+ delete(labels, k.Key)
+ }
+
+ if len(labels) > 0 {
+ extra := make([]string, 0, len(labels))
+ for k := range labels {
+ extra = append(extra, k)
+ }
+ return fmt.Errorf("stackdriver metric descriptor %q contains unexpected labels: %s", md.Type, strings.Join(extra, ", "))
+ }
+
+ return nil
+}
+
+var createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
+ return c.CreateMetricDescriptor(ctx, mdr)
+}
+
+var getMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
+ return c.GetMetricDescriptor(ctx, mdr)
+}
+
+var createTimeSeries = func(ctx context.Context, c *monitoring.MetricClient, ts *monitoringpb.CreateTimeSeriesRequest) error {
+ return c.CreateTimeSeries(ctx, ts)
+}
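
By default the code above maps a view to the metric type "custom.googleapis.com/opencensus/<view name>" and the display name "OpenCensus/<view name>". A sketch of overriding both mappings through the hooks defined in Options; the prefixes are illustrative:

```go
package main

import (
	"log"

	"contrib.go.opencensus.io/exporter/stackdriver"
	"go.opencensus.io/stats/view"
)

func main() {
	exporter, err := stackdriver.NewExporter(stackdriver.Options{
		ProjectID: "my-project", // placeholder
		GetMetricType: func(v *view.View) string {
			// Custom metric types are typically kept under custom.googleapis.com/.
			return "custom.googleapis.com/sidecar/" + v.Name // illustrative prefix
		},
		GetMetricDisplayName: func(v *view.View) string {
			return "Sidecar/" + v.Name // illustrative prefix
		},
	})
	if err != nil {
		log.Fatalf("creating Stackdriver exporter: %v", err)
	}
	defer exporter.Flush()
}
```
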
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go
new file mode 100644
index 00000000..71e7f36d
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go
@@ -0,0 +1,178 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stackdriver
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "sync"
+ "time"
+
+ tracingclient "cloud.google.com/go/trace/apiv2"
+ "github.com/golang/protobuf/proto"
+ "go.opencensus.io/trace"
+ "google.golang.org/api/support/bundler"
+ tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2"
+)
+
+// traceExporter is an implementation of trace.Exporter that uploads spans to
+// Stackdriver.
+//
+type traceExporter struct {
+ o Options
+ projectID string
+ bundler *bundler.Bundler
+ // uploadFn defaults to uploadSpans; it can be replaced for tests.
+ uploadFn func(spans []*tracepb.Span)
+ overflowLogger
+ client *tracingclient.Client
+}
+
+var _ trace.Exporter = (*traceExporter)(nil)
+
+func newTraceExporter(o Options) (*traceExporter, error) {
+ ctx := o.Context
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ client, err := tracingclient.NewClient(ctx, o.TraceClientOptions...)
+ if err != nil {
+ return nil, fmt.Errorf("stackdriver: couldn't initialize trace client: %v", err)
+ }
+ return newTraceExporterWithClient(o, client), nil
+}
+
+const defaultBufferedByteLimit = 8 * 1024 * 1024
+
+func newTraceExporterWithClient(o Options, c *tracingclient.Client) *traceExporter {
+ e := &traceExporter{
+ projectID: o.ProjectID,
+ client: c,
+ o: o,
+ }
+ b := bundler.NewBundler((*tracepb.Span)(nil), func(bundle interface{}) {
+ e.uploadFn(bundle.([]*tracepb.Span))
+ })
+ if o.BundleDelayThreshold > 0 {
+ b.DelayThreshold = o.BundleDelayThreshold
+ } else {
+ b.DelayThreshold = 2 * time.Second
+ }
+ if o.BundleCountThreshold > 0 {
+ b.BundleCountThreshold = o.BundleCountThreshold
+ } else {
+ b.BundleCountThreshold = 50
+ }
+ // The measured "bytes" are not really bytes, see exportReceiver.
+ b.BundleByteThreshold = b.BundleCountThreshold * 200
+ b.BundleByteLimit = b.BundleCountThreshold * 1000
+ if o.TraceSpansBufferMaxBytes > 0 {
+ b.BufferedByteLimit = o.TraceSpansBufferMaxBytes
+ } else {
+ b.BufferedByteLimit = defaultBufferedByteLimit
+ }
+
+ e.bundler = b
+ e.uploadFn = e.uploadSpans
+ return e
+}
+
+// ExportSpan exports a SpanData to Stackdriver Trace.
+func (e *traceExporter) ExportSpan(s *trace.SpanData) {
+ protoSpan := protoFromSpanData(s, e.projectID, e.o.Resource)
+ protoSize := proto.Size(protoSpan)
+ err := e.bundler.Add(protoSpan, protoSize)
+ switch err {
+ case nil:
+ return
+ case bundler.ErrOversizedItem:
+ case bundler.ErrOverflow:
+ e.overflowLogger.log()
+ default:
+ e.o.handleError(err)
+ }
+}
+
+// Flush waits for exported trace spans to be uploaded.
+//
+// This is useful if your program is ending and you do not want to lose recent
+// spans.
+func (e *traceExporter) Flush() {
+ e.bundler.Flush()
+}
+
+// uploadSpans uploads a set of spans to Stackdriver.
+func (e *traceExporter) uploadSpans(spans []*tracepb.Span) {
+ req := tracepb.BatchWriteSpansRequest{
+ Name: "projects/" + e.projectID,
+ Spans: spans,
+ }
+ // Create a never-sampled span so the upload itself does not generate traces associated with the exporter.
+ ctx, cancel := e.o.newContextWithTimeout()
+ defer cancel()
+ ctx, span := trace.StartSpan(
+ ctx,
+ "contrib.go.opencensus.io/exporter/stackdriver.uploadSpans",
+ trace.WithSampler(trace.NeverSample()),
+ )
+ defer span.End()
+ span.AddAttributes(trace.Int64Attribute("num_spans", int64(len(spans))))
+
+ err := e.client.BatchWriteSpans(ctx, &req)
+ if err != nil {
+ span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
+ e.o.handleError(err)
+ }
+}
+
+// overflowLogger ensures that at most one overflow error log message is
+// written every 5 seconds.
+type overflowLogger struct {
+ mu sync.Mutex
+ pause bool
+ accum int
+}
+
+func (o *overflowLogger) delay() {
+ o.pause = true
+ time.AfterFunc(5*time.Second, func() {
+ o.mu.Lock()
+ defer o.mu.Unlock()
+ switch {
+ case o.accum == 0:
+ o.pause = false
+ case o.accum == 1:
+ log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full")
+ o.accum = 0
+ o.delay()
+ default:
+ log.Printf("OpenCensus Stackdriver exporter: failed to upload %d spans: buffer full", o.accum)
+ o.accum = 0
+ o.delay()
+ }
+ })
+}
+
+func (o *overflowLogger) log() {
+ o.mu.Lock()
+ defer o.mu.Unlock()
+ if !o.pause {
+ log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full")
+ o.delay()
+ } else {
+ o.accum++
+ }
+}
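Note on the file above: the bundler knobs it reads (BundleDelayThreshold, BundleCountThreshold, TraceSpansBufferMaxBytes) are the only trace-upload settings taken from Options in this file. A minimal, illustrative sketch of tuning them from application code, assuming the package's public NewExporter constructor defined in a sibling file of this vendored package; the values are examples, not recommendations:

package main

import (
	"log"
	"time"

	stackdriver "contrib.go.opencensus.io/exporter/stackdriver"
	"go.opencensus.io/trace"
)

func main() {
	exporter, err := stackdriver.NewExporter(stackdriver.Options{
		ProjectID:                "example-project", // illustrative project ID
		BundleDelayThreshold:     5 * time.Second,   // flush buffered spans at least every 5s
		BundleCountThreshold:     100,               // ...or once 100 spans are buffered
		TraceSpansBufferMaxBytes: 16 * 1024 * 1024,  // raise the 8 MiB default buffer limit
	})
	if err != nil {
		log.Fatal(err)
	}
	trace.RegisterExporter(exporter)
	defer exporter.Flush()
}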
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go
new file mode 100644
index 00000000..2d259cf3
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go
@@ -0,0 +1,277 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stackdriver
+
+import (
+ "fmt"
+ "math"
+ "time"
+ "unicode/utf8"
+
+ timestamppb "github.com/golang/protobuf/ptypes/timestamp"
+ wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
+ "go.opencensus.io/plugin/ochttp"
+ "go.opencensus.io/trace"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2"
+ statuspb "google.golang.org/genproto/googleapis/rpc/status"
+)
+
+const (
+ maxAnnotationEventsPerSpan = 32
+ maxMessageEventsPerSpan = 128
+ maxAttributeStringValue = 256
+ agentLabel = "g.co/agent"
+
+ labelHTTPHost = `/http/host`
+ labelHTTPMethod = `/http/method`
+ labelHTTPStatusCode = `/http/status_code`
+ labelHTTPPath = `/http/path`
+ labelHTTPUserAgent = `/http/user_agent`
+)
+
+// protoFromSpanData returns a protocol buffer representation of a SpanData.
+func protoFromSpanData(s *trace.SpanData, projectID string, mr *monitoredrespb.MonitoredResource) *tracepb.Span {
+ if s == nil {
+ return nil
+ }
+
+ traceIDString := s.SpanContext.TraceID.String()
+ spanIDString := s.SpanContext.SpanID.String()
+
+ name := s.Name
+ switch s.SpanKind {
+ case trace.SpanKindClient:
+ name = "Sent." + name
+ case trace.SpanKindServer:
+ name = "Recv." + name
+ }
+
+ sp := &tracepb.Span{
+ Name: "projects/" + projectID + "/traces/" + traceIDString + "/spans/" + spanIDString,
+ SpanId: spanIDString,
+ DisplayName: trunc(name, 128),
+ StartTime: timestampProto(s.StartTime),
+ EndTime: timestampProto(s.EndTime),
+ SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: !s.HasRemoteParent},
+ }
+ if p := s.ParentSpanID; p != (trace.SpanID{}) {
+ sp.ParentSpanId = p.String()
+ }
+ if s.Status.Code != 0 || s.Status.Message != "" {
+ sp.Status = &statuspb.Status{Code: s.Status.Code, Message: s.Status.Message}
+ }
+
+ var annotations, droppedAnnotationsCount, messageEvents, droppedMessageEventsCount int
+ copyAttributes(&sp.Attributes, s.Attributes)
+
+ // Copy MonitoredResources as span Attributes
+ sp.Attributes = copyMonitoredResourceAttributes(sp.Attributes, mr)
+
+ as := s.Annotations
+ for i, a := range as {
+ if annotations >= maxAnnotationEventsPerSpan {
+ droppedAnnotationsCount = len(as) - i
+ break
+ }
+ annotation := &tracepb.Span_TimeEvent_Annotation{Description: trunc(a.Message, maxAttributeStringValue)}
+ copyAttributes(&annotation.Attributes, a.Attributes)
+ event := &tracepb.Span_TimeEvent{
+ Time: timestampProto(a.Time),
+ Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: annotation},
+ }
+ annotations++
+ if sp.TimeEvents == nil {
+ sp.TimeEvents = &tracepb.Span_TimeEvents{}
+ }
+ sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, event)
+ }
+
+ if sp.Attributes == nil {
+ sp.Attributes = &tracepb.Span_Attributes{
+ AttributeMap: make(map[string]*tracepb.AttributeValue),
+ }
+ }
+ sp.Attributes.AttributeMap[agentLabel] = &tracepb.AttributeValue{
+ Value: &tracepb.AttributeValue_StringValue{
+ StringValue: trunc(userAgent, maxAttributeStringValue),
+ },
+ }
+
+ es := s.MessageEvents
+ for i, e := range es {
+ if messageEvents >= maxMessageEventsPerSpan {
+ droppedMessageEventsCount = len(es) - i
+ break
+ }
+ messageEvents++
+ if sp.TimeEvents == nil {
+ sp.TimeEvents = &tracepb.Span_TimeEvents{}
+ }
+ sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, &tracepb.Span_TimeEvent{
+ Time: timestampProto(e.Time),
+ Value: &tracepb.Span_TimeEvent_MessageEvent_{
+ MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{
+ Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType),
+ Id: e.MessageID,
+ UncompressedSizeBytes: e.UncompressedByteSize,
+ CompressedSizeBytes: e.CompressedByteSize,
+ },
+ },
+ })
+ }
+
+ if droppedAnnotationsCount != 0 || droppedMessageEventsCount != 0 {
+ if sp.TimeEvents == nil {
+ sp.TimeEvents = &tracepb.Span_TimeEvents{}
+ }
+ sp.TimeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount)
+ sp.TimeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount)
+ }
+
+ if len(s.Links) > 0 {
+ sp.Links = &tracepb.Span_Links{}
+ sp.Links.Link = make([]*tracepb.Span_Link, 0, len(s.Links))
+ for _, l := range s.Links {
+ link := &tracepb.Span_Link{
+ TraceId: l.TraceID.String(),
+ SpanId: l.SpanID.String(),
+ Type: tracepb.Span_Link_Type(l.Type),
+ }
+ copyAttributes(&link.Attributes, l.Attributes)
+ sp.Links.Link = append(sp.Links.Link, link)
+ }
+ }
+ return sp
+}
+
+// timestampProto creates a timestamp proto for a time.Time.
+func timestampProto(t time.Time) *timestamppb.Timestamp {
+ return &timestamppb.Timestamp{
+ Seconds: t.Unix(),
+ Nanos: int32(t.Nanosecond()),
+ }
+}
+
+// copyMonitoredResourceAttributes copies proto monitoredResource to proto map field (Span_Attributes)
+// it creates the map if it is nil.
+func copyMonitoredResourceAttributes(out *tracepb.Span_Attributes, mr *monitoredrespb.MonitoredResource) *tracepb.Span_Attributes {
+ if mr == nil {
+ return out
+ }
+ if out == nil {
+ out = &tracepb.Span_Attributes{}
+ }
+ if out.AttributeMap == nil {
+ out.AttributeMap = make(map[string]*tracepb.AttributeValue)
+ }
+ for k, v := range mr.Labels {
+ av := attributeValue(v)
+ out.AttributeMap[fmt.Sprintf("g.co/r/%s/%s", mr.Type, k)] = av
+ }
+ return out
+}
+
+// copyAttributes copies a map of attributes to a proto map field.
+// It creates the map if it is nil.
+func copyAttributes(out **tracepb.Span_Attributes, in map[string]interface{}) {
+ if len(in) == 0 {
+ return
+ }
+ if *out == nil {
+ *out = &tracepb.Span_Attributes{}
+ }
+ if (*out).AttributeMap == nil {
+ (*out).AttributeMap = make(map[string]*tracepb.AttributeValue)
+ }
+ var dropped int32
+ for key, value := range in {
+ av := attributeValue(value)
+ if av == nil {
+ continue
+ }
+ switch key {
+ case ochttp.PathAttribute:
+ (*out).AttributeMap[labelHTTPPath] = av
+ case ochttp.HostAttribute:
+ (*out).AttributeMap[labelHTTPHost] = av
+ case ochttp.MethodAttribute:
+ (*out).AttributeMap[labelHTTPMethod] = av
+ case ochttp.UserAgentAttribute:
+ (*out).AttributeMap[labelHTTPUserAgent] = av
+ case ochttp.StatusCodeAttribute:
+ (*out).AttributeMap[labelHTTPStatusCode] = av
+ default:
+ if len(key) > 128 {
+ dropped++
+ continue
+ }
+ (*out).AttributeMap[key] = av
+ }
+ }
+ (*out).DroppedAttributesCount = dropped
+}
+
+func attributeValue(v interface{}) *tracepb.AttributeValue {
+ switch value := v.(type) {
+ case bool:
+ return &tracepb.AttributeValue{
+ Value: &tracepb.AttributeValue_BoolValue{BoolValue: value},
+ }
+ case int64:
+ return &tracepb.AttributeValue{
+ Value: &tracepb.AttributeValue_IntValue{IntValue: value},
+ }
+ case string:
+ return &tracepb.AttributeValue{
+ Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(value, maxAttributeStringValue)},
+ }
+ }
+ return nil
+}
+
+// trunc returns a TruncatableString truncated to the given limit.
+func trunc(s string, limit int) *tracepb.TruncatableString {
+ if len(s) > limit {
+ b := []byte(s[:limit])
+ for {
+ r, size := utf8.DecodeLastRune(b)
+ if r == utf8.RuneError && size == 1 {
+ b = b[:len(b)-1]
+ } else {
+ break
+ }
+ }
+ return &tracepb.TruncatableString{
+ Value: string(b),
+ TruncatedByteCount: clip32(len(s) - len(b)),
+ }
+ }
+ return &tracepb.TruncatableString{
+ Value: s,
+ TruncatedByteCount: 0,
+ }
+}
+
+// clip32 clips an int to the range of an int32.
+func clip32(x int) int32 {
+ if x < math.MinInt32 {
+ return math.MinInt32
+ }
+ if x > math.MaxInt32 {
+ return math.MaxInt32
+ }
+ return int32(x)
+}
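Note on the file above: the trunc helper cuts strings at a byte limit and then backs up over any partial UTF-8 rune so the result stays valid. A standalone illustration of that rule (truncateUTF8 is a hypothetical helper written here for demonstration, not part of this package):

package main

import (
	"fmt"
	"unicode/utf8"
)

// truncateUTF8 mirrors the truncation rule used by trunc above: cut at the
// byte limit, then drop trailing bytes until the string ends on a whole rune.
func truncateUTF8(s string, limit int) string {
	if len(s) <= limit {
		return s
	}
	b := []byte(s[:limit])
	for len(b) > 0 {
		r, size := utf8.DecodeLastRune(b)
		if r == utf8.RuneError && size == 1 {
			b = b[:len(b)-1]
			continue
		}
		break
	}
	return string(b)
}

func main() {
	fmt.Println(truncateUTF8("héllo", 2)) // "h": the cut would split 'é', so its dangling first byte is dropped
}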
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
index 0f646931..1b1b1921 100644
--- a/vendor/github.com/golang/protobuf/LICENSE
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -1,4 +1,7 @@
+Go support for Protocol Buffers - Google's data interchange format
+
Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
new file mode 100644
index 00000000..4cfe6081
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
@@ -0,0 +1,380 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/struct.proto
+
+/*
+Package structpb is a generated protocol buffer package.
+
+It is generated from these files:
+ google/protobuf/struct.proto
+
+It has these top-level messages:
+ Struct
+ Value
+ ListValue
+*/
+package structpb
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+type NullValue int32
+
+const (
+ // Null value.
+ NullValue_NULL_VALUE NullValue = 0
+)
+
+var NullValue_name = map[int32]string{
+ 0: "NULL_VALUE",
+}
+var NullValue_value = map[string]int32{
+ "NULL_VALUE": 0,
+}
+
+func (x NullValue) String() string {
+ return proto.EnumName(NullValue_name, int32(x))
+}
+func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (NullValue) XXX_WellKnownType() string { return "NullValue" }
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+type Struct struct {
+ // Unordered map of dynamically typed values.
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *Struct) Reset() { *m = Struct{} }
+func (m *Struct) String() string { return proto.CompactTextString(m) }
+func (*Struct) ProtoMessage() {}
+func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Struct) XXX_WellKnownType() string { return "Struct" }
+
+func (m *Struct) GetFields() map[string]*Value {
+ if m != nil {
+ return m.Fields
+ }
+ return nil
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+type Value struct {
+ // The kind of value.
+ //
+ // Types that are valid to be assigned to Kind:
+ // *Value_NullValue
+ // *Value_NumberValue
+ // *Value_StringValue
+ // *Value_BoolValue
+ // *Value_StructValue
+ // *Value_ListValue
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (m *Value) Reset() { *m = Value{} }
+func (m *Value) String() string { return proto.CompactTextString(m) }
+func (*Value) ProtoMessage() {}
+func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (*Value) XXX_WellKnownType() string { return "Value" }
+
+type isValue_Kind interface {
+ isValue_Kind()
+}
+
+type Value_NullValue struct {
+ NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"`
+}
+type Value_NumberValue struct {
+ NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,oneof"`
+}
+type Value_StringValue struct {
+ StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"`
+}
+type Value_BoolValue struct {
+ BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"`
+}
+type Value_StructValue struct {
+ StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"`
+}
+type Value_ListValue struct {
+ ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+func (*Value_NumberValue) isValue_Kind() {}
+func (*Value_StringValue) isValue_Kind() {}
+func (*Value_BoolValue) isValue_Kind() {}
+func (*Value_StructValue) isValue_Kind() {}
+func (*Value_ListValue) isValue_Kind() {}
+
+func (m *Value) GetKind() isValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (m *Value) GetNullValue() NullValue {
+ if x, ok := m.GetKind().(*Value_NullValue); ok {
+ return x.NullValue
+ }
+ return NullValue_NULL_VALUE
+}
+
+func (m *Value) GetNumberValue() float64 {
+ if x, ok := m.GetKind().(*Value_NumberValue); ok {
+ return x.NumberValue
+ }
+ return 0
+}
+
+func (m *Value) GetStringValue() string {
+ if x, ok := m.GetKind().(*Value_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (m *Value) GetBoolValue() bool {
+ if x, ok := m.GetKind().(*Value_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (m *Value) GetStructValue() *Struct {
+ if x, ok := m.GetKind().(*Value_StructValue); ok {
+ return x.StructValue
+ }
+ return nil
+}
+
+func (m *Value) GetListValue() *ListValue {
+ if x, ok := m.GetKind().(*Value_ListValue); ok {
+ return x.ListValue
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{
+ (*Value_NullValue)(nil),
+ (*Value_NumberValue)(nil),
+ (*Value_StringValue)(nil),
+ (*Value_BoolValue)(nil),
+ (*Value_StructValue)(nil),
+ (*Value_ListValue)(nil),
+ }
+}
+
+func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Value)
+ // kind
+ switch x := m.Kind.(type) {
+ case *Value_NullValue:
+ b.EncodeVarint(1<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.NullValue))
+ case *Value_NumberValue:
+ b.EncodeVarint(2<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.NumberValue))
+ case *Value_StringValue:
+ b.EncodeVarint(3<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.StringValue)
+ case *Value_BoolValue:
+ t := uint64(0)
+ if x.BoolValue {
+ t = 1
+ }
+ b.EncodeVarint(4<<3 | proto.WireVarint)
+ b.EncodeVarint(t)
+ case *Value_StructValue:
+ b.EncodeVarint(5<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.StructValue); err != nil {
+ return err
+ }
+ case *Value_ListValue:
+ b.EncodeVarint(6<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ListValue); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("Value.Kind has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Value)
+ switch tag {
+ case 1: // kind.null_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Kind = &Value_NullValue{NullValue(x)}
+ return true, err
+ case 2: // kind.number_value
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Kind = &Value_NumberValue{math.Float64frombits(x)}
+ return true, err
+ case 3: // kind.string_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Kind = &Value_StringValue{x}
+ return true, err
+ case 4: // kind.bool_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Kind = &Value_BoolValue{x != 0}
+ return true, err
+ case 5: // kind.struct_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Struct)
+ err := b.DecodeMessage(msg)
+ m.Kind = &Value_StructValue{msg}
+ return true, err
+ case 6: // kind.list_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(ListValue)
+ err := b.DecodeMessage(msg)
+ m.Kind = &Value_ListValue{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Value_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Value)
+ // kind
+ switch x := m.Kind.(type) {
+ case *Value_NullValue:
+ n += proto.SizeVarint(1<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.NullValue))
+ case *Value_NumberValue:
+ n += proto.SizeVarint(2<<3 | proto.WireFixed64)
+ n += 8
+ case *Value_StringValue:
+ n += proto.SizeVarint(3<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.StringValue)))
+ n += len(x.StringValue)
+ case *Value_BoolValue:
+ n += proto.SizeVarint(4<<3 | proto.WireVarint)
+ n += 1
+ case *Value_StructValue:
+ s := proto.Size(x.StructValue)
+ n += proto.SizeVarint(5<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Value_ListValue:
+ s := proto.Size(x.ListValue)
+ n += proto.SizeVarint(6<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+type ListValue struct {
+ // Repeated field of dynamically typed values.
+ Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
+}
+
+func (m *ListValue) Reset() { *m = ListValue{} }
+func (m *ListValue) String() string { return proto.CompactTextString(m) }
+func (*ListValue) ProtoMessage() {}
+func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
+
+func (m *ListValue) GetValues() []*Value {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
+ proto.RegisterType((*Value)(nil), "google.protobuf.Value")
+ proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
+ proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
+}
+
+func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 417 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
+ 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
+ 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
+ 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
+ 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
+ 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
+ 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
+ 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
+ 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
+ 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
+ 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
+ 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
+ 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
+ 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
+ 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
+ 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
+ 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
+ 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
+ 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
+ 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
+ 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
+ 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
+ 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
+ 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
+ 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
+ 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
+ 0x00,
+}
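Usage reference for the generated types above: a small self-contained sketch that builds a Struct by hand (the field values are arbitrary):

package main

import (
	"fmt"

	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	// Build the JSON-like object {"enabled": true, "name": "sidecar"} using
	// the generated Struct/Value oneof wrappers.
	s := &structpb.Struct{
		Fields: map[string]*structpb.Value{
			"enabled": {Kind: &structpb.Value_BoolValue{BoolValue: true}},
			"name":    {Kind: &structpb.Value_StringValue{StringValue: "sidecar"}},
		},
	}
	fmt.Println(s.GetFields()["name"].GetStringValue()) // sidecar
}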
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
new file mode 100644
index 00000000..7d7808e7
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
@@ -0,0 +1,96 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "StructProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+message Struct {
+ // Unordered map of dynamically typed values.
+ map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+message Value {
+ // The kind of value.
+ oneof kind {
+ // Represents a null value.
+ NullValue null_value = 1;
+ // Represents a double value.
+ double number_value = 2;
+ // Represents a string value.
+ string string_value = 3;
+ // Represents a boolean value.
+ bool bool_value = 4;
+ // Represents a structured value.
+ Struct struct_value = 5;
+ // Represents a repeated `Value`.
+ ListValue list_value = 6;
+ }
+}
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+enum NullValue {
+ // Null value.
+ NULL_VALUE = 0;
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+message ListValue {
+ // Repeated field of dynamically typed values.
+ repeated Value values = 1;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
new file mode 100644
index 00000000..0ed59bf1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
@@ -0,0 +1,260 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/wrappers.proto
+
+/*
+Package wrappers is a generated protocol buffer package.
+
+It is generated from these files:
+ google/protobuf/wrappers.proto
+
+It has these top-level messages:
+ DoubleValue
+ FloatValue
+ Int64Value
+ UInt64Value
+ Int32Value
+ UInt32Value
+ BoolValue
+ StringValue
+ BytesValue
+*/
+package wrappers
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+type DoubleValue struct {
+ // The double value.
+ Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *DoubleValue) Reset() { *m = DoubleValue{} }
+func (m *DoubleValue) String() string { return proto.CompactTextString(m) }
+func (*DoubleValue) ProtoMessage() {}
+func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" }
+
+func (m *DoubleValue) GetValue() float64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+type FloatValue struct {
+ // The float value.
+ Value float32 `protobuf:"fixed32,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *FloatValue) Reset() { *m = FloatValue{} }
+func (m *FloatValue) String() string { return proto.CompactTextString(m) }
+func (*FloatValue) ProtoMessage() {}
+func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" }
+
+func (m *FloatValue) GetValue() float32 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+type Int64Value struct {
+ // The int64 value.
+ Value int64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *Int64Value) Reset() { *m = Int64Value{} }
+func (m *Int64Value) String() string { return proto.CompactTextString(m) }
+func (*Int64Value) ProtoMessage() {}
+func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" }
+
+func (m *Int64Value) GetValue() int64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+type UInt64Value struct {
+ // The uint64 value.
+ Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *UInt64Value) Reset() { *m = UInt64Value{} }
+func (m *UInt64Value) String() string { return proto.CompactTextString(m) }
+func (*UInt64Value) ProtoMessage() {}
+func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" }
+
+func (m *UInt64Value) GetValue() uint64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+type Int32Value struct {
+ // The int32 value.
+ Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *Int32Value) Reset() { *m = Int32Value{} }
+func (m *Int32Value) String() string { return proto.CompactTextString(m) }
+func (*Int32Value) ProtoMessage() {}
+func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" }
+
+func (m *Int32Value) GetValue() int32 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+type UInt32Value struct {
+ // The uint32 value.
+ Value uint32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *UInt32Value) Reset() { *m = UInt32Value{} }
+func (m *UInt32Value) String() string { return proto.CompactTextString(m) }
+func (*UInt32Value) ProtoMessage() {}
+func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" }
+
+func (m *UInt32Value) GetValue() uint32 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+type BoolValue struct {
+ // The bool value.
+ Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *BoolValue) Reset() { *m = BoolValue{} }
+func (m *BoolValue) String() string { return proto.CompactTextString(m) }
+func (*BoolValue) ProtoMessage() {}
+func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" }
+
+func (m *BoolValue) GetValue() bool {
+ if m != nil {
+ return m.Value
+ }
+ return false
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+type StringValue struct {
+ // The string value.
+ Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *StringValue) Reset() { *m = StringValue{} }
+func (m *StringValue) String() string { return proto.CompactTextString(m) }
+func (*StringValue) ProtoMessage() {}
+func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (*StringValue) XXX_WellKnownType() string { return "StringValue" }
+
+func (m *StringValue) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+type BytesValue struct {
+ // The bytes value.
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *BytesValue) Reset() { *m = BytesValue{} }
+func (m *BytesValue) String() string { return proto.CompactTextString(m) }
+func (*BytesValue) ProtoMessage() {}
+func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" }
+
+func (m *BytesValue) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue")
+ proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue")
+ proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value")
+ proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value")
+ proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value")
+ proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value")
+ proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue")
+ proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
+ proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
+}
+
+func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 259 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c,
+ 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca,
+ 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c,
+ 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5,
+ 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13,
+ 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8,
+ 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca,
+ 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a,
+ 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d,
+ 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24,
+ 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f,
+ 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c,
+ 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e,
+ 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b,
+ 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe,
+ 0x01, 0x00, 0x00,
+}
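Note on the file above: these wrapper messages let a field distinguish "unset" from the type's zero value; trace_proto.go above relies on BoolValue for SameProcessAsParentSpan. A brief illustration:

package main

import (
	"fmt"

	wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	var unset *wrapperspb.BoolValue            // field left unpopulated
	set := &wrapperspb.BoolValue{Value: false} // field explicitly set to false

	// The generated GetValue is nil-safe, so both read as false,
	// but only the second value is actually present.
	fmt.Println(unset.GetValue(), unset != nil) // false false
	fmt.Println(set.GetValue(), set != nil)     // false true
}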
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
new file mode 100644
index 00000000..01947639
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
@@ -0,0 +1,118 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Wrappers for primitive (non-message) types. These types are useful
+// for embedding primitives in the `google.protobuf.Any` type and for places
+// where we need to distinguish between the absence of a primitive
+// typed field and its default value.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/wrappers";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "WrappersProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+message DoubleValue {
+ // The double value.
+ double value = 1;
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+message FloatValue {
+ // The float value.
+ float value = 1;
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+message Int64Value {
+ // The int64 value.
+ int64 value = 1;
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+message UInt64Value {
+ // The uint64 value.
+ uint64 value = 1;
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+message Int32Value {
+ // The int32 value.
+ int32 value = 1;
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+message UInt32Value {
+ // The uint32 value.
+ uint32 value = 1;
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+message BoolValue {
+ // The bool value.
+ bool value = 1;
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+message StringValue {
+ // The string value.
+ string value = 1;
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+message BytesValue {
+ // The bytes value.
+ bytes value = 1;
+}
diff --git a/vendor/github.com/googleapis/gax-go/LICENSE b/vendor/github.com/googleapis/gax-go/LICENSE
new file mode 100644
index 00000000..6d16b657
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2016, Google Inc.
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go
new file mode 100644
index 00000000..b1d53dd1
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go
@@ -0,0 +1,161 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+ "math/rand"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// CallOption is an option used by Invoke to control behaviors of RPC calls.
+// CallOption works by modifying relevant fields of CallSettings.
+type CallOption interface {
+ // Resolve applies the option by modifying cs.
+ Resolve(cs *CallSettings)
+}
+
+// Retryer is used by Invoke to determine retry behavior.
+type Retryer interface {
+ // Retry reports whether a request should be retried and how long to pause before retrying
+ // if the previous attempt returned with err. Invoke never calls Retry with nil error.
+ Retry(err error) (pause time.Duration, shouldRetry bool)
+}
+
+type retryerOption func() Retryer
+
+func (o retryerOption) Resolve(s *CallSettings) {
+ s.Retry = o
+}
+
+// WithRetry sets CallSettings.Retry to fn.
+func WithRetry(fn func() Retryer) CallOption {
+ return retryerOption(fn)
+}
+
+// OnCodes returns a Retryer that retries if and only if
+// the previous attempt returns a GRPC error whose error code is stored in cc.
+// Pause times between retries are specified by bo.
+//
+// bo is only used for its parameters; each Retryer has its own copy.
+func OnCodes(cc []codes.Code, bo Backoff) Retryer {
+ return &boRetryer{
+ backoff: bo,
+ codes: append([]codes.Code(nil), cc...),
+ }
+}
+
+type boRetryer struct {
+ backoff Backoff
+ codes []codes.Code
+}
+
+func (r *boRetryer) Retry(err error) (time.Duration, bool) {
+ st, ok := status.FromError(err)
+ if !ok {
+ return 0, false
+ }
+ c := st.Code()
+ for _, rc := range r.codes {
+ if c == rc {
+ return r.backoff.Pause(), true
+ }
+ }
+ return 0, false
+}
+
+// Backoff implements exponential backoff.
+// The wait time between retries is a random value between 0 and the "retry envelope".
+// The envelope starts at Initial and increases by the factor of Multiplier every retry,
+// but is capped at Max.
+type Backoff struct {
+ // Initial is the initial value of the retry envelope, defaults to 1 second.
+ Initial time.Duration
+
+ // Max is the maximum value of the retry envelope, defaults to 30 seconds.
+ Max time.Duration
+
+ // Multiplier is the factor by which the retry envelope increases.
+ // It should be greater than 1 and defaults to 2.
+ Multiplier float64
+
+ // cur is the current retry envelope
+ cur time.Duration
+}
+
+// Pause returns the next time.Duration that the caller should use to backoff.
+func (bo *Backoff) Pause() time.Duration {
+ if bo.Initial == 0 {
+ bo.Initial = time.Second
+ }
+ if bo.cur == 0 {
+ bo.cur = bo.Initial
+ }
+ if bo.Max == 0 {
+ bo.Max = 30 * time.Second
+ }
+ if bo.Multiplier < 1 {
+ bo.Multiplier = 2
+ }
+ // Select a duration between 1ns and the current max. It might seem
+ // counterintuitive to have so much jitter, but
+ // https://www.awsarchitectureblog.com/2015/03/backoff.html argues that
+ // that is the best strategy.
+ d := time.Duration(1 + rand.Int63n(int64(bo.cur)))
+ bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier)
+ if bo.cur > bo.Max {
+ bo.cur = bo.Max
+ }
+ return d
+}
+
+type grpcOpt []grpc.CallOption
+
+func (o grpcOpt) Resolve(s *CallSettings) {
+ s.GRPC = o
+}
+
+// WithGRPCOptions allows passing gRPC call options during client creation.
+func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
+ return grpcOpt(append([]grpc.CallOption(nil), opt...))
+}
+
+// CallSettings allow fine-grained control over how calls are made.
+type CallSettings struct {
+ // Retry returns a Retryer to be used to control retry logic of a method call.
+ // If Retry is nil or the returned Retryer is nil, the call will not be retried.
+ Retry func() Retryer
+
+ // CallOptions to be forwarded to GRPC.
+ GRPC []grpc.CallOption
+}
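Tying the pieces of this file together: OnCodes plus Backoff yields a Retryer, WithRetry wraps it as a CallOption, and Invoke (vendored below in invoke.go) applies it around an RPC. A hedged sketch; the call body is a stand-in, not an API from this repository:

package main

import (
	"context"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/codes"
)

func main() {
	retry := gax.WithRetry(func() gax.Retryer {
		// Retry only UNAVAILABLE, with a jittered exponential envelope
		// starting at 100ms, doubling per attempt, and capped at 10s.
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    100 * time.Millisecond,
			Max:        10 * time.Second,
			Multiplier: 2,
		})
	})

	_ = gax.Invoke(context.Background(), func(ctx context.Context, _ gax.CallSettings) error {
		return nil // stand-in for the actual gRPC call a generated client would make
	}, retry)
}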
diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go
new file mode 100644
index 00000000..8040dcb0
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/gax.go
@@ -0,0 +1,39 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package gax contains a set of modules which aid the development of APIs
+// for clients and servers based on gRPC and Google API conventions.
+//
+// Application code will rarely need to use this library directly.
+// However, code generated automatically from API definition files can use it
+// to simplify code generation and to provide more convenient and idiomatic API surfaces.
+package gax
+
+// Version specifies the gax-go version being used.
+const Version = "2.0.3"
diff --git a/vendor/github.com/googleapis/gax-go/v2/go.mod b/vendor/github.com/googleapis/gax-go/v2/go.mod
new file mode 100644
index 00000000..0c91100b
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/go.mod
@@ -0,0 +1,5 @@
+module github.com/googleapis/gax-go/v2
+
+go 1.12
+
+require google.golang.org/grpc v1.19.0
diff --git a/vendor/github.com/googleapis/gax-go/v2/go.sum b/vendor/github.com/googleapis/gax-go/v2/go.sum
new file mode 100644
index 00000000..7fa23ecf
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/go.sum
@@ -0,0 +1,25 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go
new file mode 100644
index 00000000..139371a0
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/header.go
@@ -0,0 +1,53 @@
+// Copyright 2018, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import "bytes"
+
+// XGoogHeader is for use by the Google Cloud Libraries only.
+//
+// XGoogHeader formats key-value pairs.
+// The resulting string is suitable for x-goog-api-client header.
+func XGoogHeader(keyval ...string) string {
+ if len(keyval) == 0 {
+ return ""
+ }
+ if len(keyval)%2 != 0 {
+ panic("gax.Header: odd argument count")
+ }
+ var buf bytes.Buffer
+ for i := 0; i < len(keyval); i += 2 {
+ buf.WriteByte(' ')
+ buf.WriteString(keyval[i])
+ buf.WriteByte('/')
+ buf.WriteString(keyval[i+1])
+ }
+ return buf.String()[1:]
+}
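
As a point of reference, here is a minimal sketch of how XGoogHeader might be called; the key/value pairs shown are illustrative, not values produced anywhere in this diff.

```go
package main

import (
	"fmt"

	gax "github.com/googleapis/gax-go/v2"
)

func main() {
	// Keys and versions are hypothetical examples; real callers pass
	// client-library and runtime identifiers.
	h := gax.XGoogHeader("gl-go", "1.11.5", "gax", "2.0.0")
	fmt.Println(h) // prints: gl-go/1.11.5 gax/2.0.0

	// The returned string is typically attached to outgoing requests as
	// the value of the "x-goog-api-client" header.
}
```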
diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go
new file mode 100644
index 00000000..fe31dd00
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/invoke.go
@@ -0,0 +1,99 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+ "context"
+ "strings"
+ "time"
+)
+
+// APICall is a user defined call stub.
+type APICall func(context.Context, CallSettings) error
+
+// Invoke calls the given APICall,
+// performing retries as specified by opts, if any.
+func Invoke(ctx context.Context, call APICall, opts ...CallOption) error {
+ var settings CallSettings
+ for _, opt := range opts {
+ opt.Resolve(&settings)
+ }
+ return invoke(ctx, call, settings, Sleep)
+}
+
+// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing.
+// If interrupted, Sleep returns ctx.Err().
+func Sleep(ctx context.Context, d time.Duration) error {
+ t := time.NewTimer(d)
+ select {
+ case <-ctx.Done():
+ t.Stop()
+ return ctx.Err()
+ case <-t.C:
+ return nil
+ }
+}
+
+type sleeper func(ctx context.Context, d time.Duration) error
+
+// invoke implements Invoke, taking an additional sleeper argument for testing.
+func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
+ var retryer Retryer
+ for {
+ err := call(ctx, settings)
+ if err == nil {
+ return nil
+ }
+ if settings.Retry == nil {
+ return err
+ }
+ // Never retry permanent certificate errors (e.g. if ca-certificates
+ // are not installed). We should only make very few, targeted
+ // exceptions: most other status=Unavailable errors should be retried,
+ // such as when there's a network hiccup or the internet goes out for a
+ // minute. This is also why we do string parsing here instead of
+ // simply making Unavailable a non-retried code elsewhere.
+ if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") {
+ return err
+ }
+ if retryer == nil {
+ if r := settings.Retry(); r != nil {
+ retryer = r
+ } else {
+ return err
+ }
+ }
+ if d, ok := retryer.Retry(err); !ok {
+ return err
+ } else if err = sp(ctx, d); err != nil {
+ return err
+ }
+ }
+}
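
As a hedged usage sketch: Invoke is normally paired with retry call options (WithRetry, OnCodes, Backoff and CallSettings live in this package's call_option.go, which is not shown in this diff). The doRPC function below is a hypothetical stand-in for a real RPC.

```go
package main

import (
	"context"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/codes"
)

// doRPC is a hypothetical stand-in for a unary RPC issued by a generated client.
func doRPC(ctx context.Context) error { return nil }

func main() {
	ctx := context.Background()
	err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		return doRPC(ctx)
	}, gax.WithRetry(func() gax.Retryer {
		// Retry only Unavailable, with exponential backoff between attempts.
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    100 * time.Millisecond,
			Max:        30 * time.Second,
			Multiplier: 1.3,
		})
	}))
	_ = err
}
```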
diff --git a/vendor/go.opencensus.io/AUTHORS b/vendor/go.opencensus.io/AUTHORS
new file mode 100644
index 00000000..e491a9e7
--- /dev/null
+++ b/vendor/go.opencensus.io/AUTHORS
@@ -0,0 +1 @@
+Google Inc.
diff --git a/vendor/go.opencensus.io/CONTRIBUTING.md b/vendor/go.opencensus.io/CONTRIBUTING.md
new file mode 100644
index 00000000..1ba3962c
--- /dev/null
+++ b/vendor/go.opencensus.io/CONTRIBUTING.md
@@ -0,0 +1,63 @@
+# How to contribute
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution,
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult [GitHub Help] for more
+information on using pull requests.
+
+[GitHub Help]: https://help.github.com/articles/about-pull-requests/
+
+## Instructions
+
+Fork the repo, then check out the upstream repo into your GOPATH by running:
+
+```
+$ go get -d go.opencensus.io
+```
+
+Add your fork as an origin:
+
+```
+cd $(go env GOPATH)/src/go.opencensus.io
+git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git
+```
+
+Run tests:
+
+```
+$ make install-tools # Only first time.
+$ make
+```
+
+Checkout a new branch, make modifications and push the branch to your fork:
+
+```
+$ git checkout -b feature
+# edit files
+$ git commit
+$ git push fork feature
+```
+
+Open a pull request against the main opencensus-go repo.
+
+## General Notes
+This project uses Appveyor and Travis for CI.
+
+The dependencies are managed with `go mod`. If you work with the sources under your
+`$GOPATH`, you need to set the environment variable `GO111MODULE=on`.
\ No newline at end of file
diff --git a/vendor/go.opencensus.io/Gopkg.lock b/vendor/go.opencensus.io/Gopkg.lock
new file mode 100644
index 00000000..3be12ac8
--- /dev/null
+++ b/vendor/go.opencensus.io/Gopkg.lock
@@ -0,0 +1,231 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ branch = "master"
+ digest = "1:eee9386329f4fcdf8d6c0def0c9771b634bdd5ba460d888aa98c17d59b37a76c"
+ name = "git.apache.org/thrift.git"
+ packages = ["lib/go/thrift"]
+ pruneopts = "UT"
+ revision = "6e67faa92827ece022380b211c2caaadd6145bf5"
+ source = "github.com/apache/thrift"
+
+[[projects]]
+ branch = "master"
+ digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
+ name = "github.com/beorn7/perks"
+ packages = ["quantile"]
+ pruneopts = "UT"
+ revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
+
+[[projects]]
+ digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf"
+ name = "github.com/golang/protobuf"
+ packages = [
+ "proto",
+ "ptypes",
+ "ptypes/any",
+ "ptypes/duration",
+ "ptypes/timestamp",
+ ]
+ pruneopts = "UT"
+ revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
+ version = "v1.2.0"
+
+[[projects]]
+ digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
+ name = "github.com/matttproud/golang_protobuf_extensions"
+ packages = ["pbutil"]
+ pruneopts = "UT"
+ revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
+ version = "v1.0.1"
+
+[[projects]]
+ digest = "1:824c8f3aa4c5f23928fa84ebbd5ed2e9443b3f0cb958a40c1f2fbed5cf5e64b1"
+ name = "github.com/openzipkin/zipkin-go"
+ packages = [
+ ".",
+ "idgenerator",
+ "model",
+ "propagation",
+ "reporter",
+ "reporter/http",
+ ]
+ pruneopts = "UT"
+ revision = "d455a5674050831c1e187644faa4046d653433c2"
+ version = "v0.1.1"
+
+[[projects]]
+ digest = "1:d14a5f4bfecf017cb780bdde1b6483e5deb87e12c332544d2c430eda58734bcb"
+ name = "github.com/prometheus/client_golang"
+ packages = [
+ "prometheus",
+ "prometheus/promhttp",
+ ]
+ pruneopts = "UT"
+ revision = "c5b7fccd204277076155f10851dad72b76a49317"
+ version = "v0.8.0"
+
+[[projects]]
+ branch = "master"
+ digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
+ name = "github.com/prometheus/client_model"
+ packages = ["go"]
+ pruneopts = "UT"
+ revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
+
+[[projects]]
+ branch = "master"
+ digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5"
+ name = "github.com/prometheus/common"
+ packages = [
+ "expfmt",
+ "internal/bitbucket.org/ww/goautoneg",
+ "model",
+ ]
+ pruneopts = "UT"
+ revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"
+
+[[projects]]
+ branch = "master"
+ digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290"
+ name = "github.com/prometheus/procfs"
+ packages = [
+ ".",
+ "internal/util",
+ "nfs",
+ "xfs",
+ ]
+ pruneopts = "UT"
+ revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92"
+
+[[projects]]
+ branch = "master"
+ digest = "1:deafe4ab271911fec7de5b693d7faae3f38796d9eb8622e2b9e7df42bb3dfea9"
+ name = "golang.org/x/net"
+ packages = [
+ "context",
+ "http/httpguts",
+ "http2",
+ "http2/hpack",
+ "idna",
+ "internal/timeseries",
+ "trace",
+ ]
+ pruneopts = "UT"
+ revision = "922f4815f713f213882e8ef45e0d315b164d705c"
+
+[[projects]]
+ branch = "master"
+ digest = "1:e0140c0c868c6e0f01c0380865194592c011fe521d6e12d78bfd33e756fe018a"
+ name = "golang.org/x/sync"
+ packages = ["semaphore"]
+ pruneopts = "UT"
+ revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
+
+[[projects]]
+ branch = "master"
+ digest = "1:a3f00ac457c955fe86a41e1495e8f4c54cb5399d609374c5cc26aa7d72e542c8"
+ name = "golang.org/x/sys"
+ packages = ["unix"]
+ pruneopts = "UT"
+ revision = "3b58ed4ad3395d483fc92d5d14123ce2c3581fec"
+
+[[projects]]
+ digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
+ name = "golang.org/x/text"
+ packages = [
+ "collate",
+ "collate/build",
+ "internal/colltab",
+ "internal/gen",
+ "internal/tag",
+ "internal/triegen",
+ "internal/ucd",
+ "language",
+ "secure/bidirule",
+ "transform",
+ "unicode/bidi",
+ "unicode/cldr",
+ "unicode/norm",
+ "unicode/rangetable",
+ ]
+ pruneopts = "UT"
+ revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
+ version = "v0.3.0"
+
+[[projects]]
+ branch = "master"
+ digest = "1:c0c17c94fe8bc1ab34e7f586a4a8b788c5e1f4f9f750ff23395b8b2f5a523530"
+ name = "google.golang.org/api"
+ packages = ["support/bundler"]
+ pruneopts = "UT"
+ revision = "e21acd801f91da814261b938941d193bb036441a"
+
+[[projects]]
+ branch = "master"
+ digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c"
+ name = "google.golang.org/genproto"
+ packages = ["googleapis/rpc/status"]
+ pruneopts = "UT"
+ revision = "c66870c02cf823ceb633bcd05be3c7cda29976f4"
+
+[[projects]]
+ digest = "1:3dd7996ce6bf52dec6a2f69fa43e7c4cefea1d4dfa3c8ab7a5f8a9f7434e239d"
+ name = "google.golang.org/grpc"
+ packages = [
+ ".",
+ "balancer",
+ "balancer/base",
+ "balancer/roundrobin",
+ "codes",
+ "connectivity",
+ "credentials",
+ "encoding",
+ "encoding/proto",
+ "grpclog",
+ "internal",
+ "internal/backoff",
+ "internal/channelz",
+ "internal/envconfig",
+ "internal/grpcrand",
+ "internal/transport",
+ "keepalive",
+ "metadata",
+ "naming",
+ "peer",
+ "resolver",
+ "resolver/dns",
+ "resolver/passthrough",
+ "stats",
+ "status",
+ "tap",
+ ]
+ pruneopts = "UT"
+ revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455"
+ version = "v1.14.0"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ input-imports = [
+ "git.apache.org/thrift.git/lib/go/thrift",
+ "github.com/golang/protobuf/proto",
+ "github.com/openzipkin/zipkin-go",
+ "github.com/openzipkin/zipkin-go/model",
+ "github.com/openzipkin/zipkin-go/reporter",
+ "github.com/openzipkin/zipkin-go/reporter/http",
+ "github.com/prometheus/client_golang/prometheus",
+ "github.com/prometheus/client_golang/prometheus/promhttp",
+ "golang.org/x/net/context",
+ "golang.org/x/net/http2",
+ "google.golang.org/api/support/bundler",
+ "google.golang.org/grpc",
+ "google.golang.org/grpc/codes",
+ "google.golang.org/grpc/grpclog",
+ "google.golang.org/grpc/metadata",
+ "google.golang.org/grpc/stats",
+ "google.golang.org/grpc/status",
+ ]
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/go.opencensus.io/Gopkg.toml b/vendor/go.opencensus.io/Gopkg.toml
new file mode 100644
index 00000000..a9f3cd68
--- /dev/null
+++ b/vendor/go.opencensus.io/Gopkg.toml
@@ -0,0 +1,36 @@
+# For v0.x.y dependencies, prefer adding a constraint of the form: version=">= 0.x.y"
+# to avoid locking to a particular minor version which can cause dep to not be
+# able to find a satisfying dependency graph.
+
+[[constraint]]
+ branch = "master"
+ name = "git.apache.org/thrift.git"
+ source = "github.com/apache/thrift"
+
+[[constraint]]
+ name = "github.com/golang/protobuf"
+ version = "1.0.0"
+
+[[constraint]]
+ name = "github.com/openzipkin/zipkin-go"
+ version = ">=0.1.0"
+
+[[constraint]]
+ name = "github.com/prometheus/client_golang"
+ version = ">=0.8.0"
+
+[[constraint]]
+ branch = "master"
+ name = "golang.org/x/net"
+
+[[constraint]]
+ branch = "master"
+ name = "google.golang.org/api"
+
+[[constraint]]
+ name = "google.golang.org/grpc"
+ version = "1.11.3"
+
+[prune]
+ go-tests = true
+ unused-packages = true
diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile
new file mode 100644
index 00000000..e2f2ed59
--- /dev/null
+++ b/vendor/go.opencensus.io/Makefile
@@ -0,0 +1,95 @@
+# TODO: Fix this on windows.
+ALL_SRC := $(shell find . -name '*.go' \
+ -not -path './vendor/*' \
+ -not -path '*/gen-go/*' \
+ -type f | sort)
+ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC))))
+
+GOTEST_OPT?=-v -race -timeout 30s
+GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic
+GOTEST=go test
+GOFMT=gofmt
+GOLINT=golint
+GOVET=go vet
+EMBEDMD=embedmd
+# TODO decide if we need to change these names.
+TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packages"
+TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages"
+
+.DEFAULT_GOAL := fmt-lint-vet-embedmd-test
+
+.PHONY: fmt-lint-vet-embedmd-test
+fmt-lint-vet-embedmd-test: fmt lint vet embedmd test
+
+# TODO enable test-with-coverage in travis
+.PHONY: travis-ci
+travis-ci: fmt lint vet embedmd test test-386
+
+all-pkgs:
+ @echo $(ALL_PKGS) | tr ' ' '\n' | sort
+
+all-srcs:
+ @echo $(ALL_SRC) | tr ' ' '\n' | sort
+
+.PHONY: test
+test:
+ $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS)
+
+.PHONY: test-386
+test-386:
+ GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS)
+
+.PHONY: test-with-coverage
+test-with-coverage:
+ $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS)
+
+.PHONY: fmt
+fmt:
+ @FMTOUT=`$(GOFMT) -s -l $(ALL_SRC) 2>&1`; \
+ if [ "$$FMTOUT" ]; then \
+ echo "$(GOFMT) FAILED => gofmt the following files:\n"; \
+ echo "$$FMTOUT\n"; \
+ exit 1; \
+ else \
+ echo "Fmt finished successfully"; \
+ fi
+
+.PHONY: lint
+lint:
+ @LINTOUT=`$(GOLINT) $(ALL_PKGS) | grep -v $(TRACE_ID_LINT_EXCEPTION) | grep -v $(TRACE_OPTION_LINT_EXCEPTION) 2>&1`; \
+ if [ "$$LINTOUT" ]; then \
+ echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \
+ echo "$$LINTOUT\n"; \
+ exit 1; \
+ else \
+ echo "Lint finished successfully"; \
+ fi
+
+.PHONY: vet
+vet:
+ # TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0"
+ @VETOUT=`$(GOVET) ./... | grep -v "go: downloading" 2>&1`; \
+ if [ "$$VETOUT" ]; then \
+ echo "$(GOVET) FAILED => go vet the following files:\n"; \
+ echo "$$VETOUT\n"; \
+ exit 1; \
+ else \
+ echo "Vet finished successfully"; \
+ fi
+
+.PHONY: embedmd
+embedmd:
+ @EMBEDMDOUT=`$(EMBEDMD) -d README.md 2>&1`; \
+ if [ "$$EMBEDMDOUT" ]; then \
+ echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \
+ echo "$$EMBEDMDOUT\n"; \
+ exit 1; \
+ else \
+ echo "Embedmd finished successfully"; \
+ fi
+
+.PHONY: install-tools
+install-tools:
+ go get -u golang.org/x/tools/cmd/cover
+ go get -u golang.org/x/lint/golint
+ go get -u github.com/rakyll/embedmd
diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md
new file mode 100644
index 00000000..3f40ed5c
--- /dev/null
+++ b/vendor/go.opencensus.io/README.md
@@ -0,0 +1,263 @@
+# OpenCensus Libraries for Go
+
+[![Build Status][travis-image]][travis-url]
+[![Windows Build Status][appveyor-image]][appveyor-url]
+[![GoDoc][godoc-image]][godoc-url]
+[![Gitter chat][gitter-image]][gitter-url]
+
+OpenCensus Go is a Go implementation of OpenCensus, a toolkit for
+collecting application performance and behavior monitoring data.
+Currently it consists of three major components: tags, stats and tracing.
+
+## Installation
+
+```
+$ go get -u go.opencensus.io
+```
+
+The API of this project is still evolving; see the [Deprecation Policy](#deprecation-policy).
+The use of vendoring or a dependency management tool is recommended.
+
+## Prerequisites
+
+OpenCensus Go libraries require Go 1.8 or later.
+
+## Getting Started
+
+The easiest way to get started using OpenCensus in your application is to use an existing
+integration with your RPC framework:
+
+* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp)
+* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc)
+* [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql)
+* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus)
+* [Groupcache](https://godoc.org/github.com/orijtech/groupcache)
+* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy)
+* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver)
+* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo)
+* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis)
+* [Memcache](https://godoc.org/github.com/orijtech/gomemcache)
+
+If you're using a framework not listed here, you could either implement your own middleware for your
+framework or use [custom stats](#stats) and [spans](#spans) directly in your application.
+
+## Exporters
+
+OpenCensus can export instrumentation data to various backends.
+OpenCensus has exporter implementations for the backends listed below;
+users can also implement their own exporters by implementing the exporter interfaces
+([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter),
+[trace](https://godoc.org/go.opencensus.io/trace#Exporter)):
+
+* [Prometheus][exporter-prom] for stats
+* [OpenZipkin][exporter-zipkin] for traces
+* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces
+* [Jaeger][exporter-jaeger] for traces
+* [AWS X-Ray][exporter-xray] for traces
+* [Datadog][exporter-datadog] for stats and traces
+* [Graphite][exporter-graphite] for stats
+* [Honeycomb][exporter-honeycomb] for traces
+
+## Overview
+
+![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg)
+
+In a microservices environment, a user request may go through
+multiple services until there is a response. OpenCensus allows
+you to instrument your services and collect diagnostic data
+end-to-end across all of them.
+
+## Tags
+
+Tags represent propagated key-value pairs. They are propagated using `context.Context`
+in the same process or can be encoded to be transmitted on the wire. Usually, this will
+be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler`
+for gRPC.
+
+Package `tag` allows adding or modifying tags in the current context.
+
+[embedmd]:# (internal/readme/tags.go new)
+```go
+ctx, err = tag.New(ctx,
+ tag.Insert(osKey, "macOS-10.12.5"),
+ tag.Upsert(userIDKey, "cde36753ed"),
+)
+if err != nil {
+ log.Fatal(err)
+}
+```
+
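
The snippet above assumes `osKey` and `userIDKey` already exist. A minimal sketch of creating them with this package's `tag.NewKey` follows; the key names are illustrative.

```go
package main

import (
	"log"

	"go.opencensus.io/tag"
)

// osKey and userIDKey as assumed by the snippet above; key names are illustrative.
var (
	osKey     = mustNewKey("example.com/keys/os")
	userIDKey = mustNewKey("example.com/keys/user_id")
)

// mustNewKey creates a tag key or aborts; convenient for package-level keys.
func mustNewKey(name string) tag.Key {
	k, err := tag.NewKey(name)
	if err != nil {
		log.Fatal(err)
	}
	return k
}
```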
+## Stats
+
+OpenCensus is a low-overhead framework, even when instrumentation is always enabled.
+To achieve this, it is optimized to make recording of data points fast
+and separate from the data aggregation.
+
+OpenCensus stats collection happens in two stages:
+
+* Definition of measures and recording of data points
+* Definition of views and aggregation of the recorded data
+
+### Recording
+
+Measurements are data points associated with a measure.
+Recording implicitly tags the set of Measurements with the tags from the
+provided context:
+
+[embedmd]:# (internal/readme/stats.go record)
+```go
+stats.Record(ctx, videoSize.M(102478))
+```
+
+### Views
+
+Views are how Measures are aggregated. You can think of them as queries over the
+set of recorded data points (measurements).
+
+Views have two parts: the tags to group by and the aggregation type used.
+
+Currently three types of aggregations are supported:
+* CountAggregation is used to count the number of times a sample was recorded.
+* DistributionAggregation is used to provide a histogram of the values of the samples.
+* SumAggregation is used to sum up all sample values.
+
+[embedmd]:# (internal/readme/stats.go aggs)
+```go
+distAgg := view.Distribution(1<<32, 2<<32, 3<<32)
+countAgg := view.Count()
+sumAgg := view.Sum()
+```
+
+Here we create a view with the DistributionAggregation over our measure.
+
+[embedmd]:# (internal/readme/stats.go view)
+```go
+if err := view.Register(&view.View{
+ Name: "example.com/video_size_distribution",
+ Description: "distribution of processed video size over time",
+ Measure: videoSize,
+ Aggregation: view.Distribution(1<<32, 2<<32, 3<<32),
+}); err != nil {
+ log.Fatalf("Failed to register view: %v", err)
+}
+```
+
+Register begins collecting data for the view. Registered views' data will be
+exported via the registered exporters.
+
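The README snippets above never show where `videoSize` comes from. A minimal end-to-end sketch, reusing the view name and bucket bounds from the snippet above and assuming an illustrative measure name and unit, would look like this:

```go
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

// videoSize is the measure assumed by the snippets above; the name and unit
// shown here are illustrative.
var videoSize = stats.Int64("example.com/measure/video_size", "size of processed videos", stats.UnitBytes)

func main() {
	// Register the distribution view from the snippet above.
	if err := view.Register(&view.View{
		Name:        "example.com/video_size_distribution",
		Description: "distribution of processed video size over time",
		Measure:     videoSize,
		Aggregation: view.Distribution(1<<32, 2<<32, 3<<32),
	}); err != nil {
		log.Fatalf("Failed to register view: %v", err)
	}

	// Record one data point; registered exporters receive the aggregated view data.
	stats.Record(context.Background(), videoSize.M(102478))
}
```
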
+## Traces
+
+A distributed trace tracks the progression of a single user request as
+it is handled by the services and processes that make up an application.
+Each step is called a span in the trace. Spans include metadata about the step,
+most notably the time spent in the step, called the span’s latency.
+
+Below you see a trace and several spans underneath it.
+
+![Traces and spans](https://i.imgur.com/7hZwRVj.png)
+
+### Spans
+
+A span is the unit step in a trace. Each span has a name, latency, status and
+additional metadata.
+
+Below we are starting a span for a cache read and ending it
+when we are done:
+
+[embedmd]:# (internal/readme/trace.go startend)
+```go
+ctx, span := trace.StartSpan(ctx, "cache.Get")
+defer span.End()
+
+// Do work to get from cache.
+```
+
+### Propagation
+
+Spans can have parents or can be root spans if they don't have any parents.
+The current span is propagated in-process and across the network to allow associating
+new child spans with the parent.
+
+In the same process, `context.Context` is used to propagate spans.
+`trace.StartSpan` creates a new span as a root if the current context
+doesn't contain a span. Otherwise, it creates a child of the span that is
+already in the current context. The returned context can be used to keep
+propagating the newly created span in the current context.
+
+[embedmd]:# (internal/readme/trace.go startend)
+```go
+ctx, span := trace.StartSpan(ctx, "cache.Get")
+defer span.End()
+
+// Do work to get from cache.
+```
+
+Across the network, OpenCensus provides different propagation
+methods for different protocols.
+
+* gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation).
+* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation)
+ by default but can be configured to use a custom propagation method by setting another
+ [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat).
+
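As a sketch of the HTTP case above: the `ochttp.Handler` type accepts a `Propagation` field where a different `propagation.HTTPFormat` implementation can be plugged in. The handler below is illustrative; the `b3` package ships with OpenCensus Go and is already the default.

```go
package main

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/plugin/ochttp/propagation/b3"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// B3 is already the default; setting Propagation explicitly shows where a
	// different propagation.HTTPFormat implementation would be plugged in.
	handler := &ochttp.Handler{
		Handler:     mux,
		Propagation: &b3.HTTPFormat{},
	}
	http.ListenAndServe(":8080", handler)
}
```
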
+## Execution Tracer
+
+With Go 1.11, OpenCensus Go will support integration with the Go execution tracer.
+See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68)
+for an example of their mutual use.
+
+## Profiles
+
+OpenCensus tags can be applied as profiler labels
+for users who are on Go 1.9 and above.
+
+[embedmd]:# (internal/readme/tags.go profiler)
+```go
+ctx, err = tag.New(ctx,
+ tag.Insert(osKey, "macOS-10.12.5"),
+ tag.Insert(userIDKey, "fff0989878"),
+)
+if err != nil {
+ log.Fatal(err)
+}
+tag.Do(ctx, func(ctx context.Context) {
+ // Do work.
+ // When profiling is on, samples will be
+ // recorded with the key/values from the tag map.
+})
+```
+
+A screenshot of the CPU profile from the program above:
+
+![CPU profile](https://i.imgur.com/jBKjlkw.png)
+
+## Deprecation Policy
+
+Before version 1.0.0, the following deprecation policy will be observed:
+
+No backwards-incompatible changes will be made except for the removal of symbols that have
+been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release
+removing the *Deprecated* functionality will be made no sooner than 28 days after the first
+release in which the functionality was marked *Deprecated*.
+
+[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master
+[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go
+[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true
+[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master
+[godoc-image]: https://godoc.org/go.opencensus.io?status.svg
+[godoc-url]: https://godoc.org/go.opencensus.io
+[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg
+[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
+
+
+[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap
+[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace
+
+[exporter-prom]: https://godoc.org/go.opencensus.io/exporter/prometheus
+[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver
+[exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin
+[exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger
+[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws
+[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog
+[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite
+[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter
diff --git a/vendor/go.opencensus.io/appveyor.yml b/vendor/go.opencensus.io/appveyor.yml
new file mode 100644
index 00000000..12bd7c4c
--- /dev/null
+++ b/vendor/go.opencensus.io/appveyor.yml
@@ -0,0 +1,25 @@
+version: "{build}"
+
+platform: x64
+
+clone_folder: c:\gopath\src\go.opencensus.io
+
+environment:
+ GOPATH: 'c:\gopath'
+ GOVERSION: '1.11'
+ GO111MODULE: 'on'
+ CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613
+
+install:
+ - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+ - choco upgrade golang --version 1.11.5 # Temporary fix because of a go.sum bug in 1.11
+ - go version
+ - go env
+
+build: false
+deploy: false
+
+test_script:
+ - cd %APPVEYOR_BUILD_FOLDER%
+ - go build -v .\...
+ - go test -v .\... # No -race because cgo is disabled
diff --git a/vendor/go.opencensus.io/go.mod b/vendor/go.opencensus.io/go.mod
new file mode 100644
index 00000000..cc9febc0
--- /dev/null
+++ b/vendor/go.opencensus.io/go.mod
@@ -0,0 +1,13 @@
+module go.opencensus.io
+
+require (
+ github.com/apache/thrift v0.12.0
+ github.com/golang/protobuf v1.2.0
+ github.com/google/go-cmp v0.2.0
+ github.com/hashicorp/golang-lru v0.5.0
+ github.com/openzipkin/zipkin-go v0.1.6
+ github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829
+ golang.org/x/net v0.0.0-20190311183353-d8887717615a
+ google.golang.org/api v0.3.1
+ google.golang.org/grpc v1.19.0
+)
diff --git a/vendor/go.opencensus.io/go.sum b/vendor/go.opencensus.io/go.sum
new file mode 100644
index 00000000..954fadf7
--- /dev/null
+++ b/vendor/go.opencensus.io/go.sum
@@ -0,0 +1,127 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs=
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/openzipkin/zipkin-go v0.1.6 h1:yXiysv1CSK7Q5yjGy1710zZGnsbMUIjluWBxtLXHPBo=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 h1:ulvT7fqt0yHWzpJwI57MezWnYDVpCAYBVuYst/L+fAY=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8=
+google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go
new file mode 100644
index 00000000..d2565f1e
--- /dev/null
+++ b/vendor/go.opencensus.io/opencensus.go
@@ -0,0 +1,21 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package opencensus contains Go support for OpenCensus.
+package opencensus // import "go.opencensus.io"
+
+// Version is the current release version of OpenCensus in use.
+func Version() string {
+ return "0.21.0"
+}
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md
index 46aa2b12..dfbed62c 100644
--- a/vendor/golang.org/x/oauth2/CONTRIBUTING.md
+++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md
@@ -4,16 +4,15 @@ Go is an open source project.
It is the work of hundreds of contributors. We appreciate your help!
-
## Filing issues
When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
-1. What version of Go are you using (`go version`)?
-2. What operating system and processor architecture are you using?
-3. What did you do?
-4. What did you expect to see?
-5. What did you see instead?
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
The gophers there will answer or ask you to file an issue if you've tripped over a bug.
@@ -23,9 +22,5 @@ The gophers there will answer or ask you to file an issue if you've tripped over
Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
before sending patches.
-**We do not accept GitHub pull requests**
-(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
-
Unless otherwise noted, the Go source files are distributed under
the BSD-style license found in the LICENSE file.
-
diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE
index d02f24fd..6a66aea5 100644
--- a/vendor/golang.org/x/oauth2/LICENSE
+++ b/vendor/golang.org/x/oauth2/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The oauth2 Authors. All rights reserved.
+Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
index 0d514173..0f443e69 100644
--- a/vendor/golang.org/x/oauth2/README.md
+++ b/vendor/golang.org/x/oauth2/README.md
@@ -1,6 +1,7 @@
# OAuth2 for Go
[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2)
oauth2 package contains a client implementation for OAuth 2.0 spec.
@@ -10,55 +11,25 @@ oauth2 package contains a client implementation for OAuth 2.0 spec.
go get golang.org/x/oauth2
~~~~
+Or you can manually git clone the repository to
+`$(go env GOPATH)/src/golang.org/x/oauth2`.
+
See godoc for further documentation and examples.
* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+## Policy for new packages
-## App Engine
-
-In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
-of the [`context.Context`](https://golang.org/x/net/context#Context) type from
-the `golang.org/x/net/context` package
-
-This means its no longer possible to use the "Classic App Engine"
-`appengine.Context` type with the `oauth2` package. (You're using
-Classic App Engine if you import the package `"appengine"`.)
-
-To work around this, you may use the new `"google.golang.org/appengine"`
-package. This package has almost the same API as the `"appengine"` package,
-but it can be fetched with `go get` and used on "Managed VMs" and well as
-Classic App Engine.
-
-See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
-for information on updating your app.
-
-If you don't want to update your entire app to use the new App Engine packages,
-you may use both sets of packages in parallel, using only the new packages
-with the `oauth2` package.
-
- import (
- "golang.org/x/net/context"
- "golang.org/x/oauth2"
- "golang.org/x/oauth2/google"
- newappengine "google.golang.org/appengine"
- newurlfetch "google.golang.org/appengine/urlfetch"
-
- "appengine"
- )
+We no longer accept new provider-specific packages in this repo. For
+defining provider endpoints and provider-specific OAuth2 behavior, we
+encourage you to create packages elsewhere. We'll keep the existing
+packages for compatibility.
- func handler(w http.ResponseWriter, r *http.Request) {
- var c appengine.Context = appengine.NewContext(r)
- c.Infof("Logging a message with the old package")
+## Report Issues / Send Patches
- var ctx context.Context = newappengine.NewContext(r)
- client := &http.Client{
- Transport: &oauth2.Transport{
- Source: google.AppEngineTokenSource(ctx, "scope"),
- Base: &newurlfetch.Transport{Context: ctx},
- },
- }
- client.Get("...")
- }
+This repository uses Gerrit for code changes. To learn how to submit changes to
+this repository, see https://golang.org/doc/contribute.html.
+The main issue tracker for the oauth2 repository is located at
+https://github.com/golang/oauth2/issues.
diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go
deleted file mode 100644
index 8962c49d..00000000
--- a/vendor/golang.org/x/oauth2/client_appengine.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-// App Engine hooks.
-
-package oauth2
-
-import (
- "net/http"
-
- "golang.org/x/net/context"
- "golang.org/x/oauth2/internal"
- "google.golang.org/appengine/urlfetch"
-)
-
-func init() {
- internal.RegisterContextClientFunc(contextClientAppEngine)
-}
-
-func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
- return urlfetch.Client(ctx), nil
-}
diff --git a/vendor/golang.org/x/oauth2/go.mod b/vendor/golang.org/x/oauth2/go.mod
new file mode 100644
index 00000000..b3457815
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/go.mod
@@ -0,0 +1,10 @@
+module golang.org/x/oauth2
+
+go 1.11
+
+require (
+ cloud.google.com/go v0.34.0
+ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e
+ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect
+ google.golang.org/appengine v1.4.0
+)
diff --git a/vendor/golang.org/x/oauth2/go.sum b/vendor/golang.org/x/oauth2/go.sum
new file mode 100644
index 00000000..6f0079b0
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/go.sum
@@ -0,0 +1,12 @@
+cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go
index dc993efb..feb1157b 100644
--- a/vendor/golang.org/x/oauth2/google/appengine.go
+++ b/vendor/golang.org/x/oauth2/google/appengine.go
@@ -5,82 +5,34 @@
package google
import (
- "sort"
- "strings"
- "sync"
+ "context"
"time"
- "golang.org/x/net/context"
"golang.org/x/oauth2"
)
-// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs.
-var appengineVM bool
-
-// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
+// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible.
var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
-// AppEngineTokenSource returns a token source that fetches tokens
-// issued to the current App Engine application's service account.
-// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
-// that involves user accounts, see oauth2.Config instead.
+// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible.
+var appengineAppIDFunc func(c context.Context) string
+
+// AppEngineTokenSource returns a token source that fetches tokens from either
+// the current application's service account or from the metadata server,
+// depending on the App Engine environment. See below for environment-specific
+// details. If you are implementing a 3-legged OAuth 2.0 flow on App Engine that
+// involves user accounts, see oauth2.Config instead.
//
-// The provided context must have come from appengine.NewContext.
+// First generation App Engine runtimes (<= Go 1.9):
+// AppEngineTokenSource returns a token source that fetches tokens issued to the
+// current App Engine application's service account. The provided context must have
+// come from appengine.NewContext.
+//
+// Second generation App Engine runtimes (>= Go 1.11) and App Engine flexible:
+// AppEngineTokenSource is DEPRECATED on second generation runtimes and on the
+// flexible environment. It delegates to ComputeTokenSource, and the provided
+// context and scopes are not used. Please use DefaultTokenSource (or ComputeTokenSource,
+// which DefaultTokenSource will use in this case) instead.
func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
- if appengineTokenFunc == nil {
- panic("google: AppEngineTokenSource can only be used on App Engine.")
- }
- scopes := append([]string{}, scope...)
- sort.Strings(scopes)
- return &appEngineTokenSource{
- ctx: ctx,
- scopes: scopes,
- key: strings.Join(scopes, " "),
- }
-}
-
-// aeTokens helps the fetched tokens to be reused until their expiration.
-var (
- aeTokensMu sync.Mutex
- aeTokens = make(map[string]*tokenLock) // key is space-separated scopes
-)
-
-type tokenLock struct {
- mu sync.Mutex // guards t; held while fetching or updating t
- t *oauth2.Token
-}
-
-type appEngineTokenSource struct {
- ctx context.Context
- scopes []string
- key string // to aeTokens map; space-separated scopes
-}
-
-func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
- if appengineTokenFunc == nil {
- panic("google: AppEngineTokenSource can only be used on App Engine.")
- }
-
- aeTokensMu.Lock()
- tok, ok := aeTokens[ts.key]
- if !ok {
- tok = &tokenLock{}
- aeTokens[ts.key] = tok
- }
- aeTokensMu.Unlock()
-
- tok.mu.Lock()
- defer tok.mu.Unlock()
- if tok.t.Valid() {
- return tok.t, nil
- }
- access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
- if err != nil {
- return nil, err
- }
- tok.t = &oauth2.Token{
- AccessToken: access,
- Expiry: exp,
- }
- return tok.t, nil
+ return appEngineTokenSource(ctx, scope...)
}
diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go
new file mode 100644
index 00000000..83dacac3
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go
@@ -0,0 +1,77 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+// This file applies to App Engine first generation runtimes (<= Go 1.9).
+
+package google
+
+import (
+ "context"
+ "sort"
+ "strings"
+ "sync"
+
+ "golang.org/x/oauth2"
+ "google.golang.org/appengine"
+)
+
+func init() {
+ appengineTokenFunc = appengine.AccessToken
+ appengineAppIDFunc = appengine.AppID
+}
+
+// See comment on AppEngineTokenSource in appengine.go.
+func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
+ scopes := append([]string{}, scope...)
+ sort.Strings(scopes)
+ return &gaeTokenSource{
+ ctx: ctx,
+ scopes: scopes,
+ key: strings.Join(scopes, " "),
+ }
+}
+
+// aeTokens helps the fetched tokens to be reused until their expiration.
+var (
+ aeTokensMu sync.Mutex
+ aeTokens = make(map[string]*tokenLock) // key is space-separated scopes
+)
+
+type tokenLock struct {
+ mu sync.Mutex // guards t; held while fetching or updating t
+ t *oauth2.Token
+}
+
+type gaeTokenSource struct {
+ ctx context.Context
+ scopes []string
+ key string // to aeTokens map; space-separated scopes
+}
+
+func (ts *gaeTokenSource) Token() (*oauth2.Token, error) {
+ aeTokensMu.Lock()
+ tok, ok := aeTokens[ts.key]
+ if !ok {
+ tok = &tokenLock{}
+ aeTokens[ts.key] = tok
+ }
+ aeTokensMu.Unlock()
+
+ tok.mu.Lock()
+ defer tok.mu.Unlock()
+ if tok.t.Valid() {
+ return tok.t, nil
+ }
+ access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
+ if err != nil {
+ return nil, err
+ }
+ tok.t = &oauth2.Token{
+ AccessToken: access,
+ Expiry: exp,
+ }
+ return tok.t, nil
+}
diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go
new file mode 100644
index 00000000..04c2c221
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go
@@ -0,0 +1,27 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible.
+
+package google
+
+import (
+ "context"
+ "log"
+ "sync"
+
+ "golang.org/x/oauth2"
+)
+
+var logOnce sync.Once // only spam about deprecation once
+
+// See comment on AppEngineTokenSource in appengine.go.
+func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
+ logOnce.Do(func() {
+ log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. Please use DefaultTokenSource or ComputeTokenSource.")
+ })
+ return ComputeTokenSource("")
+}
diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go
deleted file mode 100644
index 4f42c8b3..00000000
--- a/vendor/golang.org/x/oauth2/google/appengine_hook.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package google
-
-import "google.golang.org/appengine"
-
-func init() {
- appengineTokenFunc = appengine.AccessToken
-}
diff --git a/vendor/golang.org/x/oauth2/google/appenginevm_hook.go b/vendor/golang.org/x/oauth2/google/appenginevm_hook.go
deleted file mode 100644
index 633611cc..00000000
--- a/vendor/golang.org/x/oauth2/google/appenginevm_hook.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2015 The oauth2 Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appenginevm
-
-package google
-
-import "google.golang.org/appengine"
-
-func init() {
- appengineVM = true
- appengineTokenFunc = appengine.AccessToken
-}
diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go
index b9523629..ad2c0923 100644
--- a/vendor/golang.org/x/oauth2/google/default.go
+++ b/vendor/golang.org/x/oauth2/google/default.go
@@ -5,8 +5,8 @@
package google
import (
+ "context"
"encoding/json"
- "errors"
"fmt"
"io/ioutil"
"net/http"
@@ -14,22 +14,31 @@ import (
"path/filepath"
"runtime"
- "golang.org/x/net/context"
+ "cloud.google.com/go/compute/metadata"
"golang.org/x/oauth2"
- "golang.org/x/oauth2/jwt"
- "google.golang.org/cloud/compute/metadata"
)
-// DefaultClient returns an HTTP Client that uses the
-// DefaultTokenSource to obtain authentication credentials.
-//
-// This client should be used when developing services
-// that run on Google App Engine or Google Compute Engine
-// and use "Application Default Credentials."
-//
+// Credentials holds Google credentials, including "Application Default Credentials".
// For more details, see:
// https://developers.google.com/accounts/docs/application-default-credentials
+type Credentials struct {
+ ProjectID string // may be empty
+ TokenSource oauth2.TokenSource
+
+ // JSON contains the raw bytes from a JSON credentials file.
+ // This field may be nil if authentication is provided by the
+ // environment and not with a credentials file, e.g. when code is
+ // running on Google Cloud Platform.
+ JSON []byte
+}
+
+// DefaultCredentials is the old name of Credentials.
//
+// Deprecated: use Credentials instead.
+type DefaultCredentials = Credentials
+
+// DefaultClient returns an HTTP Client that uses the
+// DefaultTokenSource to obtain authentication credentials.
func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
ts, err := DefaultTokenSource(ctx, scope...)
if err != nil {
@@ -38,8 +47,18 @@ func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
return oauth2.NewClient(ctx, ts), nil
}
-// DefaultTokenSource is a token source that uses
+// DefaultTokenSource returns the token source for
// "Application Default Credentials".
+// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource.
+func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
+ creds, err := FindDefaultCredentials(ctx, scope...)
+ if err != nil {
+ return nil, err
+ }
+ return creds.TokenSource, nil
+}
+
+// FindDefaultCredentials searches for "Application Default Credentials".
//
// It looks for credentials in the following places,
// preferring the first location found:
@@ -49,49 +68,48 @@ func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
// 2. A JSON file in a location known to the gcloud command-line tool.
// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
-// 3. On Google App Engine it uses the appengine.AccessToken function.
-// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches
+// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses
+// the appengine.AccessToken function.
+// 4. On Google Compute Engine, Google App Engine standard second generation runtimes
+// (>= Go 1.11), and Google App Engine flexible environment, it fetches
// credentials from the metadata server.
-// (In this final case any provided scopes are ignored.)
-//
-// For more details, see:
-// https://developers.google.com/accounts/docs/application-default-credentials
-//
-func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
+func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) {
// First, try the environment variable.
const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
if filename := os.Getenv(envVar); filename != "" {
- ts, err := tokenSourceFromFile(ctx, filename, scope)
+ creds, err := readCredentialsFile(ctx, filename, scopes)
if err != nil {
return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
}
- return ts, nil
+ return creds, nil
}
// Second, try a well-known file.
filename := wellKnownFile()
- _, err := os.Stat(filename)
- if err == nil {
- ts, err2 := tokenSourceFromFile(ctx, filename, scope)
- if err2 == nil {
- return ts, nil
- }
- err = err2
- } else if os.IsNotExist(err) {
- err = nil // ignore this error
- }
- if err != nil {
+ if creds, err := readCredentialsFile(ctx, filename, scopes); err == nil {
+ return creds, nil
+ } else if !os.IsNotExist(err) {
return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
}
- // Third, if we're on Google App Engine use those credentials.
- if appengineTokenFunc != nil && !appengineVM {
- return AppEngineTokenSource(ctx, scope...), nil
+ // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9)
+ // use those credentials. App Engine standard second generation runtimes (>= Go 1.11)
+ // and App Engine flexible use ComputeTokenSource and the metadata server.
+ if appengineTokenFunc != nil {
+ return &DefaultCredentials{
+ ProjectID: appengineAppIDFunc(ctx),
+ TokenSource: AppEngineTokenSource(ctx, scopes...),
+ }, nil
}
- // Fourth, if we're on Google Compute Engine use the metadata server.
+ // Fourth, if we're on Google Compute Engine, an App Engine standard second generation runtime,
+ // or App Engine flexible, use the metadata server.
if metadata.OnGCE() {
- return ComputeTokenSource(""), nil
+ id, _ := metadata.ProjectID()
+ return &DefaultCredentials{
+ ProjectID: id,
+ TokenSource: ComputeTokenSource("", scopes...),
+ }, nil
}
// None are found; return helpful error.
@@ -99,6 +117,26 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc
return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
}
+// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can
+// represent either a Google Developers Console client_credentials.json file (as in
+// ConfigFromJSON) or a Google Developers service account key file (as in
+// JWTConfigFromJSON).
+func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) {
+ var f credentialsFile
+ if err := json.Unmarshal(jsonData, &f); err != nil {
+ return nil, err
+ }
+ ts, err := f.tokenSource(ctx, append([]string(nil), scopes...))
+ if err != nil {
+ return nil, err
+ }
+ return &DefaultCredentials{
+ ProjectID: f.ProjectID,
+ TokenSource: ts,
+ JSON: jsonData,
+ }, nil
+}
+
func wellKnownFile() string {
const f = "application_default_credentials.json"
if runtime.GOOS == "windows" {
@@ -107,49 +145,10 @@ func wellKnownFile() string {
return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
}
-func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) {
+func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) {
b, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
- var d struct {
- // Common fields
- Type string
- ClientID string `json:"client_id"`
-
- // User Credential fields
- ClientSecret string `json:"client_secret"`
- RefreshToken string `json:"refresh_token"`
-
- // Service Account fields
- ClientEmail string `json:"client_email"`
- PrivateKeyID string `json:"private_key_id"`
- PrivateKey string `json:"private_key"`
- }
- if err := json.Unmarshal(b, &d); err != nil {
- return nil, err
- }
- switch d.Type {
- case "authorized_user":
- cfg := &oauth2.Config{
- ClientID: d.ClientID,
- ClientSecret: d.ClientSecret,
- Scopes: append([]string{}, scopes...), // copy
- Endpoint: Endpoint,
- }
- tok := &oauth2.Token{RefreshToken: d.RefreshToken}
- return cfg.TokenSource(ctx, tok), nil
- case "service_account":
- cfg := &jwt.Config{
- Email: d.ClientEmail,
- PrivateKey: []byte(d.PrivateKey),
- Scopes: append([]string{}, scopes...), // copy
- TokenURL: JWTTokenURL,
- }
- return cfg.TokenSource(ctx), nil
- case "":
- return nil, errors.New("missing 'type' field in credentials")
- default:
- return nil, fmt.Errorf("unknown credential type: %q", d.Type)
- }
+ return CredentialsFromJSON(ctx, b, scopes...)
}
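
Not part of the diff: a minimal usage sketch of the FindDefaultCredentials/Credentials surface introduced above, assuming Application Default Credentials are available in the environment; the cloud-platform scope is an illustrative choice.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	// Checks GOOGLE_APPLICATION_CREDENTIALS, the gcloud well-known file,
	// App Engine gen-1 credentials, then the GCE metadata server.
	creds, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("project:", creds.ProjectID) // may be empty
	tok, err := creds.TokenSource.Token()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token expires:", tok.Expiry)
}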
diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go
new file mode 100644
index 00000000..73be6290
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/doc.go
@@ -0,0 +1,40 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package google provides support for making OAuth2 authorized and authenticated
+// HTTP requests to Google APIs. It supports the Web server flow, client-side
+// credentials, service accounts, Google Compute Engine service accounts, and Google
+// App Engine service accounts.
+//
+// A brief overview of the package follows. For more information, please read
+// https://developers.google.com/accounts/docs/OAuth2
+// and
+// https://developers.google.com/accounts/docs/application-default-credentials.
+//
+// OAuth2 Configs
+//
+// Two functions in this package return golang.org/x/oauth2.Config values from Google credential
+// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON,
+// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or
+// create an http.Client.
+//
+//
+// Credentials
+//
+// The Credentials type represents Google credentials, including Application Default
+// Credentials.
+//
+// Use FindDefaultCredentials to obtain Application Default Credentials.
+// FindDefaultCredentials looks in some well-known places for a credentials file, and
+// will call AppEngineTokenSource or ComputeTokenSource as needed.
+//
+// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials,
+// then use the credentials to construct an http.Client or an oauth2.TokenSource.
+//
+// Use CredentialsFromJSON to obtain credentials from either of the two JSON formats
+// described in OAuth2 Configs, above. The TokenSource in the returned value is the
+// same as the one obtained from the oauth2.Config returned from ConfigFromJSON or
+// JWTConfigFromJSON, but the Credentials may contain additional information
+// that is useful in some circumstances.
+package google // import "golang.org/x/oauth2/google"
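
Not part of the diff: a short sketch of CredentialsFromJSON as described in the package doc above. The package and helper names are hypothetical and the key path is a placeholder.

package creds // illustrative package name

import (
	"context"
	"io/ioutil"

	"golang.org/x/oauth2/google"
)

// FromFile is a hypothetical helper wrapping CredentialsFromJSON.
func FromFile(ctx context.Context, path string, scopes ...string) (*google.Credentials, error) {
	data, err := ioutil.ReadFile(path) // e.g. a downloaded service account key (placeholder path)
	if err != nil {
		return nil, err
	}
	// Accepts both "service_account" and "authorized_user" JSON.
	return google.CredentialsFromJSON(ctx, data, scopes...)
}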
diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go
index 464c75aa..6eb2aa95 100644
--- a/vendor/golang.org/x/oauth2/google/google.go
+++ b/vendor/golang.org/x/oauth2/google/google.go
@@ -2,38 +2,31 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package google provides support for making OAuth2 authorized and
-// authenticated HTTP requests to Google APIs.
-// It supports the Web server flow, client-side credentials, service accounts,
-// Google Compute Engine service accounts, and Google App Engine service
-// accounts.
-//
-// For more information, please read
-// https://developers.google.com/accounts/docs/OAuth2
-// and
-// https://developers.google.com/accounts/docs/application-default-credentials.
-package google // import "golang.org/x/oauth2/google"
+package google
import (
+ "context"
"encoding/json"
"errors"
"fmt"
+ "net/url"
"strings"
"time"
+ "cloud.google.com/go/compute/metadata"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jwt"
- "google.golang.org/cloud/compute/metadata"
)
// Endpoint is Google's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
- AuthURL: "https://accounts.google.com/o/oauth2/auth",
- TokenURL: "https://accounts.google.com/o/oauth2/token",
+ AuthURL: "https://accounts.google.com/o/oauth2/auth",
+ TokenURL: "https://oauth2.googleapis.com/token",
+ AuthStyle: oauth2.AuthStyleInParams,
}
// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
-const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
+const JWTTokenURL = "https://oauth2.googleapis.com/token"
// ConfigFromJSON uses a Google Developers Console client_credentials.json
// file to construct a config.
@@ -85,33 +78,90 @@ func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) {
// Create a service account on "Credentials" for your project at
// https://console.developers.google.com to download a JSON key file.
func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {
- var key struct {
- Email string `json:"client_email"`
- PrivateKey string `json:"private_key"`
- }
- if err := json.Unmarshal(jsonKey, &key); err != nil {
+ var f credentialsFile
+ if err := json.Unmarshal(jsonKey, &f); err != nil {
return nil, err
}
- return &jwt.Config{
- Email: key.Email,
- PrivateKey: []byte(key.PrivateKey),
- Scopes: scope,
- TokenURL: JWTTokenURL,
- }, nil
+ if f.Type != serviceAccountKey {
+ return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey)
+ }
+ scope = append([]string(nil), scope...) // copy
+ return f.jwtConfig(scope), nil
+}
+
+// JSON key file types.
+const (
+ serviceAccountKey = "service_account"
+ userCredentialsKey = "authorized_user"
+)
+
+// credentialsFile is the unmarshalled representation of a credentials file.
+type credentialsFile struct {
+ Type string `json:"type"` // serviceAccountKey or userCredentialsKey
+
+ // Service Account fields
+ ClientEmail string `json:"client_email"`
+ PrivateKeyID string `json:"private_key_id"`
+ PrivateKey string `json:"private_key"`
+ TokenURL string `json:"token_uri"`
+ ProjectID string `json:"project_id"`
+
+ // User Credential fields
+ // (These typically come from gcloud auth.)
+ ClientSecret string `json:"client_secret"`
+ ClientID string `json:"client_id"`
+ RefreshToken string `json:"refresh_token"`
+}
+
+func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config {
+ cfg := &jwt.Config{
+ Email: f.ClientEmail,
+ PrivateKey: []byte(f.PrivateKey),
+ PrivateKeyID: f.PrivateKeyID,
+ Scopes: scopes,
+ TokenURL: f.TokenURL,
+ }
+ if cfg.TokenURL == "" {
+ cfg.TokenURL = JWTTokenURL
+ }
+ return cfg
+}
+
+func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) {
+ switch f.Type {
+ case serviceAccountKey:
+ cfg := f.jwtConfig(scopes)
+ return cfg.TokenSource(ctx), nil
+ case userCredentialsKey:
+ cfg := &oauth2.Config{
+ ClientID: f.ClientID,
+ ClientSecret: f.ClientSecret,
+ Scopes: scopes,
+ Endpoint: Endpoint,
+ }
+ tok := &oauth2.Token{RefreshToken: f.RefreshToken}
+ return cfg.TokenSource(ctx, tok), nil
+ case "":
+ return nil, errors.New("missing 'type' field in credentials")
+ default:
+ return nil, fmt.Errorf("unknown credential type: %q", f.Type)
+ }
}
// ComputeTokenSource returns a token source that fetches access tokens
// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
// this token source if your program is running on a GCE instance.
// If no account is specified, "default" is used.
+// If no scopes are specified, a set of default scopes are automatically granted.
// Further information about retrieving access tokens from the GCE metadata
// server can be found at https://cloud.google.com/compute/docs/authentication.
-func ComputeTokenSource(account string) oauth2.TokenSource {
- return oauth2.ReuseTokenSource(nil, computeSource{account: account})
+func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource {
+ return oauth2.ReuseTokenSource(nil, computeSource{account: account, scopes: scope})
}
type computeSource struct {
account string
+ scopes []string
}
func (cs computeSource) Token() (*oauth2.Token, error) {
@@ -122,7 +172,13 @@ func (cs computeSource) Token() (*oauth2.Token, error) {
if acct == "" {
acct = "default"
}
- tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token")
+ tokenURI := "instance/service-accounts/" + acct + "/token"
+ if len(cs.scopes) > 0 {
+ v := url.Values{}
+ v.Set("scopes", strings.Join(cs.scopes, ","))
+ tokenURI = tokenURI + "?" + v.Encode()
+ }
+ tokenJSON, err := metadata.Get(tokenURI)
if err != nil {
return nil, err
}
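
Not part of the diff: how the new variadic scopes on ComputeTokenSource are used. This only works on GCE (or App Engine second-gen/flex) where the metadata server is reachable, and the monitoring scope is illustrative.

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	// With scopes, the metadata request becomes
	// instance/service-accounts/default/token?scopes=<comma-separated list>.
	ts := google.ComputeTokenSource("", "https://www.googleapis.com/auth/monitoring.write")
	tok, err := ts.Token() // fails off-GCE: no metadata server
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("expires:", tok.Expiry)
}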
diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go
index b9199178..b0fdb3a8 100644
--- a/vendor/golang.org/x/oauth2/google/jwt.go
+++ b/vendor/golang.org/x/oauth2/google/jwt.go
@@ -36,6 +36,7 @@ func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.Token
email: cfg.Email,
audience: audience,
pk: pk,
+ pkID: cfg.PrivateKeyID,
}
tok, err := ts.Token()
if err != nil {
@@ -47,6 +48,7 @@ func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.Token
type jwtAccessTokenSource struct {
email, audience string
pk *rsa.PrivateKey
+ pkID string
}
func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) {
@@ -62,6 +64,7 @@ func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) {
hdr := &jws.Header{
Algorithm: "RS256",
Typ: "JWT",
+ KeyID: string(ts.pkID),
}
msg, err := jws.Encode(hdr, cs, ts.pk)
if err != nil {
diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go
index d29a3bb9..456224bc 100644
--- a/vendor/golang.org/x/oauth2/google/sdk.go
+++ b/vendor/golang.org/x/oauth2/google/sdk.go
@@ -5,9 +5,12 @@
package google
import (
+ "bufio"
+ "context"
"encoding/json"
"errors"
"fmt"
+ "io"
"net/http"
"os"
"os/user"
@@ -16,9 +19,7 @@ import (
"strings"
"time"
- "golang.org/x/net/context"
"golang.org/x/oauth2"
- "golang.org/x/oauth2/internal"
)
type sdkCredentials struct {
@@ -76,7 +77,7 @@ func NewSDKConfig(account string) (*SDKConfig, error) {
return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
}
defer f.Close()
- ini, err := internal.ParseINI(f)
+ ini, err := parseINI(f)
if err != nil {
return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
}
@@ -146,6 +147,34 @@ func (c *SDKConfig) Scopes() []string {
return c.conf.Scopes
}
+func parseINI(ini io.Reader) (map[string]map[string]string, error) {
+ result := map[string]map[string]string{
+ "": {}, // root section
+ }
+ scanner := bufio.NewScanner(ini)
+ currentSection := ""
+ for scanner.Scan() {
+ line := strings.TrimSpace(scanner.Text())
+ if strings.HasPrefix(line, ";") {
+ // comment.
+ continue
+ }
+ if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+ currentSection = strings.TrimSpace(line[1 : len(line)-1])
+ result[currentSection] = map[string]string{}
+ continue
+ }
+ parts := strings.SplitN(line, "=", 2)
+ if len(parts) == 2 && parts[0] != "" {
+ result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("error scanning ini: %v", err)
+ }
+ return result, nil
+}
+
// sdkConfigPath tries to guess where the gcloud config is located.
// It can be overridden during tests.
var sdkConfigPath = func() (string, error) {
@@ -160,9 +189,13 @@ var sdkConfigPath = func() (string, error) {
}
func guessUnixHomeDir() string {
- usr, err := user.Current()
- if err == nil {
- return usr.HomeDir
+ // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
+ if v := os.Getenv("HOME"); v != "" {
+ return v
+ }
+ // Else, fall back to user.Current:
+ if u, err := user.Current(); err == nil {
+ return u.HomeDir
}
- return os.Getenv("HOME")
+ return ""
}
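
Not part of the diff: parseINI is unexported, so this is an in-package sketch (imports omitted) of the gcloud SDK properties format it accepts.

// Within package google, illustrative only:
props := "[core]\naccount = someone@example.com\nproject = my-project\n; a comment\n"
m, err := parseINI(strings.NewReader(props))
if err != nil {
	log.Fatal(err)
}
fmt.Println(m["core"]["account"]) // someone@example.com
fmt.Println(m["core"]["project"]) // my-project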
diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go
new file mode 100644
index 00000000..74348718
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import "google.golang.org/appengine/urlfetch"
+
+func init() {
+ appengineClientHook = urlfetch.Client
+}
diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go
new file mode 100644
index 00000000..03265e88
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
index fbe1028d..c0ab196c 100644
--- a/vendor/golang.org/x/oauth2/internal/oauth2.go
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -2,18 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package internal contains support packages for oauth2 package.
package internal
import (
- "bufio"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
- "io"
- "strings"
)
// ParseKey converts the binary contents of a private key file
@@ -30,7 +26,7 @@ func ParseKey(key []byte) (*rsa.PrivateKey, error) {
if err != nil {
parsedKey, err = x509.ParsePKCS1PrivateKey(key)
if err != nil {
- return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err)
+ return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
}
}
parsed, ok := parsedKey.(*rsa.PrivateKey)
@@ -39,38 +35,3 @@ func ParseKey(key []byte) (*rsa.PrivateKey, error) {
}
return parsed, nil
}
-
-func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
- result := map[string]map[string]string{
- "": map[string]string{}, // root section
- }
- scanner := bufio.NewScanner(ini)
- currentSection := ""
- for scanner.Scan() {
- line := strings.TrimSpace(scanner.Text())
- if strings.HasPrefix(line, ";") {
- // comment.
- continue
- }
- if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
- currentSection = strings.TrimSpace(line[1 : len(line)-1])
- result[currentSection] = map[string]string{}
- continue
- }
- parts := strings.SplitN(line, "=", 2)
- if len(parts) == 2 && parts[0] != "" {
- result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
- }
- }
- if err := scanner.Err(); err != nil {
- return nil, fmt.Errorf("error scanning ini: %v", err)
- }
- return result, nil
-}
-
-func CondVal(v string) []string {
- if v == "" {
- return nil
- }
- return []string{v}
-}
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
index a6ed3cc7..355c3869 100644
--- a/vendor/golang.org/x/oauth2/internal/token.go
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -2,25 +2,28 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package internal contains support packages for oauth2 package.
package internal
import (
+ "context"
"encoding/json"
+ "errors"
"fmt"
"io"
"io/ioutil"
+ "math"
"mime"
"net/http"
"net/url"
"strconv"
"strings"
+ "sync"
"time"
- "golang.org/x/net/context"
+ "golang.org/x/net/context/ctxhttp"
)
-// Token represents the crendentials used to authorize
+// Token represents the credentials used to authorize
// the requests to access protected resources on the OAuth 2.0
// provider's backend.
//
@@ -60,22 +63,21 @@ type tokenJSON struct {
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
- Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in
}
func (e *tokenJSON) expiry() (t time.Time) {
if v := e.ExpiresIn; v != 0 {
return time.Now().Add(time.Duration(v) * time.Second)
}
- if v := e.Expires; v != 0 {
- return time.Now().Add(time.Duration(v) * time.Second)
- }
return
}
type expirationTime int32
func (e *expirationTime) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 || string(b) == "null" {
+ return nil
+ }
var n json.Number
err := json.Unmarshal(b, &n)
if err != nil {
@@ -85,96 +87,162 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error {
if err != nil {
return err
}
+ if i > math.MaxInt32 {
+ i = math.MaxInt32
+ }
*e = expirationTime(i)
return nil
}
-var brokenAuthHeaderProviders = []string{
- "https://accounts.google.com/",
- "https://api.dropbox.com/",
- "https://api.dropboxapi.com/",
- "https://api.instagram.com/",
- "https://api.netatmo.net/",
- "https://api.odnoklassniki.ru/",
- "https://api.pushbullet.com/",
- "https://api.soundcloud.com/",
- "https://api.twitch.tv/",
- "https://app.box.com/",
- "https://connect.stripe.com/",
- "https://login.microsoftonline.com/",
- "https://login.salesforce.com/",
- "https://oauth.sandbox.trainingpeaks.com/",
- "https://oauth.trainingpeaks.com/",
- "https://oauth.vk.com/",
- "https://openapi.baidu.com/",
- "https://slack.com/",
- "https://test-sandbox.auth.corp.google.com",
- "https://test.salesforce.com/",
- "https://user.gini.net/",
- "https://www.douban.com/",
- "https://www.googleapis.com/",
- "https://www.linkedin.com/",
- "https://www.strava.com/oauth/",
- "https://www.wunderlist.com/oauth/",
- "https://api.patreon.com/",
+// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
+//
+// Deprecated: this function no longer does anything. Caller code that
+// wants to avoid potential extra HTTP requests made during
+// auto-probing of the provider's auth style should set
+// Endpoint.AuthStyle.
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
+
+// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type.
+type AuthStyle int
+
+const (
+ AuthStyleUnknown AuthStyle = 0
+ AuthStyleInParams AuthStyle = 1
+ AuthStyleInHeader AuthStyle = 2
+)
+
+// authStyleCache is the set of tokenURLs we've successfully used via
+// RetrieveToken and which style auth we ended up using.
+// It's called a cache, but it doesn't (yet?) shrink. It's expected that
+// the set of OAuth2 servers a program contacts over time is fixed and
+// small.
+var authStyleCache struct {
+ sync.Mutex
+ m map[string]AuthStyle // keyed by tokenURL
}
-func RegisterBrokenAuthHeaderProvider(tokenURL string) {
- brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL)
+// ResetAuthCache resets the global authentication style cache used
+// for AuthStyleUnknown token requests.
+func ResetAuthCache() {
+ authStyleCache.Lock()
+ defer authStyleCache.Unlock()
+ authStyleCache.m = nil
}
-// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
-// implements the OAuth2 spec correctly
-// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
-// In summary:
-// - Reddit only accepts client secret in the Authorization header
-// - Dropbox accepts either it in URL param or Auth header, but not both.
-// - Google only accepts URL param (not spec compliant?), not Auth header
-// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
-func providerAuthHeaderWorks(tokenURL string) bool {
- for _, s := range brokenAuthHeaderProviders {
- if strings.HasPrefix(tokenURL, s) {
- // Some sites fail to implement the OAuth2 spec fully.
- return false
- }
- }
+// lookupAuthStyle reports which auth style we last used with tokenURL
+// when calling RetrieveToken and whether we have ever done so.
+func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) {
+ authStyleCache.Lock()
+ defer authStyleCache.Unlock()
+ style, ok = authStyleCache.m[tokenURL]
+ return
+}
- // Assume the provider implements the spec properly
- // otherwise. We can add more exceptions as they're
- // discovered. We will _not_ be adding configurable hooks
- // to this package to let users select server bugs.
- return true
+// setAuthStyle adds an entry to authStyleCache, documented above.
+func setAuthStyle(tokenURL string, v AuthStyle) {
+ authStyleCache.Lock()
+ defer authStyleCache.Unlock()
+ if authStyleCache.m == nil {
+ authStyleCache.m = make(map[string]AuthStyle)
+ }
+ authStyleCache.m[tokenURL] = v
}
-func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) {
- hc, err := ContextClient(ctx)
+// newTokenRequest returns a new *http.Request to retrieve a new token
+// from tokenURL using the provided clientID, clientSecret, and POST
+// body parameters.
+//
+// authStyle controls how the clientID & clientSecret are sent:
+// AuthStyleInParams encodes them in the POST body (along with any
+// values in v); AuthStyleInHeader sends them in the Authorization
+// header via HTTP Basic auth.
+func newTokenRequest(tokenURL, clientID, clientSecret string, v url.Values, authStyle AuthStyle) (*http.Request, error) {
+ if authStyle == AuthStyleInParams {
+ v = cloneURLValues(v)
+ if clientID != "" {
+ v.Set("client_id", clientID)
+ }
+ if clientSecret != "" {
+ v.Set("client_secret", clientSecret)
+ }
+ }
+ req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode()))
if err != nil {
return nil, err
}
- v.Set("client_id", ClientID)
- bustedAuth := !providerAuthHeaderWorks(TokenURL)
- if bustedAuth && ClientSecret != "" {
- v.Set("client_secret", ClientSecret)
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ if authStyle == AuthStyleInHeader {
+ req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret))
+ }
+ return req, nil
+}
+
+func cloneURLValues(v url.Values) url.Values {
+ v2 := make(url.Values, len(v))
+ for k, vv := range v {
+ v2[k] = append([]string(nil), vv...)
}
- req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode()))
+ return v2
+}
+
+func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) {
+ needsAuthStyleProbe := authStyle == 0
+ if needsAuthStyleProbe {
+ if style, ok := lookupAuthStyle(tokenURL); ok {
+ authStyle = style
+ needsAuthStyleProbe = false
+ } else {
+ authStyle = AuthStyleInHeader // the first way we'll try
+ }
+ }
+ req, err := newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle)
if err != nil {
return nil, err
}
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
- if !bustedAuth {
- req.SetBasicAuth(ClientID, ClientSecret)
+ token, err := doTokenRoundTrip(ctx, req)
+ if err != nil && needsAuthStyleProbe {
+ // If we get an error, assume the server wants the
+ // clientID & clientSecret in a different form.
+ // See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+ // In summary:
+ // - Reddit only accepts client secret in the Authorization header
+ // - Dropbox accepts either it in URL param or Auth header, but not both.
+ // - Google only accepts URL param (not spec compliant?), not Auth header
+ // - Stripe only accepts client secret in Auth header with Bearer method, not Basic
+ //
+ // We used to maintain a big table in this code of all the sites and which way
+ // they went, but maintaining it didn't scale & got annoying.
+ // So just try both ways.
+ authStyle = AuthStyleInParams // the second way we'll try
+ req, _ = newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle)
+ token, err = doTokenRoundTrip(ctx, req)
+ }
+ if needsAuthStyleProbe && err == nil {
+ setAuthStyle(tokenURL, authStyle)
}
- r, err := hc.Do(req)
+ // Don't overwrite `RefreshToken` with an empty value
+ // if this was a token refreshing request.
+ if token != nil && token.RefreshToken == "" {
+ token.RefreshToken = v.Get("refresh_token")
+ }
+ return token, err
+}
+
+func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
+ r, err := ctxhttp.Do(ctx, ContextClient(ctx), req)
if err != nil {
return nil, err
}
- defer r.Body.Close()
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
+ r.Body.Close()
if err != nil {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
}
if code := r.StatusCode; code < 200 || code > 299 {
- return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
+ return nil, &RetrieveError{
+ Response: r,
+ Body: body,
+ }
}
var token *Token
@@ -192,12 +260,6 @@ func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string,
Raw: vals,
}
e := vals.Get("expires_in")
- if e == "" {
- // TODO(jbd): Facebook's OAuth2 implementation is broken and
- // returns expires_in field in expires. Remove the fallback to expires,
- // when Facebook fixes their implementation.
- e = vals.Get("expires")
- }
expires, _ := strconv.Atoi(e)
if expires != 0 {
token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
@@ -216,10 +278,17 @@ func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string,
}
json.Unmarshal(body, &token.Raw) // no error checks for optional fields
}
- // Don't overwrite `RefreshToken` with an empty value
- // if this was a token refreshing request.
- if token.RefreshToken == "" {
- token.RefreshToken = v.Get("refresh_token")
+ if token.AccessToken == "" {
+ return nil, errors.New("oauth2: server response missing access_token")
}
return token, nil
}
+
+type RetrieveError struct {
+ Response *http.Response
+ Body []byte
+}
+
+func (r *RetrieveError) Error() string {
+ return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
+}
diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go
index f1f173e3..572074a6 100644
--- a/vendor/golang.org/x/oauth2/internal/transport.go
+++ b/vendor/golang.org/x/oauth2/internal/transport.go
@@ -2,13 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package internal contains support packages for oauth2 package.
package internal
import (
+ "context"
"net/http"
-
- "golang.org/x/net/context"
)
// HTTPClient is the context key to use with golang.org/x/net/context's
@@ -20,50 +18,16 @@ var HTTPClient ContextKey
// because nobody else can create a ContextKey, being unexported.
type ContextKey struct{}
-// ContextClientFunc is a func which tries to return an *http.Client
-// given a Context value. If it returns an error, the search stops
-// with that error. If it returns (nil, nil), the search continues
-// down the list of registered funcs.
-type ContextClientFunc func(context.Context) (*http.Client, error)
-
-var contextClientFuncs []ContextClientFunc
-
-func RegisterContextClientFunc(fn ContextClientFunc) {
- contextClientFuncs = append(contextClientFuncs, fn)
-}
+var appengineClientHook func(context.Context) *http.Client
-func ContextClient(ctx context.Context) (*http.Client, error) {
+func ContextClient(ctx context.Context) *http.Client {
if ctx != nil {
if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
- return hc, nil
- }
- }
- for _, fn := range contextClientFuncs {
- c, err := fn(ctx)
- if err != nil {
- return nil, err
- }
- if c != nil {
- return c, nil
+ return hc
}
}
- return http.DefaultClient, nil
-}
-
-func ContextTransport(ctx context.Context) http.RoundTripper {
- hc, err := ContextClient(ctx)
- // This is a rare error case (somebody using nil on App Engine).
- if err != nil {
- return ErrorTransport{err}
+ if appengineClientHook != nil {
+ return appengineClientHook(ctx)
}
- return hc.Transport
-}
-
-// ErrorTransport returns the specified error on RoundTrip.
-// This RoundTripper should be used in rare error cases where
-// error handling can be postponed to response handling time.
-type ErrorTransport struct{ Err error }
-
-func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) {
- return nil, t.Err
+ return http.DefaultClient
}
diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go
index 29887ea9..683d2d27 100644
--- a/vendor/golang.org/x/oauth2/jws/jws.go
+++ b/vendor/golang.org/x/oauth2/jws/jws.go
@@ -2,8 +2,16 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package jws provides encoding and decoding utilities for
-// signed JWS messages.
+// Package jws provides a partial implementation
+// of JSON Web Signature encoding and decoding.
+// It exists to support the golang.org/x/oauth2 package.
+//
+// See RFC 7515.
+//
+// Deprecated: this package is not intended for public use and might be
+// removed in the future. It exists for internal use only.
+// Please switch to another JWS package or copy this package into your own
+// source tree.
package jws // import "golang.org/x/oauth2/jws"
import (
@@ -64,7 +72,7 @@ func (c *ClaimSet) encode() (string, error) {
}
if len(c.PrivateClaims) == 0 {
- return base64Encode(b), nil
+ return base64.RawURLEncoding.EncodeToString(b), nil
}
// Marshal private claim set and then append it to b.
@@ -82,7 +90,7 @@ func (c *ClaimSet) encode() (string, error) {
}
b[len(b)-1] = ',' // Replace closing curly brace with a comma.
b = append(b, prv[1:]...) // Append private claims.
- return base64Encode(b), nil
+ return base64.RawURLEncoding.EncodeToString(b), nil
}
// Header represents the header for the signed JWS payloads.
@@ -92,6 +100,9 @@ type Header struct {
// Represents the token type.
Typ string `json:"typ"`
+
+ // The optional hint of which key is being used.
+ KeyID string `json:"kid,omitempty"`
}
func (h *Header) encode() (string, error) {
@@ -99,7 +110,7 @@ func (h *Header) encode() (string, error) {
if err != nil {
return "", err
}
- return base64Encode(b), nil
+ return base64.RawURLEncoding.EncodeToString(b), nil
}
// Decode decodes a claim set from a JWS payload.
@@ -110,7 +121,7 @@ func Decode(payload string) (*ClaimSet, error) {
// TODO(jbd): Provide more context about the error.
return nil, errors.New("jws: invalid token received")
}
- decoded, err := base64Decode(s[1])
+ decoded, err := base64.RawURLEncoding.DecodeString(s[1])
if err != nil {
return nil, err
}
@@ -137,7 +148,7 @@ func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) {
if err != nil {
return "", err
}
- return fmt.Sprintf("%s.%s", ss, base64Encode(sig)), nil
+ return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil
}
// Encode encodes a signed JWS with provided header and claim set.
@@ -160,7 +171,7 @@ func Verify(token string, key *rsa.PublicKey) error {
}
signedContent := parts[0] + "." + parts[1]
- signatureString, err := base64Decode(parts[2])
+ signatureString, err := base64.RawURLEncoding.DecodeString(parts[2])
if err != nil {
return err
}
@@ -169,23 +180,3 @@ func Verify(token string, key *rsa.PublicKey) error {
h.Write([]byte(signedContent))
return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString))
}
-
-// base64Encode returns and Base64url encoded version of the input string with any
-// trailing "=" stripped.
-func base64Encode(b []byte) string {
- return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
-}
-
-// base64Decode decodes the Base64url encoded string
-func base64Decode(s string) ([]byte, error) {
- // add back missing padding
- switch len(s) % 4 {
- case 1:
- s += "==="
- case 2:
- s += "=="
- case 3:
- s += "="
- }
- return base64.URLEncoding.DecodeString(s)
-}
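
Not part of the diff: the removed base64Encode/base64Decode helpers are equivalent to the standard library's unpadded URL-safe encoding, which is why base64.RawURLEncoding can replace them; a quick check:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	b := []byte(`{"alg":"RS256","typ":"JWT"}`)
	old := strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") // previous base64Encode
	now := base64.RawURLEncoding.EncodeToString(b)
	fmt.Println(old == now) // true
}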
diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go
index 2ffad21a..99f3e0a3 100644
--- a/vendor/golang.org/x/oauth2/jwt/jwt.go
+++ b/vendor/golang.org/x/oauth2/jwt/jwt.go
@@ -9,6 +9,7 @@
package jwt
import (
+ "context"
"encoding/json"
"fmt"
"io"
@@ -18,7 +19,6 @@ import (
"strings"
"time"
- "golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/internal"
"golang.org/x/oauth2/jws"
@@ -46,6 +46,10 @@ type Config struct {
//
PrivateKey []byte
+ // PrivateKeyID contains an optional hint indicating which key is being
+ // used.
+ PrivateKeyID string
+
// Subject is the optional user to impersonate.
Subject string
@@ -57,6 +61,11 @@ type Config struct {
// Expires optionally specifies how long the token is valid for.
Expires time.Duration
+
+ // Audience optionally specifies the intended audience of the
+ // request. If empty, the value of TokenURL is used as the
+ // intended audience.
+ Audience string
}
// TokenSource returns a JWT TokenSource using the configuration
@@ -101,7 +110,12 @@ func (js jwtSource) Token() (*oauth2.Token, error) {
if t := js.conf.Expires; t > 0 {
claimSet.Exp = time.Now().Add(t).Unix()
}
- payload, err := jws.Encode(defaultHeader, claimSet, pk)
+ if aud := js.conf.Audience; aud != "" {
+ claimSet.Aud = aud
+ }
+ h := *defaultHeader
+ h.KeyID = js.conf.PrivateKeyID
+ payload, err := jws.Encode(&h, claimSet, pk)
if err != nil {
return nil, err
}
@@ -118,7 +132,10 @@ func (js jwtSource) Token() (*oauth2.Token, error) {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
}
if c := resp.StatusCode; c < 200 || c > 299 {
- return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body)
+ return nil, &oauth2.RetrieveError{
+ Response: resp,
+ Body: body,
+ }
}
// tokenRes is the JSON response body.
var tokenRes struct {
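
Not part of the diff: a sketch of the new jwt.Config fields (PrivateKeyID is emitted as the JWS "kid" header; Audience overrides the default TokenURL audience). The key file, email, and audience URL are placeholders.

package main

import (
	"context"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2/google"
	"golang.org/x/oauth2/jwt"
)

func main() {
	pem, err := ioutil.ReadFile("key.pem") // placeholder PEM-encoded RSA key
	if err != nil {
		log.Fatal(err)
	}
	conf := &jwt.Config{
		Email:        "svc@example.iam.gserviceaccount.com", // placeholder
		PrivateKey:   pem,
		PrivateKeyID: "1234abcd", // sent as the "kid" JWS header
		TokenURL:     google.JWTTokenURL,
		Audience:     "https://example.googleapis.com/", // optional; defaults to TokenURL
	}
	client := conf.Client(context.Background())
	_ = client
}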
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
index a6828960..428283f0 100644
--- a/vendor/golang.org/x/oauth2/oauth2.go
+++ b/vendor/golang.org/x/oauth2/oauth2.go
@@ -3,40 +3,41 @@
// license that can be found in the LICENSE file.
// Package oauth2 provides support for making
-// OAuth2 authorized and authenticated HTTP requests.
+// OAuth2 authorized and authenticated HTTP requests,
+// as specified in RFC 6749.
// It can additionally grant authorization with Bearer JWT.
package oauth2 // import "golang.org/x/oauth2"
import (
"bytes"
+ "context"
"errors"
"net/http"
"net/url"
"strings"
"sync"
- "golang.org/x/net/context"
"golang.org/x/oauth2/internal"
)
// NoContext is the default context you should supply if not using
// your own context.Context (see https://golang.org/x/net/context).
+//
+// Deprecated: Use context.Background() or context.TODO() instead.
var NoContext = context.TODO()
-// RegisterBrokenAuthHeaderProvider registers an OAuth2 server
-// identified by the tokenURL prefix as an OAuth2 implementation
-// which doesn't support the HTTP Basic authentication
-// scheme to authenticate with the authorization server.
-// Once a server is registered, credentials (client_id and client_secret)
-// will be passed as query parameters rather than being present
-// in the Authorization header.
-// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
-func RegisterBrokenAuthHeaderProvider(tokenURL string) {
- internal.RegisterBrokenAuthHeaderProvider(tokenURL)
-}
+// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
+//
+// Deprecated: this function no longer does anything. Caller code that
+// wants to avoid potential extra HTTP requests made during
+// auto-probing of the provider's auth style should set
+// Endpoint.AuthStyle.
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
// Config describes a typical 3-legged OAuth2 flow, with both the
// client application information and the server's endpoint URLs.
+// For the client credentials 2-legged OAuth2 flow, see the clientcredentials
+// package (https://golang.org/x/oauth2/clientcredentials).
type Config struct {
// ClientID is the application's ID.
ClientID string
@@ -66,13 +67,38 @@ type TokenSource interface {
Token() (*Token, error)
}
-// Endpoint contains the OAuth 2.0 provider's authorization and token
+// Endpoint represents an OAuth 2.0 provider's authorization and token
// endpoint URLs.
type Endpoint struct {
AuthURL string
TokenURL string
+
+ // AuthStyle optionally specifies how the endpoint wants the
+ // client ID & client secret sent. The zero value means to
+ // auto-detect.
+ AuthStyle AuthStyle
}
+// AuthStyle represents how requests for tokens are authenticated
+// to the server.
+type AuthStyle int
+
+const (
+ // AuthStyleAutoDetect means to auto-detect which authentication
+ // style the provider wants by trying both ways and caching
+ // the successful way for the future.
+ AuthStyleAutoDetect AuthStyle = 0
+
+ // AuthStyleInParams sends the "client_id" and "client_secret"
+ // in the POST body as application/x-www-form-urlencoded parameters.
+ AuthStyleInParams AuthStyle = 1
+
+ // AuthStyleInHeader sends the client_id and client_password
+ // using HTTP Basic Authorization. This is an optional style
+ // described in the OAuth2 RFC 6749 section 2.3.1.
+ AuthStyleInHeader AuthStyle = 2
+)
+
var (
// AccessTypeOnline and AccessTypeOffline are options passed
// to the Options.AuthCodeURL method. They modify the
@@ -113,21 +139,30 @@ func SetAuthURLParam(key, value string) AuthCodeOption {
// that asks for permissions for the required scopes explicitly.
//
// State is a token to protect the user from CSRF attacks. You must
-// always provide a non-zero string and validate that it matches the
+// always provide a non-empty string and validate that it matches the
// state query parameter on your redirect callback.
// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
//
// Opts may include AccessTypeOnline or AccessTypeOffline, as well
// as ApprovalForce.
+// It can also be used to pass the PKCE challenge.
+// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
var buf bytes.Buffer
buf.WriteString(c.Endpoint.AuthURL)
v := url.Values{
"response_type": {"code"},
"client_id": {c.ClientID},
- "redirect_uri": internal.CondVal(c.RedirectURL),
- "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
- "state": internal.CondVal(state),
+ }
+ if c.RedirectURL != "" {
+ v.Set("redirect_uri", c.RedirectURL)
+ }
+ if len(c.Scopes) > 0 {
+ v.Set("scope", strings.Join(c.Scopes, " "))
+ }
+ if state != "" {
+ // TODO(light): Docs say never to omit state; don't allow empty.
+ v.Set("state", state)
}
for _, opt := range opts {
opt.setValue(v)
@@ -150,15 +185,17 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
// and when other authorization grant types are not available."
// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
//
-// The HTTP client to use is derived from the context.
-// If nil, http.DefaultClient is used.
+// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
- return retrieveToken(ctx, c, url.Values{
+ v := url.Values{
"grant_type": {"password"},
"username": {username},
"password": {password},
- "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
- })
+ }
+ if len(c.Scopes) > 0 {
+ v.Set("scope", strings.Join(c.Scopes, " "))
+ }
+ return retrieveToken(ctx, c, v)
}
// Exchange converts an authorization code into a token.
@@ -166,18 +203,25 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor
// It is used after a resource provider redirects the user back
// to the Redirect URI (the URL obtained from AuthCodeURL).
//
-// The HTTP client to use is derived from the context.
-// If a client is not provided via the context, http.DefaultClient is used.
+// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
//
// The code will be in the *http.Request.FormValue("code"). Before
// calling Exchange, be sure to validate FormValue("state").
-func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
- return retrieveToken(ctx, c, url.Values{
- "grant_type": {"authorization_code"},
- "code": {code},
- "redirect_uri": internal.CondVal(c.RedirectURL),
- "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
- })
+//
+// Opts may include the PKCE verifier code if previously used in AuthCodeURL.
+// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
+func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) {
+ v := url.Values{
+ "grant_type": {"authorization_code"},
+ "code": {code},
+ }
+ if c.RedirectURL != "" {
+ v.Set("redirect_uri", c.RedirectURL)
+ }
+ for _, opt := range opts {
+ opt.setValue(v)
+ }
+ return retrieveToken(ctx, c, v)
}
// Client returns an HTTP client using the provided token.
@@ -288,20 +332,20 @@ var HTTPClient internal.ContextKey
// NewClient creates an *http.Client from a Context and TokenSource.
// The returned client is not valid beyond the lifetime of the context.
//
+// Note that if a custom *http.Client is provided via the Context it
+// is used only for token acquisition and is not used to configure the
+// *http.Client returned from NewClient.
+//
// As a special case, if src is nil, a non-OAuth2 client is returned
// using the provided context. This exists to support related OAuth2
// packages.
func NewClient(ctx context.Context, src TokenSource) *http.Client {
if src == nil {
- c, err := internal.ContextClient(ctx)
- if err != nil {
- return &http.Client{Transport: internal.ErrorTransport{err}}
- }
- return c
+ return internal.ContextClient(ctx)
}
return &http.Client{
Transport: &Transport{
- Base: internal.ContextTransport(ctx),
+ Base: internal.ContextClient(ctx).Transport,
Source: ReuseTokenSource(nil, src),
},
}
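
Not part of the diff: pinning Endpoint.AuthStyle avoids the new auto-probe, and the variadic options on AuthCodeURL/Exchange can carry PKCE parameters. Endpoint URLs, client values, and the PKCE strings below are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "client-id",
		ClientSecret: "client-secret",
		RedirectURL:  "https://app.example.com/callback",
		Scopes:       []string{"openid"},
		Endpoint: oauth2.Endpoint{
			AuthURL:   "https://provider.example.com/oauth2/auth",
			TokenURL:  "https://provider.example.com/oauth2/token",
			AuthStyle: oauth2.AuthStyleInParams, // skip header-vs-params probing
		},
	}
	authURL := conf.AuthCodeURL("state-token",
		oauth2.SetAuthURLParam("code_challenge", "placeholder-challenge"),
		oauth2.SetAuthURLParam("code_challenge_method", "S256"))
	fmt.Println(authURL)

	// On the callback, pass the matching verifier through Exchange's new opts.
	tok, err := conf.Exchange(context.Background(), "auth-code-from-callback",
		oauth2.SetAuthURLParam("code_verifier", "placeholder-verifier"))
	if err != nil {
		log.Fatal(err)
	}
	_ = tok
}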
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
index 7a3167f1..82272034 100644
--- a/vendor/golang.org/x/oauth2/token.go
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -5,13 +5,14 @@
package oauth2
import (
+ "context"
+ "fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
- "golang.org/x/net/context"
"golang.org/x/oauth2/internal"
)
@@ -20,7 +21,7 @@ import (
// expirations due to client-server time mismatches.
const expiryDelta = 10 * time.Second
-// Token represents the crendentials used to authorize
+// Token represents the credentials used to authorize
// the requests to access protected resources on the OAuth 2.0
// provider's backend.
//
@@ -117,13 +118,16 @@ func (t *Token) Extra(key string) interface{} {
return v
}
+// timeNow is time.Now but pulled out as a variable for tests.
+var timeNow = time.Now
+
// expired reports whether the token is expired.
// t must be non-nil.
func (t *Token) expired() bool {
if t.Expiry.IsZero() {
return false
}
- return t.Expiry.Add(-expiryDelta).Before(time.Now())
+ return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())
}
// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
@@ -150,9 +154,25 @@ func tokenFromInternal(t *internal.Token) *Token {
// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
// with an error.
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
- tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
+ tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle))
if err != nil {
+ if rErr, ok := err.(*internal.RetrieveError); ok {
+ return nil, (*RetrieveError)(rErr)
+ }
return nil, err
}
return tokenFromInternal(tk), nil
}
+
+// RetrieveError is the error returned when the token endpoint returns a
+// non-2XX HTTP status code.
+type RetrieveError struct {
+ Response *http.Response
+ // Body is the body that was consumed by reading Response.Body.
+ // It may be truncated.
+ Body []byte
+}
+
+func (r *RetrieveError) Error() string {
+ return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
+}
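
Not part of the diff: callers can now type-assert on *oauth2.RetrieveError to inspect the raw token-endpoint response instead of parsing an error string. A sketch (imports omitted; the wrapper name is hypothetical):

// exchange wraps Config.Exchange and logs non-2xx token responses.
func exchange(ctx context.Context, conf *oauth2.Config, code string) (*oauth2.Token, error) {
	tok, err := conf.Exchange(ctx, code)
	if err != nil {
		if rErr, ok := err.(*oauth2.RetrieveError); ok {
			log.Printf("token endpoint %s: %s", rErr.Response.Status, rErr.Body)
		}
		return nil, err
	}
	return tok, nil
}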
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
index 92ac7e25..aa0d34f1 100644
--- a/vendor/golang.org/x/oauth2/transport.go
+++ b/vendor/golang.org/x/oauth2/transport.go
@@ -31,9 +31,17 @@ type Transport struct {
}
// RoundTrip authorizes and authenticates the request with an
-// access token. If no token exists or token is expired,
-// tries to refresh/fetch a new token.
+// access token from Transport's Source.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ reqBodyClosed := false
+ if req.Body != nil {
+ defer func() {
+ if !reqBodyClosed {
+ req.Body.Close()
+ }
+ }()
+ }
+
if t.Source == nil {
return nil, errors.New("oauth2: Transport's Source is nil")
}
@@ -46,6 +54,10 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
token.SetAuthHeader(req2)
t.setModReq(req, req2)
res, err := t.base().RoundTrip(req2)
+
+ // req.Body is assumed to have been closed by the base RoundTripper.
+ reqBodyClosed = true
+
if err != nil {
t.setModReq(req, nil)
return nil, err
diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go
new file mode 100644
index 00000000..ac53e733
--- /dev/null
+++ b/vendor/golang.org/x/sync/semaphore/semaphore.go
@@ -0,0 +1,127 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package semaphore provides a weighted semaphore implementation.
+package semaphore // import "golang.org/x/sync/semaphore"
+
+import (
+ "container/list"
+ "context"
+ "sync"
+)
+
+type waiter struct {
+ n int64
+ ready chan<- struct{} // Closed when semaphore acquired.
+}
+
+// NewWeighted creates a new weighted semaphore with the given
+// maximum combined weight for concurrent access.
+func NewWeighted(n int64) *Weighted {
+ w := &Weighted{size: n}
+ return w
+}
+
+// Weighted provides a way to bound concurrent access to a resource.
+// The callers can request access with a given weight.
+type Weighted struct {
+ size int64
+ cur int64
+ mu sync.Mutex
+ waiters list.List
+}
+
+// Acquire acquires the semaphore with a weight of n, blocking until resources
+// are available or ctx is done. On success, returns nil. On failure, returns
+// ctx.Err() and leaves the semaphore unchanged.
+//
+// If ctx is already done, Acquire may still succeed without blocking.
+func (s *Weighted) Acquire(ctx context.Context, n int64) error {
+ s.mu.Lock()
+ if s.size-s.cur >= n && s.waiters.Len() == 0 {
+ s.cur += n
+ s.mu.Unlock()
+ return nil
+ }
+
+ if n > s.size {
+ // Don't make other Acquire calls block on one that's doomed to fail.
+ s.mu.Unlock()
+ <-ctx.Done()
+ return ctx.Err()
+ }
+
+ ready := make(chan struct{})
+ w := waiter{n: n, ready: ready}
+ elem := s.waiters.PushBack(w)
+ s.mu.Unlock()
+
+ select {
+ case <-ctx.Done():
+ err := ctx.Err()
+ s.mu.Lock()
+ select {
+ case <-ready:
+ // Acquired the semaphore after we were canceled. Rather than trying to
+ // fix up the queue, just pretend we didn't notice the cancelation.
+ err = nil
+ default:
+ s.waiters.Remove(elem)
+ }
+ s.mu.Unlock()
+ return err
+
+ case <-ready:
+ return nil
+ }
+}
+
+// TryAcquire acquires the semaphore with a weight of n without blocking.
+// On success, returns true. On failure, returns false and leaves the semaphore unchanged.
+func (s *Weighted) TryAcquire(n int64) bool {
+ s.mu.Lock()
+ success := s.size-s.cur >= n && s.waiters.Len() == 0
+ if success {
+ s.cur += n
+ }
+ s.mu.Unlock()
+ return success
+}
+
+// Release releases the semaphore with a weight of n.
+func (s *Weighted) Release(n int64) {
+ s.mu.Lock()
+ s.cur -= n
+ if s.cur < 0 {
+ s.mu.Unlock()
+ panic("semaphore: bad release")
+ }
+ for {
+ next := s.waiters.Front()
+ if next == nil {
+ break // No more waiters blocked.
+ }
+
+ w := next.Value.(waiter)
+ if s.size-s.cur < w.n {
+ // Not enough tokens for the next waiter. We could keep going (to try to
+ // find a waiter with a smaller request), but under load that could cause
+ // starvation for large requests; instead, we leave all remaining waiters
+ // blocked.
+ //
+ // Consider a semaphore used as a read-write lock, with N tokens, N
+ // readers, and one writer. Each reader can Acquire(1) to obtain a read
+ // lock. The writer can Acquire(N) to obtain a write lock, excluding all
+ // of the readers. If we allow the readers to jump ahead in the queue,
+ // the writer will starve — there is always one token available for every
+ // reader.
+ break
+ }
+
+ s.cur += w.n
+ s.waiters.Remove(next)
+ close(w.ready)
+ }
+ s.mu.Unlock()
+}
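The weighted semaphore above is what the bundler later in this diff uses to cap buffered bytes. A minimal, hedged usage sketch of the exported API (NewWeighted, Acquire, TryAcquire, Release); the weights and the limit of 3 are arbitrary illustration values:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	// Allow at most 3 units of weight to be held at once.
	sem := semaphore.NewWeighted(3)
	ctx := context.Background()

	// Acquire blocks until the requested weight is available or ctx is done.
	if err := sem.Acquire(ctx, 2); err != nil {
		fmt.Println("acquire failed:", err)
		return
	}

	// TryAcquire never blocks; only 1 unit is left, so asking for 2 fails.
	fmt.Println(sem.TryAcquire(2)) // false
	fmt.Println(sem.TryAcquire(1)) // true

	// Release returns weight to the semaphore; releasing more than is held panics.
	sem.Release(3)
}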
diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go
new file mode 100644
index 00000000..eca1ea25
--- /dev/null
+++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go
@@ -0,0 +1,38 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package transport contains HTTP transports used to make
+// authenticated API requests.
+package transport
+
+import (
+ "errors"
+ "net/http"
+)
+
+// APIKey is an HTTP Transport which wraps an underlying transport and
+// appends an API Key "key" parameter to the URL of outgoing requests.
+type APIKey struct {
+ // Key is the API Key to set on requests.
+ Key string
+
+ // Transport is the underlying HTTP transport.
+ // If nil, http.DefaultTransport is used.
+ Transport http.RoundTripper
+}
+
+func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) {
+ rt := t.Transport
+ if rt == nil {
+ rt = http.DefaultTransport
+ if rt == nil {
+ return nil, errors.New("googleapi/transport: no Transport specified or available")
+ }
+ }
+ newReq := *req
+ args := newReq.URL.Query()
+ args.Set("key", t.Key)
+ newReq.URL.RawQuery = args.Encode()
+ return rt.RoundTrip(&newReq)
+}
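A hedged sketch of how the APIKey transport is wired into an http.Client; the key string and URL are placeholders, not values taken from this change:

package main

import (
	"net/http"

	"google.golang.org/api/googleapi/transport"
)

func main() {
	client := &http.Client{
		Transport: &transport.APIKey{
			Key: "YOUR_API_KEY", // placeholder value, not a real credential
			// Transport left nil, so http.DefaultTransport is used underneath.
		},
	}
	// Every request sent through client now has ?key=... appended to its URL.
	resp, err := client.Get("https://www.googleapis.com/discovery/v1/apis")
	if err == nil {
		resp.Body.Close()
	}
}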
diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go
new file mode 100644
index 00000000..69b8659f
--- /dev/null
+++ b/vendor/google.golang.org/api/internal/creds.go
@@ -0,0 +1,102 @@
+// Copyright 2017 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+
+ "golang.org/x/oauth2"
+
+ "golang.org/x/oauth2/google"
+)
+
+// Creds returns credential information obtained from DialSettings, or if none, then
+// it returns default credential information.
+func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) {
+ if ds.Credentials != nil {
+ return ds.Credentials, nil
+ }
+ if ds.CredentialsJSON != nil {
+ return credentialsFromJSON(ctx, ds.CredentialsJSON, ds.Endpoint, ds.Scopes, ds.Audiences)
+ }
+ if ds.CredentialsFile != "" {
+ data, err := ioutil.ReadFile(ds.CredentialsFile)
+ if err != nil {
+ return nil, fmt.Errorf("cannot read credentials file: %v", err)
+ }
+ return credentialsFromJSON(ctx, data, ds.Endpoint, ds.Scopes, ds.Audiences)
+ }
+ if ds.TokenSource != nil {
+ return &google.Credentials{TokenSource: ds.TokenSource}, nil
+ }
+ cred, err := google.FindDefaultCredentials(ctx, ds.Scopes...)
+ if err != nil {
+ return nil, err
+ }
+ if len(cred.JSON) > 0 {
+ return credentialsFromJSON(ctx, cred.JSON, ds.Endpoint, ds.Scopes, ds.Audiences)
+ }
+ // For GAE and GCE, the JSON is empty so return the default credentials directly.
+ return cred, nil
+}
+
+// JSON key file type.
+const (
+ serviceAccountKey = "service_account"
+)
+
+// credentialsFromJSON returns a google.Credentials based on the input.
+//
+// - If the JSON is a service account and no scopes are provided, returns a self-signed JWT auth flow.
+// - Otherwise, returns OAuth 2.0 flow.
+func credentialsFromJSON(ctx context.Context, data []byte, endpoint string, scopes []string, audiences []string) (*google.Credentials, error) {
+ cred, err := google.CredentialsFromJSON(ctx, data, scopes...)
+ if err != nil {
+ return nil, err
+ }
+ if len(data) > 0 && len(scopes) == 0 {
+ var f struct {
+ Type string `json:"type"`
+ // The remaining JSON fields are omitted because they are not used.
+ }
+ if err := json.Unmarshal(cred.JSON, &f); err != nil {
+ return nil, err
+ }
+ if f.Type == serviceAccountKey {
+ ts, err := selfSignedJWTTokenSource(data, endpoint, audiences)
+ if err != nil {
+ return nil, err
+ }
+ cred.TokenSource = ts
+ }
+ }
+ return cred, err
+}
+
+func selfSignedJWTTokenSource(data []byte, endpoint string, audiences []string) (oauth2.TokenSource, error) {
+ // Use the API endpoint as the default audience
+ audience := endpoint
+ if len(audiences) > 0 {
+ // TODO(shinfan): Update golang oauth to support multiple audiences.
+ if len(audiences) > 1 {
+ return nil, fmt.Errorf("multiple audiences support is not implemented")
+ }
+ audience = audiences[0]
+ }
+ return google.JWTAccessTokenSourceFromJSON(data, audience)
+}
diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go
new file mode 100644
index 00000000..a4426dcb
--- /dev/null
+++ b/vendor/google.golang.org/api/internal/pool.go
@@ -0,0 +1,61 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "errors"
+
+ "google.golang.org/grpc/naming"
+)
+
+// PoolResolver provides a fixed list of addresses to load balance between
+// and does not provide further updates.
+type PoolResolver struct {
+ poolSize int
+ dialOpt *DialSettings
+ ch chan []*naming.Update
+}
+
+// NewPoolResolver returns a PoolResolver.
+// This is an EXPERIMENTAL API and may be changed or removed in the future.
+func NewPoolResolver(size int, o *DialSettings) *PoolResolver {
+ return &PoolResolver{poolSize: size, dialOpt: o}
+}
+
+// Resolve returns a Watcher for the endpoint defined by the DialSettings
+// provided to NewPoolResolver.
+func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) {
+ if r.dialOpt.Endpoint == "" {
+ return nil, errors.New("no endpoint configured")
+ }
+ addrs := make([]*naming.Update, 0, r.poolSize)
+ for i := 0; i < r.poolSize; i++ {
+ addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i})
+ }
+ r.ch = make(chan []*naming.Update, 1)
+ r.ch <- addrs
+ return r, nil
+}
+
+// Next returns a static list of updates on the first call,
+// and blocks indefinitely until Close is called on subsequent calls.
+func (r *PoolResolver) Next() ([]*naming.Update, error) {
+ return <-r.ch, nil
+}
+
+// Close releases resources associated with the pool and causes Next to unblock.
+func (r *PoolResolver) Close() {
+ close(r.ch)
+}
diff --git a/vendor/google.golang.org/api/internal/service-account.json b/vendor/google.golang.org/api/internal/service-account.json
new file mode 100644
index 00000000..6b36a929
--- /dev/null
+++ b/vendor/google.golang.org/api/internal/service-account.json
@@ -0,0 +1,12 @@
+{
+ "type": "service_account",
+ "project_id": "project_id",
+ "private_key_id": "private_key_id",
+ "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCzd9ZdbPLAR4/g\nj+Rodu15kEasMpxf/Mz+gKRb2fmgR2Y18Y/iRBYZ4SkmF2pBSfzvwE/aTCzSPBGl\njHhPzohXnSN029eWoItmxVONlqCbR29pD07aLzv08LGeIGdHIEdhVjhvRwTkYZIF\ndXmlHNDRUU/EbJN9D+3ahw22BNnC4PaDgfIWTs3xIlTCSf2rL39I4DSNLTS/LzxK\n/XrQfBMtfwMWwyQaemXbc7gRgzOy8L56wa1W1zyXx99th97j1bLnoAXBGplhB4Co\n25ohyDAuhxRm+XGMEaO0Mzo7u97kvhj48a569RH1QRhOf7EBf60jO4h5eOmfi5P5\nPV3l7041AgMBAAECggEAEZ0RTNoEeRqM5F067YW+iM/AH+ZXspP9Cn1VpC4gcbqQ\nLXsnw+0qvh97CmIB66Z3TJBzRdl0DK4YjUbcB/kdKHwjnrR01DOtesijCqJd4N+B\n762w73jzSXbV9872U+S3HLZ5k3JE6KUqz55X8fyCAgkY6w4862lEzs2yasrPFHEV\nRoQp3PM0Miif8R3hGDhOWcHxcobullthG6JHAQFfc1ctwEjZI4TK0iWqlzfWGyKN\nT9UgvjUDud5cGvS9el0AiLN6keAf77tcPn1zetUVhxN1KN4bVAm1Q+6O8esl63Rj\n7JXpHzxaRnit9S6/aH/twHsGGtLg5Puw6jey6xs4AQKBgQD2JNy1wzewCRkD+jug\n8CHbJ+LIJVRNIaWa/RK1QD8/UjmFPkIzRQSF3AKC5mRAWSa2FL3yVK3N/DD7hazW\n85XSBB7IDcnoJnA9SkUeWwqQGkDx3EntlU3gX8Kn/+ofF8O9jLXxAa901MAVXVuf\n5YDzrl4PNE3bFnPCdiNmSdRfhQKBgQC6p4DsCpwqbeTu9f5ak9VW/fQP47Fgt+Mf\nwGjBnKP5PbbNJpHCfamF7jqSRH83Xy0KNssH7jD/NZ2oT594sMmiQPUC5ni9VYY6\nsuYB0JbD5Mq+EjKIVhYtxaQJ76LzHreEI+G4z6k3H7/hRpr3/C48n9G/uVkT9DbJ\noplxxEx68QKBgQCdJ23vcwO0Firtmi/GEmtbVHz70rGfSXNFoHz4UlvPXv0wsE5u\nE4vOt2i3EMhDOWh46odYGG6bzH+tp2xyFTW70Dui+QLHgPs6dpfoyLHWzZxXj5F3\n6lK9hgZvYvqk/XRRKmzjwnK2wjsdqOyeC1covlR5mqh20D/6kZkKbur0TQKBgAwy\nCZBimRWEnKKoW/gbFKNccGfhXqONID/g2Hdd/rC4QYth68AjacIgcJ9B7nX1uAGk\n1tsryvPB0w0+NpMyKdp6GAgaeuUUA3MuYSzZLiCagEyu77JMvaI7+Z3UlHcCGMd/\neK4Uk1/QqT7U2Cc/yN2ZK6E1QQa2vCWshA4U31JhAoGAbtbSSSsul1c+PsJ13Cfk\n6qVnqYzPqt23QTyOZmGAvUHH/M4xRiQpOE0cDF4t/r5PwenAQPQzTvMmWRzj6uAY\n3eaU0eAK7ZfoweCoOIAPnpFbbRLrXfoY46H7MYh7euWGXOKEpxz5yzuEkd9ByNUE\n86vSEidqbMIiXVgEgnu/k08=\n-----END PRIVATE KEY-----\n",
+ "client_email": "xyz@developer.gserviceaccount.com",
+ "client_id": "123",
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ "token_uri": "https://accounts.google.com/o/oauth2/token",
+ "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+ "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/xyz%40developer.gserviceaccount.com"
+}
diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go
new file mode 100644
index 00000000..062301c6
--- /dev/null
+++ b/vendor/google.golang.org/api/internal/settings.go
@@ -0,0 +1,96 @@
+// Copyright 2017 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package internal supports the options and transport packages.
+package internal
+
+import (
+ "errors"
+ "net/http"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ "google.golang.org/grpc"
+)
+
+// DialSettings holds information needed to establish a connection with a
+// Google API service.
+type DialSettings struct {
+ Endpoint string
+ Scopes []string
+ TokenSource oauth2.TokenSource
+ Credentials *google.Credentials
+ CredentialsFile string // if set, TokenSource is ignored.
+ CredentialsJSON []byte
+ UserAgent string
+ APIKey string
+ Audiences []string
+ HTTPClient *http.Client
+ GRPCDialOpts []grpc.DialOption
+ GRPCConn *grpc.ClientConn
+ NoAuth bool
+
+ // Google API system parameters. For more information please read:
+ // https://cloud.google.com/apis/docs/system-parameters
+ QuotaProject string
+ RequestReason string
+}
+
+// Validate reports an error if ds is invalid.
+func (ds *DialSettings) Validate() error {
+ hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil
+ if ds.NoAuth && hasCreds {
+ return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials")
+ }
+ // Credentials should not appear with other options.
+ // We currently allow TokenSource and CredentialsFile to coexist.
+ // TODO(jba): make TokenSource & CredentialsFile an error (breaking change).
+ nCreds := 0
+ if ds.Credentials != nil {
+ nCreds++
+ }
+ if ds.CredentialsJSON != nil {
+ nCreds++
+ }
+ if ds.CredentialsFile != "" {
+ nCreds++
+ }
+ if ds.APIKey != "" {
+ nCreds++
+ }
+ if ds.TokenSource != nil {
+ nCreds++
+ }
+ if len(ds.Scopes) > 0 && len(ds.Audiences) > 0 {
+ return errors.New("WithScopes is incompatible with WithAudience")
+ }
+ // Accept only one form of credentials, except we allow TokenSource and CredentialsFile for backwards compatibility.
+ if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") {
+ return errors.New("multiple credential options provided")
+ }
+ if ds.HTTPClient != nil && ds.GRPCConn != nil {
+ return errors.New("WithHTTPClient is incompatible with WithGRPCConn")
+ }
+ if ds.HTTPClient != nil && ds.GRPCDialOpts != nil {
+ return errors.New("WithHTTPClient is incompatible with gRPC dial options")
+ }
+ if ds.HTTPClient != nil && ds.QuotaProject != "" {
+ return errors.New("WithHTTPClient is incompatible with QuotaProject")
+ }
+ if ds.HTTPClient != nil && ds.RequestReason != "" {
+ return errors.New("WithHTTPClient is incompatible with RequestReason")
+ }
+
+ return nil
+}
diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go
new file mode 100644
index 00000000..3c8ea773
--- /dev/null
+++ b/vendor/google.golang.org/api/iterator/iterator.go
@@ -0,0 +1,231 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package iterator provides support for standard Google API iterators.
+// See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines.
+package iterator
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+// Done is returned by an iterator's Next method when the iteration is
+// complete; when there are no more items to return.
+var Done = errors.New("no more items in iterator")
+
+// We don't support mixed calls to Next and NextPage because they play
+// with the paging state in incompatible ways.
+var errMixed = errors.New("iterator: Next and NextPage called on same iterator")
+
+// PageInfo contains information about an iterator's paging state.
+type PageInfo struct {
+ // Token is the token used to retrieve the next page of items from the
+ // API. You may set Token immediately after creating an iterator to
+ // begin iteration at a particular point. If Token is the empty string,
+ // the iterator will begin with the first eligible item.
+ //
+ // The result of setting Token after the first call to Next is undefined.
+ //
+ // After the underlying API method is called to retrieve a page of items,
+ // Token is set to the next-page token in the response.
+ Token string
+
+ // MaxSize is the maximum number of items returned by a call to the API.
+ // Set MaxSize as a hint to optimize the buffering behavior of the iterator.
+ // If zero, the page size is determined by the underlying service.
+ //
+ // Use Pager to retrieve a page of a specific, exact size.
+ MaxSize int
+
+ // The error state of the iterator. Manipulated by PageInfo.next and Pager.
+ // This is a latch: it starts as nil, and once set should never change.
+ err error
+
+ // If true, no more calls to fetch should be made. Set to true when fetch
+ // returns an empty page token. The iterator is Done when this is true AND
+ // the buffer is empty.
+ atEnd bool
+
+ // Function that fetches a page from the underlying service. It should pass
+ // the pageSize and pageToken arguments to the service, fill the buffer
+ // with the results from the call, and return the next-page token returned
+ // by the service. The function must not remove any existing items from the
+ // buffer. If the underlying RPC takes an int32 page size, pageSize should
+ // be silently truncated.
+ fetch func(pageSize int, pageToken string) (nextPageToken string, err error)
+
+ // Function that returns the number of currently buffered items.
+ bufLen func() int
+
+ // Function that returns the buffer, after setting the buffer variable to nil.
+ takeBuf func() interface{}
+
+ // Set to true on first call to PageInfo.next or Pager.NextPage. Used to check
+ // for calls to both Next and NextPage with the same iterator.
+ nextCalled, nextPageCalled bool
+}
+
+// NewPageInfo exposes internals for iterator implementations.
+// It is not a stable interface.
+var NewPageInfo = newPageInfo
+
+// If an iterator can support paging, its iterator-creating method should call
+// this (via the NewPageInfo variable above).
+//
+// The fetch, bufLen and takeBuf arguments provide access to the
+// iterator's internal slice of buffered items. They behave as described in
+// PageInfo, above.
+//
+// The return value is the PageInfo.next method bound to the returned PageInfo value.
+// (Returning it avoids exporting PageInfo.next.)
+func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (*PageInfo, func() error) {
+ pi := &PageInfo{
+ fetch: fetch,
+ bufLen: bufLen,
+ takeBuf: takeBuf,
+ }
+ return pi, pi.next
+}
+
+// Remaining returns the number of items available before the iterator makes another API call.
+func (pi *PageInfo) Remaining() int { return pi.bufLen() }
+
+// next provides support for an iterator's Next function. An iterator's Next
+// should return the error returned by next if non-nil; else it can assume
+// there is at least one item in its buffer, and it should return that item and
+// remove it from the buffer.
+func (pi *PageInfo) next() error {
+ pi.nextCalled = true
+ if pi.err != nil { // Once we get an error, always return it.
+ // TODO(jba): fix so users can retry on transient errors? Probably not worth it.
+ return pi.err
+ }
+ if pi.nextPageCalled {
+ pi.err = errMixed
+ return pi.err
+ }
+ // Loop until we get some items or reach the end.
+ for pi.bufLen() == 0 && !pi.atEnd {
+ if err := pi.fill(pi.MaxSize); err != nil {
+ pi.err = err
+ return pi.err
+ }
+ if pi.Token == "" {
+ pi.atEnd = true
+ }
+ }
+ // Either the buffer is non-empty or pi.atEnd is true (or both).
+ if pi.bufLen() == 0 {
+ // The buffer is empty and pi.atEnd is true, i.e. the service has no
+ // more items.
+ pi.err = Done
+ }
+ return pi.err
+}
+
+// Call the service to fill the buffer, using size and pi.Token. Set pi.Token to the
+// next-page token returned by the call.
+// If fill returns a non-nil error, the buffer will be empty.
+func (pi *PageInfo) fill(size int) error {
+ tok, err := pi.fetch(size, pi.Token)
+ if err != nil {
+ pi.takeBuf() // clear the buffer
+ return err
+ }
+ pi.Token = tok
+ return nil
+}
+
+// Pageable is implemented by iterators that support paging.
+type Pageable interface {
+ // PageInfo returns paging information associated with the iterator.
+ PageInfo() *PageInfo
+}
+
+// Pager supports retrieving iterator items a page at a time.
+type Pager struct {
+ pageInfo *PageInfo
+ pageSize int
+}
+
+// NewPager returns a pager that uses iter. Calls to its NextPage method will
+// obtain exactly pageSize items, unless fewer remain. The pageToken argument
+// indicates where to start the iteration. Pass the empty string to start at
+// the beginning, or pass a token retrieved from a call to Pager.NextPage.
+//
+// If you use an iterator with a Pager, you must not call Next on the iterator.
+func NewPager(iter Pageable, pageSize int, pageToken string) *Pager {
+ p := &Pager{
+ pageInfo: iter.PageInfo(),
+ pageSize: pageSize,
+ }
+ p.pageInfo.Token = pageToken
+ if pageSize <= 0 {
+ p.pageInfo.err = errors.New("iterator: page size must be positive")
+ }
+ return p
+}
+
+// NextPage retrieves a sequence of items from the iterator and appends them
+// to slicep, which must be a pointer to a slice of the iterator's item type.
+// Exactly p.pageSize items will be appended, unless fewer remain.
+//
+// The first return value is the page token to use for the next page of items.
+// If empty, there are no more pages. Aside from checking for the end of the
+// iteration, the returned page token is only needed if the iteration is to be
+// resumed at a later time, in another context (possibly another process).
+//
+// The second return value is non-nil if an error occurred. It will never be
+// the special iterator sentinel value Done. To recognize the end of the
+// iteration, compare nextPageToken to the empty string.
+//
+// It is possible for NextPage to return a single zero-length page along with
+// an empty page token when there are no more items in the iteration.
+func (p *Pager) NextPage(slicep interface{}) (nextPageToken string, err error) {
+ p.pageInfo.nextPageCalled = true
+ if p.pageInfo.err != nil {
+ return "", p.pageInfo.err
+ }
+ if p.pageInfo.nextCalled {
+ p.pageInfo.err = errMixed
+ return "", p.pageInfo.err
+ }
+ if p.pageInfo.bufLen() > 0 {
+ return "", errors.New("must call NextPage with an empty buffer")
+ }
+ // The buffer must be empty here, so takeBuf is a no-op. We call it just to get
+ // the buffer's type.
+ wantSliceType := reflect.PtrTo(reflect.ValueOf(p.pageInfo.takeBuf()).Type())
+ if slicep == nil {
+ return "", errors.New("nil passed to Pager.NextPage")
+ }
+ vslicep := reflect.ValueOf(slicep)
+ if vslicep.Type() != wantSliceType {
+ return "", fmt.Errorf("slicep should be of type %s, got %T", wantSliceType, slicep)
+ }
+ for p.pageInfo.bufLen() < p.pageSize {
+ if err := p.pageInfo.fill(p.pageSize - p.pageInfo.bufLen()); err != nil {
+ p.pageInfo.err = err
+ return "", p.pageInfo.err
+ }
+ if p.pageInfo.Token == "" {
+ break
+ }
+ }
+ e := vslicep.Elem()
+ e.Set(reflect.AppendSlice(e, reflect.ValueOf(p.pageInfo.takeBuf())))
+ return p.pageInfo.Token, nil
+}
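The Done sentinel and the rule against mixing Next with NextPage shape how callers drain these iterators. A hedged sketch of the conventional Next loop; Item and collectAll are illustrative names, not part of this change:

package example

import "google.golang.org/api/iterator"

// Item is a stand-in element type; real client iterators return their own types.
type Item struct{ Name string }

// collectAll drains any iterator exposing the conventional Next method,
// treating iterator.Done as normal end-of-stream rather than a failure.
func collectAll(it interface{ Next() (*Item, error) }) ([]*Item, error) {
	var out []*Item
	for {
		item, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return nil, err
		}
		out = append(out, item)
	}
	return out, nil
}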
diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go
new file mode 100644
index 00000000..0636a829
--- /dev/null
+++ b/vendor/google.golang.org/api/option/credentials_go19.go
@@ -0,0 +1,33 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.9
+
+package option
+
+import (
+ "golang.org/x/oauth2/google"
+ "google.golang.org/api/internal"
+)
+
+type withCreds google.Credentials
+
+func (w *withCreds) Apply(o *internal.DialSettings) {
+ o.Credentials = (*google.Credentials)(w)
+}
+
+// WithCredentials returns a ClientOption that authenticates API calls.
+func WithCredentials(creds *google.Credentials) ClientOption {
+ return (*withCreds)(creds)
+}
diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go
new file mode 100644
index 00000000..74d3a4b5
--- /dev/null
+++ b/vendor/google.golang.org/api/option/credentials_notgo19.go
@@ -0,0 +1,32 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.9
+
+package option
+
+import (
+ "golang.org/x/oauth2/google"
+ "google.golang.org/api/internal"
+)
+
+type withCreds google.DefaultCredentials
+
+func (w *withCreds) Apply(o *internal.DialSettings) {
+ o.Credentials = (*google.DefaultCredentials)(w)
+}
+
+func WithCredentials(creds *google.DefaultCredentials) ClientOption {
+ return (*withCreds)(creds)
+}
diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go
new file mode 100644
index 00000000..0a1c2dba
--- /dev/null
+++ b/vendor/google.golang.org/api/option/option.go
@@ -0,0 +1,235 @@
+// Copyright 2017 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package option contains options for Google API clients.
+package option
+
+import (
+ "net/http"
+
+ "golang.org/x/oauth2"
+ "google.golang.org/api/internal"
+ "google.golang.org/grpc"
+)
+
+// A ClientOption is an option for a Google API client.
+type ClientOption interface {
+ Apply(*internal.DialSettings)
+}
+
+// WithTokenSource returns a ClientOption that specifies an OAuth2 token
+// source to be used as the basis for authentication.
+func WithTokenSource(s oauth2.TokenSource) ClientOption {
+ return withTokenSource{s}
+}
+
+type withTokenSource struct{ ts oauth2.TokenSource }
+
+func (w withTokenSource) Apply(o *internal.DialSettings) {
+ o.TokenSource = w.ts
+}
+
+type withCredFile string
+
+func (w withCredFile) Apply(o *internal.DialSettings) {
+ o.CredentialsFile = string(w)
+}
+
+// WithCredentialsFile returns a ClientOption that authenticates
+// API calls with the given service account or refresh token JSON
+// credentials file.
+func WithCredentialsFile(filename string) ClientOption {
+ return withCredFile(filename)
+}
+
+// WithServiceAccountFile returns a ClientOption that uses a Google service
+// account credentials file to authenticate.
+//
+// Deprecated: Use WithCredentialsFile instead.
+func WithServiceAccountFile(filename string) ClientOption {
+ return WithCredentialsFile(filename)
+}
+
+// WithCredentialsJSON returns a ClientOption that authenticates
+// API calls with the given service account or refresh token JSON
+// credentials.
+func WithCredentialsJSON(p []byte) ClientOption {
+ return withCredentialsJSON(p)
+}
+
+type withCredentialsJSON []byte
+
+func (w withCredentialsJSON) Apply(o *internal.DialSettings) {
+ o.CredentialsJSON = make([]byte, len(w))
+ copy(o.CredentialsJSON, w)
+}
+
+// WithEndpoint returns a ClientOption that overrides the default endpoint
+// to be used for a service.
+func WithEndpoint(url string) ClientOption {
+ return withEndpoint(url)
+}
+
+type withEndpoint string
+
+func (w withEndpoint) Apply(o *internal.DialSettings) {
+ o.Endpoint = string(w)
+}
+
+// WithScopes returns a ClientOption that overrides the default OAuth2 scopes
+// to be used for a service.
+func WithScopes(scope ...string) ClientOption {
+ return withScopes(scope)
+}
+
+type withScopes []string
+
+func (w withScopes) Apply(o *internal.DialSettings) {
+ o.Scopes = make([]string, len(w))
+ copy(o.Scopes, w)
+}
+
+// WithUserAgent returns a ClientOption that sets the User-Agent.
+func WithUserAgent(ua string) ClientOption {
+ return withUA(ua)
+}
+
+type withUA string
+
+func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) }
+
+// WithHTTPClient returns a ClientOption that specifies the HTTP client to use
+// as the basis of communications. This option may only be used with services
+// that support HTTP as their communication transport. When used, the
+// WithHTTPClient option takes precedence over all other supplied options.
+func WithHTTPClient(client *http.Client) ClientOption {
+ return withHTTPClient{client}
+}
+
+type withHTTPClient struct{ client *http.Client }
+
+func (w withHTTPClient) Apply(o *internal.DialSettings) {
+ o.HTTPClient = w.client
+}
+
+// WithGRPCConn returns a ClientOption that specifies the gRPC client
+// connection to use as the basis of communications. This option may only be
+// used with services that support gRPC as their communication transport. When
+// used, the WithGRPCConn option takes precedence over all other supplied
+// options.
+func WithGRPCConn(conn *grpc.ClientConn) ClientOption {
+ return withGRPCConn{conn}
+}
+
+type withGRPCConn struct{ conn *grpc.ClientConn }
+
+func (w withGRPCConn) Apply(o *internal.DialSettings) {
+ o.GRPCConn = w.conn
+}
+
+// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption
+// to an underlying gRPC dial. It does not work with WithGRPCConn.
+func WithGRPCDialOption(opt grpc.DialOption) ClientOption {
+ return withGRPCDialOption{opt}
+}
+
+type withGRPCDialOption struct{ opt grpc.DialOption }
+
+func (w withGRPCDialOption) Apply(o *internal.DialSettings) {
+ o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt)
+}
+
+// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC
+// connections that requests will be balanced between.
+// This is an EXPERIMENTAL API and may be changed or removed in the future.
+func WithGRPCConnectionPool(size int) ClientOption {
+ return withGRPCConnectionPool(size)
+}
+
+type withGRPCConnectionPool int
+
+func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) {
+ balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o))
+ o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer))
+}
+
+// WithAPIKey returns a ClientOption that specifies an API key to be used
+// as the basis for authentication.
+//
+// API Keys can only be used for JSON-over-HTTP APIs, including those under
+// the import path google.golang.org/api/....
+func WithAPIKey(apiKey string) ClientOption {
+ return withAPIKey(apiKey)
+}
+
+type withAPIKey string
+
+func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) }
+
+// WithAudiences returns a ClientOption that specifies an audience to be used
+// as the audience field ("aud") for the JWT token authentication.
+func WithAudiences(audience ...string) ClientOption {
+ return withAudiences(audience)
+}
+
+type withAudiences []string
+
+func (w withAudiences) Apply(o *internal.DialSettings) {
+ o.Audiences = make([]string, len(w))
+ copy(o.Audiences, w)
+}
+
+// WithoutAuthentication returns a ClientOption that specifies that no
+// authentication should be used. It is suitable only for testing and for
+// accessing public resources, like public Google Cloud Storage buckets.
+// It is an error to provide both WithoutAuthentication and any of WithAPIKey,
+// WithTokenSource, WithCredentialsFile or WithServiceAccountFile.
+func WithoutAuthentication() ClientOption {
+ return withoutAuthentication{}
+}
+
+type withoutAuthentication struct{}
+
+func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true }
+
+// WithQuotaProject returns a ClientOption that specifies the project used
+// for quota and billing purposes.
+//
+// For more information please read:
+// https://cloud.google.com/apis/docs/system-parameters
+func WithQuotaProject(quotaProject string) ClientOption {
+ return withQuotaProject(quotaProject)
+}
+
+type withQuotaProject string
+
+func (w withQuotaProject) Apply(o *internal.DialSettings) {
+ o.QuotaProject = string(w)
+}
+
+// WithRequestReason returns a ClientOption that specifies a reason for
+// making the request, which is intended to be recorded in audit logging.
+// An example reason would be a support-case ticket number.
+//
+// For more information please read:
+// https://cloud.google.com/apis/docs/system-parameters
+func WithRequestReason(requestReason string) ClientOption {
+ return withRequestReason(requestReason)
+}
+
+type withRequestReason string
+
+func (w withRequestReason) Apply(o *internal.DialSettings) {
+ o.RequestReason = string(w)
+}
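The transport dialers added below consume these options in one pattern: each option's Apply method writes a field of internal.DialSettings, and Validate then rejects incompatible combinations. A hedged sketch of that mechanism follows; settingsFrom is an illustrative helper mirroring what the dialers do, and since the internal package is import-restricted, such code could only live under google.golang.org/api:

package internalsketch

import (
	"google.golang.org/api/internal"
	"google.golang.org/api/option"
)

// settingsFrom folds a ClientOption slice into DialSettings and validates it.
// Illustrative only; the internal package may not be imported from outside
// google.golang.org/api.
func settingsFrom(opts ...option.ClientOption) (*internal.DialSettings, error) {
	var ds internal.DialSettings
	for _, opt := range opts {
		opt.Apply(&ds)
	}
	// Validate fails for combinations such as WithoutAuthentication plus
	// credentials, or WithHTTPClient together with WithGRPCConn.
	if err := ds.Validate(); err != nil {
		return nil, err
	}
	return &ds, nil
}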
diff --git a/vendor/google.golang.org/api/support/bundler/bundler.go b/vendor/google.golang.org/api/support/bundler/bundler.go
new file mode 100644
index 00000000..c5532711
--- /dev/null
+++ b/vendor/google.golang.org/api/support/bundler/bundler.go
@@ -0,0 +1,349 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package bundler supports bundling (batching) of items. Bundling amortizes an
+// action with fixed costs over multiple items. For example, if an API provides
+// an RPC that accepts a list of items as input, but clients would prefer
+// adding items one at a time, then a Bundler can accept individual items from
+// the client and bundle many of them into a single RPC.
+//
+// This package is experimental and subject to change without notice.
+package bundler
+
+import (
+ "context"
+ "errors"
+ "math"
+ "reflect"
+ "sync"
+ "time"
+
+ "golang.org/x/sync/semaphore"
+)
+
+const (
+ DefaultDelayThreshold = time.Second
+ DefaultBundleCountThreshold = 10
+ DefaultBundleByteThreshold = 1e6 // 1M
+ DefaultBufferedByteLimit = 1e9 // 1G
+)
+
+var (
+ // ErrOverflow indicates that Bundler's stored bytes exceeds its BufferedByteLimit.
+ ErrOverflow = errors.New("bundler reached buffered byte limit")
+
+ // ErrOversizedItem indicates that an item's size exceeds the maximum bundle size.
+ ErrOversizedItem = errors.New("item size exceeds bundle byte limit")
+)
+
+// A Bundler collects items added to it into a bundle until the bundle
+// exceeds a given size, then calls a user-provided function to handle the bundle.
+type Bundler struct {
+ // Starting from the time that the first message is added to a bundle, once
+ // this delay has passed, handle the bundle. The default is DefaultDelayThreshold.
+ DelayThreshold time.Duration
+
+ // Once a bundle has this many items, handle the bundle. Since only one
+ // item at a time is added to a bundle, no bundle will exceed this
+ // threshold, so it also serves as a limit. The default is
+ // DefaultBundleCountThreshold.
+ BundleCountThreshold int
+
+ // Once the number of bytes in the current bundle reaches this threshold, handle
+ // the bundle. The default is DefaultBundleByteThreshold. This triggers handling,
+ // but does not cap the total size of a bundle.
+ BundleByteThreshold int
+
+ // The maximum size of a bundle, in bytes. Zero means unlimited.
+ BundleByteLimit int
+
+ // The maximum number of bytes that the Bundler will keep in memory before
+ // returning ErrOverflow. The default is DefaultBufferedByteLimit.
+ BufferedByteLimit int
+
+ // The maximum number of handler invocations that can be running at once.
+ // The default is 1.
+ HandlerLimit int
+
+ handler func(interface{}) // called to handle a bundle
+ itemSliceZero reflect.Value // nil (zero value) for slice of items
+ flushTimer *time.Timer // implements DelayThreshold
+
+ mu sync.Mutex
+ sem *semaphore.Weighted // enforces BufferedByteLimit
+ semOnce sync.Once
+ curBundle bundle // incoming items added to this bundle
+
+ // Each bundle is assigned a unique ticket that determines the order in which the
+ // handler is called. The ticket is assigned with mu locked, but waiting for tickets
+ // to be handled is done via mu2 and cond, below.
+ nextTicket uint64 // next ticket to be assigned
+
+ mu2 sync.Mutex
+ cond *sync.Cond
+ nextHandled uint64 // next ticket to be handled
+
+ // In this implementation, active uses space proportional to HandlerLimit, and
+ // waitUntilAllHandled takes time proportional to HandlerLimit each time an acquire
+ // or release occurs, so large values of HandlerLimit max may cause performance
+ // issues.
+ active map[uint64]bool // tickets of bundles actively being handled
+}
+
+type bundle struct {
+ items reflect.Value // slice of item type
+ size int // size in bytes of all items
+}
+
+// NewBundler creates a new Bundler.
+//
+// itemExample is a value of the type that will be bundled. For example, if you
+// want to create bundles of *Entry, you could pass &Entry{} for itemExample.
+//
+// handler is a function that will be called on each bundle. If itemExample is
+// of type T, the argument to handler is of type []T. handler is always called
+// sequentially for each bundle, and never in parallel.
+//
+// Configure the Bundler by setting its thresholds and limits before calling
+// any of its methods.
+func NewBundler(itemExample interface{}, handler func(interface{})) *Bundler {
+ b := &Bundler{
+ DelayThreshold: DefaultDelayThreshold,
+ BundleCountThreshold: DefaultBundleCountThreshold,
+ BundleByteThreshold: DefaultBundleByteThreshold,
+ BufferedByteLimit: DefaultBufferedByteLimit,
+ HandlerLimit: 1,
+
+ handler: handler,
+ itemSliceZero: reflect.Zero(reflect.SliceOf(reflect.TypeOf(itemExample))),
+ active: map[uint64]bool{},
+ }
+ b.curBundle.items = b.itemSliceZero
+ b.cond = sync.NewCond(&b.mu2)
+ return b
+}
+
+func (b *Bundler) initSemaphores() {
+ // Create the semaphores lazily, because the user may set limits
+ // after NewBundler.
+ b.semOnce.Do(func() {
+ b.sem = semaphore.NewWeighted(int64(b.BufferedByteLimit))
+ })
+}
+
+// Add adds item to the current bundle. It marks the bundle for handling and
+// starts a new one if any of the thresholds or limits are exceeded.
+//
+// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then
+// the item can never be handled. Add returns ErrOversizedItem in this case.
+//
+// If adding the item would exceed the maximum memory allowed
+// (Bundler.BufferedByteLimit) or an AddWait call is blocked waiting for
+// memory, Add returns ErrOverflow.
+//
+// Add never blocks.
+func (b *Bundler) Add(item interface{}, size int) error {
+ // If this item exceeds the maximum size of a bundle,
+ // we can never send it.
+ if b.BundleByteLimit > 0 && size > b.BundleByteLimit {
+ return ErrOversizedItem
+ }
+ // If adding this item would exceed our allotted memory
+ // footprint, we can't accept it.
+ // (TryAcquire also returns false if anything is waiting on the semaphore,
+ // so calls to Add and AddWait shouldn't be mixed.)
+ b.initSemaphores()
+ if !b.sem.TryAcquire(int64(size)) {
+ return ErrOverflow
+ }
+ b.add(item, size)
+ return nil
+}
+
+// add adds item to the current bundle. It marks the bundle for handling and
+// starts a new one if any of the thresholds or limits are exceeded.
+func (b *Bundler) add(item interface{}, size int) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ // If adding this item to the current bundle would cause it to exceed the
+ // maximum bundle size, close the current bundle and start a new one.
+ if b.BundleByteLimit > 0 && b.curBundle.size+size > b.BundleByteLimit {
+ b.startFlushLocked()
+ }
+ // Add the item.
+ b.curBundle.items = reflect.Append(b.curBundle.items, reflect.ValueOf(item))
+ b.curBundle.size += size
+
+ // Start a timer to flush the item if one isn't already running.
+ // startFlushLocked clears the timer and closes the bundle at the same time,
+ // so we only allocate a new timer for the first item in each bundle.
+ // (We could try to call Reset on the timer instead, but that would add a lot
+ // of complexity to the code just to save one small allocation.)
+ if b.flushTimer == nil {
+ b.flushTimer = time.AfterFunc(b.DelayThreshold, b.Flush)
+ }
+
+ // If the current bundle equals the count threshold, close it.
+ if b.curBundle.items.Len() == b.BundleCountThreshold {
+ b.startFlushLocked()
+ }
+ // If the current bundle equals or exceeds the byte threshold, close it.
+ if b.curBundle.size >= b.BundleByteThreshold {
+ b.startFlushLocked()
+ }
+}
+
+// AddWait adds item to the current bundle. It marks the bundle for handling and
+// starts a new one if any of the thresholds or limits are exceeded.
+//
+// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then
+// the item can never be handled. AddWait returns ErrOversizedItem in this case.
+//
+// If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit),
+// AddWait blocks until space is available or ctx is done.
+//
+// Calls to Add and AddWait should not be mixed on the same Bundler.
+func (b *Bundler) AddWait(ctx context.Context, item interface{}, size int) error {
+ // If this item exceeds the maximum size of a bundle,
+ // we can never send it.
+ if b.BundleByteLimit > 0 && size > b.BundleByteLimit {
+ return ErrOversizedItem
+ }
+ // If adding this item would exceed our allotted memory footprint, block
+ // until space is available. The semaphore is FIFO, so there will be no
+ // starvation.
+ b.initSemaphores()
+ if err := b.sem.Acquire(ctx, int64(size)); err != nil {
+ return err
+ }
+ // Here, we've reserved space for item. Other goroutines can call AddWait
+ // and even acquire space, but no one can take away our reservation
+ // (assuming sem.Release is used correctly). So there is no race condition
+ // resulting from locking the mutex after sem.Acquire returns.
+ b.add(item, size)
+ return nil
+}
+
+// Flush invokes the handler for all remaining items in the Bundler and waits
+// for it to return.
+func (b *Bundler) Flush() {
+ b.mu.Lock()
+ b.startFlushLocked()
+ // Here, all bundles with tickets < b.nextTicket are
+ // either finished or active. Those are the ones
+ // we want to wait for.
+ t := b.nextTicket
+ b.mu.Unlock()
+ b.initSemaphores()
+ b.waitUntilAllHandled(t)
+}
+
+func (b *Bundler) startFlushLocked() {
+ if b.flushTimer != nil {
+ b.flushTimer.Stop()
+ b.flushTimer = nil
+ }
+ if b.curBundle.items.Len() == 0 {
+ return
+ }
+ // Here, both semaphores must have been initialized.
+ bun := b.curBundle
+ b.curBundle = bundle{items: b.itemSliceZero}
+ ticket := b.nextTicket
+ b.nextTicket++
+ go func() {
+ defer func() {
+ b.sem.Release(int64(bun.size))
+ b.release(ticket)
+ }()
+ b.acquire(ticket)
+ b.handler(bun.items.Interface())
+ }()
+}
+
+// acquire blocks until ticket is the next to be served, then returns. In order for N
+// acquire calls to return, the tickets must be in the range [0, N). A ticket must
+// not be presented to acquire more than once.
+func (b *Bundler) acquire(ticket uint64) {
+ b.mu2.Lock()
+ defer b.mu2.Unlock()
+ if ticket < b.nextHandled {
+ panic("bundler: acquire: arg too small")
+ }
+ for !(ticket == b.nextHandled && len(b.active) < b.HandlerLimit) {
+ b.cond.Wait()
+ }
+ // Here,
+ // ticket == b.nextHandled: the caller is the next one to be handled;
+ // and len(b.active) < b.HandlerLimit: there is space available.
+ b.active[ticket] = true
+ b.nextHandled++
+ // Broadcast, not Signal: although at most one acquire waiter can make progress,
+ // there might be waiters in waitUntilAllHandled.
+ b.cond.Broadcast()
+}
+
+// If a ticket is used for a call to acquire, it must later be passed to release. A
+// ticket must not be presented to release more than once.
+func (b *Bundler) release(ticket uint64) {
+ b.mu2.Lock()
+ defer b.mu2.Unlock()
+ if !b.active[ticket] {
+ panic("bundler: release: not an active ticket")
+ }
+ delete(b.active, ticket)
+ b.cond.Broadcast()
+}
+
+// waitUntilAllHandled blocks until all tickets < n have called release, meaning
+// all bundles with tickets < n have been handled.
+func (b *Bundler) waitUntilAllHandled(n uint64) {
+ // Proof of correctness of this function.
+ // "N is acquired" means acquire(N) has returned.
+ // "N is released" means release(N) has returned.
+ // 1. If N is acquired, N-1 is acquired.
+ // Follows from the loop test in acquire, and the fact
+ // that nextHandled is incremented by 1.
+ // 2. If nextHandled >= N, then N-1 is acquired.
+ // Because we only increment nextHandled to N after N-1 is acquired.
+ // 3. If nextHandled >= N, then all n < N is acquired.
+ // Follows from #1 and #2.
+ // 4. If N is acquired and N is not in active, then N is released.
+ // Because we put N in active before acquire returns, and only
+ // remove it when it is released.
+ // Let min(active) be the smallest member of active, or infinity if active is empty.
+ // 5. If nextHandled >= N and N <= min(active), then all n < N is released.
+ // From nextHandled >= N and #3, all n < N is acquired.
+ // N <= min(active) implies n < min(active) for all n < N. So all n < N is not in active.
+ // So from #4, all n < N is released.
+ // The loop test below is the antecedent of #5.
+ b.mu2.Lock()
+ defer b.mu2.Unlock()
+ for !(b.nextHandled >= n && n <= min(b.active)) {
+ b.cond.Wait()
+ }
+}
+
+// min returns the minimum value of the set s, or the largest uint64 if
+// s is empty.
+func min(s map[uint64]bool) uint64 {
+ var m uint64 = math.MaxUint64
+ for n := range s {
+ if n < m {
+ m = n
+ }
+ }
+ return m
+}
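A standalone, hedged sketch of the public Bundler API described above; the entry type, thresholds, and sizes are arbitrary illustration values:

package main

import (
	"fmt"
	"time"

	"google.golang.org/api/support/bundler"
)

type entry struct{ msg string }

func main() {
	// The handler receives a []*entry once a bundle is closed, since the
	// item example passed to NewBundler is a *entry.
	b := bundler.NewBundler(&entry{}, func(items interface{}) {
		fmt.Println("flushing", len(items.([]*entry)), "items")
	})
	// Tune thresholds before the first Add, as the doc comment advises.
	b.DelayThreshold = 100 * time.Millisecond
	b.BundleCountThreshold = 2

	for i := 0; i < 5; i++ {
		// The size argument feeds the byte-threshold and buffered-byte accounting.
		if err := b.Add(&entry{msg: fmt.Sprintf("item-%d", i)}, 1); err != nil {
			fmt.Println("add failed:", err)
		}
	}
	// Flush hands any remaining items to the handler and waits for it to finish.
	b.Flush()
}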
diff --git a/vendor/google.golang.org/api/transport/dial.go b/vendor/google.golang.org/api/transport/dial.go
new file mode 100644
index 00000000..1fb7cf90
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/dial.go
@@ -0,0 +1,46 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "context"
+ "net/http"
+
+ "google.golang.org/grpc"
+
+ "google.golang.org/api/option"
+ gtransport "google.golang.org/api/transport/grpc"
+ htransport "google.golang.org/api/transport/http"
+)
+
+// NewHTTPClient returns an HTTP client for use communicating with a Google cloud
+// service, configured with the given ClientOptions. It also returns the endpoint
+// for the service as specified in the options.
+func NewHTTPClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) {
+ return htransport.NewClient(ctx, opts...)
+}
+
+// DialGRPC returns a GRPC connection for use communicating with a Google cloud
+// service, configured with the given ClientOptions.
+func DialGRPC(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) {
+ return gtransport.Dial(ctx, opts...)
+}
+
+// DialGRPCInsecure returns an insecure GRPC connection for use communicating
+// with fake or mock Google cloud service implementations, such as emulators.
+// The connection is configured with the given ClientOptions.
+func DialGRPCInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) {
+ return gtransport.DialInsecure(ctx, opts...)
+}
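A hedged sketch of dialing an authenticated HTTP client through this package; with no explicit credential options, Creds falls back to Application Default Credentials, and the scope and user agent below are placeholders:

package main

import (
	"context"
	"log"

	"google.golang.org/api/option"
	"google.golang.org/api/transport"
)

func main() {
	ctx := context.Background()
	client, endpoint, err := transport.NewHTTPClient(ctx,
		option.WithScopes("https://www.googleapis.com/auth/monitoring.write"),
		option.WithUserAgent("example-agent/0.1"),
	)
	if err != nil {
		log.Fatal(err)
	}
	// endpoint is whatever WithEndpoint set; empty here since none was given.
	log.Println("endpoint:", endpoint)
	_ = client // ready for authenticated requests
}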
diff --git a/vendor/google.golang.org/api/transport/doc.go b/vendor/google.golang.org/api/transport/doc.go
new file mode 100644
index 00000000..4915036c
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/doc.go
@@ -0,0 +1,21 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transport provides utility methods for creating authenticated
+// transports to Google's HTTP and gRPC APIs. It is intended to be used in
+// conjunction with google.golang.org/api/option.
+//
+// This package is not intended for use by end developers. Use the
+// google.golang.org/api/option package to configure API clients.
+package transport
diff --git a/vendor/google.golang.org/api/transport/go19.go b/vendor/google.golang.org/api/transport/go19.go
new file mode 100644
index 00000000..3e89f932
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/go19.go
@@ -0,0 +1,35 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.9
+
+package transport
+
+import (
+ "context"
+
+ "golang.org/x/oauth2/google"
+ "google.golang.org/api/internal"
+ "google.golang.org/api/option"
+)
+
+// Creds constructs a google.Credentials from the information in the options,
+// or obtains the default credentials in the same way as google.FindDefaultCredentials.
+func Creds(ctx context.Context, opts ...option.ClientOption) (*google.Credentials, error) {
+ var ds internal.DialSettings
+ for _, opt := range opts {
+ opt.Apply(&ds)
+ }
+ return internal.Creds(ctx, &ds)
+}
diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go
new file mode 100644
index 00000000..4f6b94de
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/grpc/dial.go
@@ -0,0 +1,129 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package grpc supports network connections to GRPC servers.
+// This package is not intended for use by end developers. Use the
+// google.golang.org/api/option package to configure API clients.
+package grpc
+
+import (
+ "context"
+ "errors"
+ "log"
+
+ "go.opencensus.io/plugin/ocgrpc"
+ "google.golang.org/api/internal"
+ "google.golang.org/api/option"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/oauth"
+)
+
+// Set at init time by dial_appengine.go. If nil, we're not on App Engine.
+var appengineDialerHook func(context.Context) grpc.DialOption
+
+// Dial returns a GRPC connection for use communicating with a Google cloud
+// service, configured with the given ClientOptions.
+func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) {
+ return dial(ctx, false, opts)
+}
+
+// DialInsecure returns an insecure GRPC connection for use communicating
+// with fake or mock Google cloud service implementations, such as emulators.
+// The connection is configured with the given ClientOptions.
+func DialInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) {
+ return dial(ctx, true, opts)
+}
+
+func dial(ctx context.Context, insecure bool, opts []option.ClientOption) (*grpc.ClientConn, error) {
+ var o internal.DialSettings
+ for _, opt := range opts {
+ opt.Apply(&o)
+ }
+ if err := o.Validate(); err != nil {
+ return nil, err
+ }
+ if o.HTTPClient != nil {
+ return nil, errors.New("unsupported HTTP client specified")
+ }
+ if o.GRPCConn != nil {
+ return o.GRPCConn, nil
+ }
+ grpcOpts := []grpc.DialOption{
+ grpc.WithWaitForHandshake(),
+ }
+ if insecure {
+ grpcOpts = []grpc.DialOption{grpc.WithInsecure()}
+ } else if !o.NoAuth {
+ if o.APIKey != "" {
+ log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.")
+ }
+ creds, err := internal.Creds(ctx, &o)
+ if err != nil {
+ return nil, err
+ }
+ grpcOpts = []grpc.DialOption{
+ grpc.WithPerRPCCredentials(grpcTokenSource{
+ TokenSource: oauth.TokenSource{creds.TokenSource},
+ quotaProject: o.QuotaProject,
+ requestReason: o.RequestReason,
+ }),
+ grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
+ }
+ }
+ if appengineDialerHook != nil {
+ // Use the Socket API on App Engine.
+ grpcOpts = append(grpcOpts, appengineDialerHook(ctx))
+ }
+ // Add tracing, but before the other options, so that clients can override the
+ // gRPC stats handler.
+ // This assumes that gRPC options are processed in order, left to right.
+ grpcOpts = addOCStatsHandler(grpcOpts)
+ grpcOpts = append(grpcOpts, o.GRPCDialOpts...)
+ if o.UserAgent != "" {
+ grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent))
+ }
+ return grpc.DialContext(ctx, o.Endpoint, grpcOpts...)
+}
+
+func addOCStatsHandler(opts []grpc.DialOption) []grpc.DialOption {
+ return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
+}
+
+// grpcTokenSource supplies PerRPCCredentials from an oauth.TokenSource.
+type grpcTokenSource struct {
+ oauth.TokenSource
+
+ // Additional metadata attached as headers.
+ quotaProject string
+ requestReason string
+}
+
+// GetRequestMetadata gets the request metadata as a map from a grpcTokenSource.
+func (ts grpcTokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (
+ map[string]string, error) {
+ metadata, err := ts.TokenSource.GetRequestMetadata(ctx, uri...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Attach system parameters into the metadata
+ if ts.quotaProject != "" {
+ metadata["X-goog-user-project"] = ts.quotaProject
+ }
+ if ts.requestReason != "" {
+ metadata["X-goog-request-reason"] = ts.requestReason
+ }
+ return metadata, nil
+}
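For the gRPC path, a hedged dialing sketch; the endpoint and scope are illustrative values, not taken from this change:

package main

import (
	"context"
	"log"

	"google.golang.org/api/option"
	gtransport "google.golang.org/api/transport/grpc"
)

func main() {
	ctx := context.Background()
	conn, err := gtransport.Dial(ctx,
		option.WithEndpoint("monitoring.googleapis.com:443"), // illustrative endpoint
		option.WithScopes("https://www.googleapis.com/auth/monitoring.write"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// conn can now back a generated gRPC client stub.
}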
diff --git a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go
new file mode 100644
index 00000000..87819d4e
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go
@@ -0,0 +1,41 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build appengine
+
+package grpc
+
+import (
+ "context"
+ "net"
+ "time"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/socket"
+ "google.golang.org/grpc"
+)
+
+func init() {
+ // NOTE: dev_appserver doesn't currently support SSL.
+ // When it does, this code can be removed.
+ if appengine.IsDevAppServer() {
+ return
+ }
+
+ appengineDialerHook = func(ctx context.Context) grpc.DialOption {
+ return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return socket.DialTimeout(ctx, "tcp", addr, timeout)
+ })
+ }
+}
diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go
new file mode 100644
index 00000000..c0d8bf20
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/http/dial.go
@@ -0,0 +1,161 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package http supports network connections to HTTP servers.
+// This package is not intended for use by end developers. Use the
+// google.golang.org/api/option package to configure API clients.
+package http
+
+import (
+ "context"
+ "errors"
+ "net/http"
+
+ "go.opencensus.io/plugin/ochttp"
+ "golang.org/x/oauth2"
+ "google.golang.org/api/googleapi/transport"
+ "google.golang.org/api/internal"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport/http/internal/propagation"
+)
+
+// NewClient returns an HTTP client for use communicating with a Google cloud
+// service, configured with the given ClientOptions. It also returns the endpoint
+// for the service as specified in the options.
+func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) {
+ settings, err := newSettings(opts)
+ if err != nil {
+ return nil, "", err
+ }
+ // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided?
+ if settings.HTTPClient != nil {
+ return settings.HTTPClient, settings.Endpoint, nil
+ }
+ trans, err := newTransport(ctx, defaultBaseTransport(ctx), settings)
+ if err != nil {
+ return nil, "", err
+ }
+ return &http.Client{Transport: trans}, settings.Endpoint, nil
+}
+
+// NewTransport creates an http.RoundTripper for use communicating with a Google
+// cloud service, configured with the given ClientOptions. Its RoundTrip method delegates to base.
+func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) {
+ settings, err := newSettings(opts)
+ if err != nil {
+ return nil, err
+ }
+ if settings.HTTPClient != nil {
+ return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport")
+ }
+ return newTransport(ctx, base, settings)
+}
+
+func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) {
+ trans := base
+ trans = parameterTransport{
+ base: trans,
+ userAgent: settings.UserAgent,
+ quotaProject: settings.QuotaProject,
+ requestReason: settings.RequestReason,
+ }
+ trans = addOCTransport(trans)
+ switch {
+ case settings.NoAuth:
+ // Do nothing.
+ case settings.APIKey != "":
+ trans = &transport.APIKey{
+ Transport: trans,
+ Key: settings.APIKey,
+ }
+ default:
+ creds, err := internal.Creds(ctx, settings)
+ if err != nil {
+ return nil, err
+ }
+ trans = &oauth2.Transport{
+ Base: trans,
+ Source: creds.TokenSource,
+ }
+ }
+ return trans, nil
+}
+
+func newSettings(opts []option.ClientOption) (*internal.DialSettings, error) {
+ var o internal.DialSettings
+ for _, opt := range opts {
+ opt.Apply(&o)
+ }
+ if err := o.Validate(); err != nil {
+ return nil, err
+ }
+ if o.GRPCConn != nil {
+ return nil, errors.New("unsupported gRPC connection specified")
+ }
+ return &o, nil
+}
+
+type parameterTransport struct {
+ userAgent string
+ quotaProject string
+ requestReason string
+
+ base http.RoundTripper
+}
+
+func (t parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ rt := t.base
+ if rt == nil {
+ return nil, errors.New("transport: no Transport specified")
+ }
+ if t.userAgent == "" {
+ return rt.RoundTrip(req)
+ }
+ newReq := *req
+ newReq.Header = make(http.Header)
+ for k, vv := range req.Header {
+ newReq.Header[k] = vv
+ }
+ // TODO(cbro): append to existing User-Agent header?
+ newReq.Header.Set("User-Agent", t.userAgent)
+
+ // Attach system parameters into the header
+ if t.quotaProject != "" {
+ newReq.Header.Set("X-Goog-User-Project", t.quotaProject)
+ }
+ if t.requestReason != "" {
+ newReq.Header.Set("X-Goog-Request-Reason", t.requestReason)
+ }
+
+ return rt.RoundTrip(&newReq)
+}
+
+// Set at init time by dial_appengine.go. If nil, we're not on App Engine.
+var appengineUrlfetchHook func(context.Context) http.RoundTripper
+
+// defaultBaseTransport returns the base HTTP transport.
+// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport.
+func defaultBaseTransport(ctx context.Context) http.RoundTripper {
+ if appengineUrlfetchHook != nil {
+ return appengineUrlfetchHook(ctx)
+ }
+ return http.DefaultTransport
+}
+
+func addOCTransport(trans http.RoundTripper) http.RoundTripper {
+ return &ochttp.Transport{
+ Base: trans,
+ Propagation: &propagation.HTTPFormat{},
+ }
+}
diff --git a/vendor/google.golang.org/api/transport/http/dial_appengine.go b/vendor/google.golang.org/api/transport/http/dial_appengine.go
new file mode 100644
index 00000000..04c81413
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/http/dial_appengine.go
@@ -0,0 +1,30 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build appengine
+
+package http
+
+import (
+ "context"
+ "net/http"
+
+ "google.golang.org/appengine/urlfetch"
+)
+
+func init() {
+ appengineUrlfetchHook = func(ctx context.Context) http.RoundTripper {
+ return &urlfetch.Transport{Context: ctx}
+ }
+}
diff --git a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go
new file mode 100644
index 00000000..24b4f0d2
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go
@@ -0,0 +1,96 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+// Package propagation implements X-Cloud-Trace-Context header propagation used
+// by Google Cloud products.
+package propagation
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "go.opencensus.io/trace"
+ "go.opencensus.io/trace/propagation"
+)
+
+const (
+ httpHeaderMaxSize = 200
+ httpHeader = `X-Cloud-Trace-Context`
+)
+
+var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
+
+// HTTPFormat implements propagation.HTTPFormat to propagate
+// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace.
+type HTTPFormat struct{}
+
+// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests.
+func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
+ h := req.Header.Get(httpHeader)
+ // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat.
+ // Return if the header is empty or missing, or if the header is unreasonably
+ // large, to avoid making unnecessary copies of a large string.
+ if h == "" || len(h) > httpHeaderMaxSize {
+ return trace.SpanContext{}, false
+ }
+
+ // Parse the trace id field.
+ slash := strings.Index(h, `/`)
+ if slash == -1 {
+ return trace.SpanContext{}, false
+ }
+ tid, h := h[:slash], h[slash+1:]
+
+ buf, err := hex.DecodeString(tid)
+ if err != nil {
+ return trace.SpanContext{}, false
+ }
+ copy(sc.TraceID[:], buf)
+
+ // Parse the span id field.
+ spanstr := h
+ semicolon := strings.Index(h, `;`)
+ if semicolon != -1 {
+ spanstr, h = h[:semicolon], h[semicolon+1:]
+ }
+ sid, err := strconv.ParseUint(spanstr, 10, 64)
+ if err != nil {
+ return trace.SpanContext{}, false
+ }
+ binary.BigEndian.PutUint64(sc.SpanID[:], sid)
+
+ // Parse the options field, options field is optional.
+ if !strings.HasPrefix(h, "o=") {
+ return sc, true
+ }
+ o, err := strconv.ParseUint(h[2:], 10, 64)
+ if err != nil {
+ return trace.SpanContext{}, false
+ }
+ sc.TraceOptions = trace.TraceOptions(o)
+ return sc, true
+}
+
+// SpanContextToRequest modifies the given request to include a Stackdriver Trace header.
+func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
+ sid := binary.BigEndian.Uint64(sc.SpanID[:])
+ header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
+ req.Header.Set(httpHeader, header)
+}
diff --git a/vendor/google.golang.org/api/transport/not_go19.go b/vendor/google.golang.org/api/transport/not_go19.go
new file mode 100644
index 00000000..0cb62759
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/not_go19.go
@@ -0,0 +1,35 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.9
+
+package transport
+
+import (
+ "context"
+
+ "golang.org/x/oauth2/google"
+ "google.golang.org/api/internal"
+ "google.golang.org/api/option"
+)
+
+// Creds constructs a google.DefaultCredentials from the information in the options,
+// or obtains the default credentials in the same way as google.FindDefaultCredentials.
+func Creds(ctx context.Context, opts ...option.ClientOption) (*google.DefaultCredentials, error) {
+ var ds internal.DialSettings
+ for _, opt := range opts {
+ opt.Apply(&ds)
+ }
+ return internal.Creds(ctx, &ds)
+}
diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go
new file mode 100644
index 00000000..4ec872e4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go
@@ -0,0 +1,2822 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google.golang.org/appengine/internal/socket/socket_service.proto
+
+package socket
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type RemoteSocketServiceError_ErrorCode int32
+
+const (
+ RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1
+ RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2
+ RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4
+ RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5
+ RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6
+ RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7
+)
+
+var RemoteSocketServiceError_ErrorCode_name = map[int32]string{
+ 1: "SYSTEM_ERROR",
+ 2: "GAI_ERROR",
+ 4: "FAILURE",
+ 5: "PERMISSION_DENIED",
+ 6: "INVALID_REQUEST",
+ 7: "SOCKET_CLOSED",
+}
+var RemoteSocketServiceError_ErrorCode_value = map[string]int32{
+ "SYSTEM_ERROR": 1,
+ "GAI_ERROR": 2,
+ "FAILURE": 4,
+ "PERMISSION_DENIED": 5,
+ "INVALID_REQUEST": 6,
+ "SOCKET_CLOSED": 7,
+}
+
+func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode {
+ p := new(RemoteSocketServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x RemoteSocketServiceError_ErrorCode) String() string {
+ return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x))
+}
+func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = RemoteSocketServiceError_ErrorCode(value)
+ return nil
+}
+func (RemoteSocketServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 0}
+}
+
+type RemoteSocketServiceError_SystemError int32
+
+const (
+ RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0
+ RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1
+ RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2
+ RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3
+ RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4
+ RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5
+ RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6
+ RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7
+ RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8
+ RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9
+ RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10
+ RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11
+ RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11
+ RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12
+ RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13
+ RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14
+ RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15
+ RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16
+ RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17
+ RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18
+ RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19
+ RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20
+ RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21
+ RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22
+ RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23
+ RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24
+ RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25
+ RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26
+ RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27
+ RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28
+ RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29
+ RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30
+ RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31
+ RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32
+ RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33
+ RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34
+ RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35
+ RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35
+ RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36
+ RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37
+ RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38
+ RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39
+ RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40
+ RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42
+ RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43
+ RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44
+ RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45
+ RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46
+ RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47
+ RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48
+ RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49
+ RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50
+ RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51
+ RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52
+ RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53
+ RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54
+ RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55
+ RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56
+ RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57
+ RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59
+ RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60
+ RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61
+ RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62
+ RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63
+ RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64
+ RemoteSocketServiceError_SYS_ENOPKG RemoteSocketServiceError_SystemError = 65
+ RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66
+ RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67
+ RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68
+ RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69
+ RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70
+ RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71
+ RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72
+ RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73
+ RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74
+ RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75
+ RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76
+ RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77
+ RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78
+ RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79
+ RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80
+ RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81
+ RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82
+ RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83
+ RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84
+ RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85
+ RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86
+ RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87
+ RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88
+ RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89
+ RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90
+ RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91
+ RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 92
+ RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93
+ RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94
+ RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95
+ RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95
+ RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96
+ RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97
+ RemoteSocketServiceError_SYS_EADDRINUSE RemoteSocketServiceError_SystemError = 98
+ RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99
+ RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100
+ RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101
+ RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102
+ RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103
+ RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104
+ RemoteSocketServiceError_SYS_ENOBUFS RemoteSocketServiceError_SystemError = 105
+ RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106
+ RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107
+ RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108
+ RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109
+ RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110
+ RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111
+ RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112
+ RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113
+ RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114
+ RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115
+ RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116
+ RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117
+ RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118
+ RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119
+ RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120
+ RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121
+ RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122
+ RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123
+ RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124
+ RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125
+ RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126
+ RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127
+ RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128
+ RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129
+ RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130
+ RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131
+ RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132
+)
+
+var RemoteSocketServiceError_SystemError_name = map[int32]string{
+ 0: "SYS_SUCCESS",
+ 1: "SYS_EPERM",
+ 2: "SYS_ENOENT",
+ 3: "SYS_ESRCH",
+ 4: "SYS_EINTR",
+ 5: "SYS_EIO",
+ 6: "SYS_ENXIO",
+ 7: "SYS_E2BIG",
+ 8: "SYS_ENOEXEC",
+ 9: "SYS_EBADF",
+ 10: "SYS_ECHILD",
+ 11: "SYS_EAGAIN",
+ // Duplicate value: 11: "SYS_EWOULDBLOCK",
+ 12: "SYS_ENOMEM",
+ 13: "SYS_EACCES",
+ 14: "SYS_EFAULT",
+ 15: "SYS_ENOTBLK",
+ 16: "SYS_EBUSY",
+ 17: "SYS_EEXIST",
+ 18: "SYS_EXDEV",
+ 19: "SYS_ENODEV",
+ 20: "SYS_ENOTDIR",
+ 21: "SYS_EISDIR",
+ 22: "SYS_EINVAL",
+ 23: "SYS_ENFILE",
+ 24: "SYS_EMFILE",
+ 25: "SYS_ENOTTY",
+ 26: "SYS_ETXTBSY",
+ 27: "SYS_EFBIG",
+ 28: "SYS_ENOSPC",
+ 29: "SYS_ESPIPE",
+ 30: "SYS_EROFS",
+ 31: "SYS_EMLINK",
+ 32: "SYS_EPIPE",
+ 33: "SYS_EDOM",
+ 34: "SYS_ERANGE",
+ 35: "SYS_EDEADLK",
+ // Duplicate value: 35: "SYS_EDEADLOCK",
+ 36: "SYS_ENAMETOOLONG",
+ 37: "SYS_ENOLCK",
+ 38: "SYS_ENOSYS",
+ 39: "SYS_ENOTEMPTY",
+ 40: "SYS_ELOOP",
+ 42: "SYS_ENOMSG",
+ 43: "SYS_EIDRM",
+ 44: "SYS_ECHRNG",
+ 45: "SYS_EL2NSYNC",
+ 46: "SYS_EL3HLT",
+ 47: "SYS_EL3RST",
+ 48: "SYS_ELNRNG",
+ 49: "SYS_EUNATCH",
+ 50: "SYS_ENOCSI",
+ 51: "SYS_EL2HLT",
+ 52: "SYS_EBADE",
+ 53: "SYS_EBADR",
+ 54: "SYS_EXFULL",
+ 55: "SYS_ENOANO",
+ 56: "SYS_EBADRQC",
+ 57: "SYS_EBADSLT",
+ 59: "SYS_EBFONT",
+ 60: "SYS_ENOSTR",
+ 61: "SYS_ENODATA",
+ 62: "SYS_ETIME",
+ 63: "SYS_ENOSR",
+ 64: "SYS_ENONET",
+ 65: "SYS_ENOPKG",
+ 66: "SYS_EREMOTE",
+ 67: "SYS_ENOLINK",
+ 68: "SYS_EADV",
+ 69: "SYS_ESRMNT",
+ 70: "SYS_ECOMM",
+ 71: "SYS_EPROTO",
+ 72: "SYS_EMULTIHOP",
+ 73: "SYS_EDOTDOT",
+ 74: "SYS_EBADMSG",
+ 75: "SYS_EOVERFLOW",
+ 76: "SYS_ENOTUNIQ",
+ 77: "SYS_EBADFD",
+ 78: "SYS_EREMCHG",
+ 79: "SYS_ELIBACC",
+ 80: "SYS_ELIBBAD",
+ 81: "SYS_ELIBSCN",
+ 82: "SYS_ELIBMAX",
+ 83: "SYS_ELIBEXEC",
+ 84: "SYS_EILSEQ",
+ 85: "SYS_ERESTART",
+ 86: "SYS_ESTRPIPE",
+ 87: "SYS_EUSERS",
+ 88: "SYS_ENOTSOCK",
+ 89: "SYS_EDESTADDRREQ",
+ 90: "SYS_EMSGSIZE",
+ 91: "SYS_EPROTOTYPE",
+ 92: "SYS_ENOPROTOOPT",
+ 93: "SYS_EPROTONOSUPPORT",
+ 94: "SYS_ESOCKTNOSUPPORT",
+ 95: "SYS_EOPNOTSUPP",
+ // Duplicate value: 95: "SYS_ENOTSUP",
+ 96: "SYS_EPFNOSUPPORT",
+ 97: "SYS_EAFNOSUPPORT",
+ 98: "SYS_EADDRINUSE",
+ 99: "SYS_EADDRNOTAVAIL",
+ 100: "SYS_ENETDOWN",
+ 101: "SYS_ENETUNREACH",
+ 102: "SYS_ENETRESET",
+ 103: "SYS_ECONNABORTED",
+ 104: "SYS_ECONNRESET",
+ 105: "SYS_ENOBUFS",
+ 106: "SYS_EISCONN",
+ 107: "SYS_ENOTCONN",
+ 108: "SYS_ESHUTDOWN",
+ 109: "SYS_ETOOMANYREFS",
+ 110: "SYS_ETIMEDOUT",
+ 111: "SYS_ECONNREFUSED",
+ 112: "SYS_EHOSTDOWN",
+ 113: "SYS_EHOSTUNREACH",
+ 114: "SYS_EALREADY",
+ 115: "SYS_EINPROGRESS",
+ 116: "SYS_ESTALE",
+ 117: "SYS_EUCLEAN",
+ 118: "SYS_ENOTNAM",
+ 119: "SYS_ENAVAIL",
+ 120: "SYS_EISNAM",
+ 121: "SYS_EREMOTEIO",
+ 122: "SYS_EDQUOT",
+ 123: "SYS_ENOMEDIUM",
+ 124: "SYS_EMEDIUMTYPE",
+ 125: "SYS_ECANCELED",
+ 126: "SYS_ENOKEY",
+ 127: "SYS_EKEYEXPIRED",
+ 128: "SYS_EKEYREVOKED",
+ 129: "SYS_EKEYREJECTED",
+ 130: "SYS_EOWNERDEAD",
+ 131: "SYS_ENOTRECOVERABLE",
+ 132: "SYS_ERFKILL",
+}
+var RemoteSocketServiceError_SystemError_value = map[string]int32{
+ "SYS_SUCCESS": 0,
+ "SYS_EPERM": 1,
+ "SYS_ENOENT": 2,
+ "SYS_ESRCH": 3,
+ "SYS_EINTR": 4,
+ "SYS_EIO": 5,
+ "SYS_ENXIO": 6,
+ "SYS_E2BIG": 7,
+ "SYS_ENOEXEC": 8,
+ "SYS_EBADF": 9,
+ "SYS_ECHILD": 10,
+ "SYS_EAGAIN": 11,
+ "SYS_EWOULDBLOCK": 11,
+ "SYS_ENOMEM": 12,
+ "SYS_EACCES": 13,
+ "SYS_EFAULT": 14,
+ "SYS_ENOTBLK": 15,
+ "SYS_EBUSY": 16,
+ "SYS_EEXIST": 17,
+ "SYS_EXDEV": 18,
+ "SYS_ENODEV": 19,
+ "SYS_ENOTDIR": 20,
+ "SYS_EISDIR": 21,
+ "SYS_EINVAL": 22,
+ "SYS_ENFILE": 23,
+ "SYS_EMFILE": 24,
+ "SYS_ENOTTY": 25,
+ "SYS_ETXTBSY": 26,
+ "SYS_EFBIG": 27,
+ "SYS_ENOSPC": 28,
+ "SYS_ESPIPE": 29,
+ "SYS_EROFS": 30,
+ "SYS_EMLINK": 31,
+ "SYS_EPIPE": 32,
+ "SYS_EDOM": 33,
+ "SYS_ERANGE": 34,
+ "SYS_EDEADLK": 35,
+ "SYS_EDEADLOCK": 35,
+ "SYS_ENAMETOOLONG": 36,
+ "SYS_ENOLCK": 37,
+ "SYS_ENOSYS": 38,
+ "SYS_ENOTEMPTY": 39,
+ "SYS_ELOOP": 40,
+ "SYS_ENOMSG": 42,
+ "SYS_EIDRM": 43,
+ "SYS_ECHRNG": 44,
+ "SYS_EL2NSYNC": 45,
+ "SYS_EL3HLT": 46,
+ "SYS_EL3RST": 47,
+ "SYS_ELNRNG": 48,
+ "SYS_EUNATCH": 49,
+ "SYS_ENOCSI": 50,
+ "SYS_EL2HLT": 51,
+ "SYS_EBADE": 52,
+ "SYS_EBADR": 53,
+ "SYS_EXFULL": 54,
+ "SYS_ENOANO": 55,
+ "SYS_EBADRQC": 56,
+ "SYS_EBADSLT": 57,
+ "SYS_EBFONT": 59,
+ "SYS_ENOSTR": 60,
+ "SYS_ENODATA": 61,
+ "SYS_ETIME": 62,
+ "SYS_ENOSR": 63,
+ "SYS_ENONET": 64,
+ "SYS_ENOPKG": 65,
+ "SYS_EREMOTE": 66,
+ "SYS_ENOLINK": 67,
+ "SYS_EADV": 68,
+ "SYS_ESRMNT": 69,
+ "SYS_ECOMM": 70,
+ "SYS_EPROTO": 71,
+ "SYS_EMULTIHOP": 72,
+ "SYS_EDOTDOT": 73,
+ "SYS_EBADMSG": 74,
+ "SYS_EOVERFLOW": 75,
+ "SYS_ENOTUNIQ": 76,
+ "SYS_EBADFD": 77,
+ "SYS_EREMCHG": 78,
+ "SYS_ELIBACC": 79,
+ "SYS_ELIBBAD": 80,
+ "SYS_ELIBSCN": 81,
+ "SYS_ELIBMAX": 82,
+ "SYS_ELIBEXEC": 83,
+ "SYS_EILSEQ": 84,
+ "SYS_ERESTART": 85,
+ "SYS_ESTRPIPE": 86,
+ "SYS_EUSERS": 87,
+ "SYS_ENOTSOCK": 88,
+ "SYS_EDESTADDRREQ": 89,
+ "SYS_EMSGSIZE": 90,
+ "SYS_EPROTOTYPE": 91,
+ "SYS_ENOPROTOOPT": 92,
+ "SYS_EPROTONOSUPPORT": 93,
+ "SYS_ESOCKTNOSUPPORT": 94,
+ "SYS_EOPNOTSUPP": 95,
+ "SYS_ENOTSUP": 95,
+ "SYS_EPFNOSUPPORT": 96,
+ "SYS_EAFNOSUPPORT": 97,
+ "SYS_EADDRINUSE": 98,
+ "SYS_EADDRNOTAVAIL": 99,
+ "SYS_ENETDOWN": 100,
+ "SYS_ENETUNREACH": 101,
+ "SYS_ENETRESET": 102,
+ "SYS_ECONNABORTED": 103,
+ "SYS_ECONNRESET": 104,
+ "SYS_ENOBUFS": 105,
+ "SYS_EISCONN": 106,
+ "SYS_ENOTCONN": 107,
+ "SYS_ESHUTDOWN": 108,
+ "SYS_ETOOMANYREFS": 109,
+ "SYS_ETIMEDOUT": 110,
+ "SYS_ECONNREFUSED": 111,
+ "SYS_EHOSTDOWN": 112,
+ "SYS_EHOSTUNREACH": 113,
+ "SYS_EALREADY": 114,
+ "SYS_EINPROGRESS": 115,
+ "SYS_ESTALE": 116,
+ "SYS_EUCLEAN": 117,
+ "SYS_ENOTNAM": 118,
+ "SYS_ENAVAIL": 119,
+ "SYS_EISNAM": 120,
+ "SYS_EREMOTEIO": 121,
+ "SYS_EDQUOT": 122,
+ "SYS_ENOMEDIUM": 123,
+ "SYS_EMEDIUMTYPE": 124,
+ "SYS_ECANCELED": 125,
+ "SYS_ENOKEY": 126,
+ "SYS_EKEYEXPIRED": 127,
+ "SYS_EKEYREVOKED": 128,
+ "SYS_EKEYREJECTED": 129,
+ "SYS_EOWNERDEAD": 130,
+ "SYS_ENOTRECOVERABLE": 131,
+ "SYS_ERFKILL": 132,
+}
+
+func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError {
+ p := new(RemoteSocketServiceError_SystemError)
+ *p = x
+ return p
+}
+func (x RemoteSocketServiceError_SystemError) String() string {
+ return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x))
+}
+func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError")
+ if err != nil {
+ return err
+ }
+ *x = RemoteSocketServiceError_SystemError(value)
+ return nil
+}
+func (RemoteSocketServiceError_SystemError) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 1}
+}
+
+type CreateSocketRequest_SocketFamily int32
+
+const (
+ CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1
+ CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2
+)
+
+var CreateSocketRequest_SocketFamily_name = map[int32]string{
+ 1: "IPv4",
+ 2: "IPv6",
+}
+var CreateSocketRequest_SocketFamily_value = map[string]int32{
+ "IPv4": 1,
+ "IPv6": 2,
+}
+
+func (x CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily {
+ p := new(CreateSocketRequest_SocketFamily)
+ *p = x
+ return p
+}
+func (x CreateSocketRequest_SocketFamily) String() string {
+ return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x))
+}
+func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily")
+ if err != nil {
+ return err
+ }
+ *x = CreateSocketRequest_SocketFamily(value)
+ return nil
+}
+func (CreateSocketRequest_SocketFamily) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 0}
+}
+
+type CreateSocketRequest_SocketProtocol int32
+
+const (
+ CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1
+ CreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2
+)
+
+var CreateSocketRequest_SocketProtocol_name = map[int32]string{
+ 1: "TCP",
+ 2: "UDP",
+}
+var CreateSocketRequest_SocketProtocol_value = map[string]int32{
+ "TCP": 1,
+ "UDP": 2,
+}
+
+func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol {
+ p := new(CreateSocketRequest_SocketProtocol)
+ *p = x
+ return p
+}
+func (x CreateSocketRequest_SocketProtocol) String() string {
+ return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x))
+}
+func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol")
+ if err != nil {
+ return err
+ }
+ *x = CreateSocketRequest_SocketProtocol(value)
+ return nil
+}
+func (CreateSocketRequest_SocketProtocol) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 1}
+}
+
+type SocketOption_SocketOptionLevel int32
+
+const (
+ SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0
+ SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1
+ SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6
+ SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17
+)
+
+var SocketOption_SocketOptionLevel_name = map[int32]string{
+ 0: "SOCKET_SOL_IP",
+ 1: "SOCKET_SOL_SOCKET",
+ 6: "SOCKET_SOL_TCP",
+ 17: "SOCKET_SOL_UDP",
+}
+var SocketOption_SocketOptionLevel_value = map[string]int32{
+ "SOCKET_SOL_IP": 0,
+ "SOCKET_SOL_SOCKET": 1,
+ "SOCKET_SOL_TCP": 6,
+ "SOCKET_SOL_UDP": 17,
+}
+
+func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel {
+ p := new(SocketOption_SocketOptionLevel)
+ *p = x
+ return p
+}
+func (x SocketOption_SocketOptionLevel) String() string {
+ return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x))
+}
+func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel")
+ if err != nil {
+ return err
+ }
+ *x = SocketOption_SocketOptionLevel(value)
+ return nil
+}
+func (SocketOption_SocketOptionLevel) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 0}
+}
+
+type SocketOption_SocketOptionName int32
+
+const (
+ SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1
+ SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2
+ SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3
+ SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4
+ SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5
+ SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6
+ SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7
+ SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8
+ SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9
+ SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10
+ SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13
+ SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20
+ SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21
+ SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1
+ SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2
+ SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3
+ SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4
+ SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1
+ SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2
+ SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3
+ SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4
+ SocketOption_SOCKET_TCP_KEEPINTVL SocketOption_SocketOptionName = 5
+ SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6
+ SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7
+ SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8
+ SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9
+ SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10
+ SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11
+ SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12
+)
+
+var SocketOption_SocketOptionName_name = map[int32]string{
+ 1: "SOCKET_SO_DEBUG",
+ 2: "SOCKET_SO_REUSEADDR",
+ 3: "SOCKET_SO_TYPE",
+ 4: "SOCKET_SO_ERROR",
+ 5: "SOCKET_SO_DONTROUTE",
+ 6: "SOCKET_SO_BROADCAST",
+ 7: "SOCKET_SO_SNDBUF",
+ 8: "SOCKET_SO_RCVBUF",
+ 9: "SOCKET_SO_KEEPALIVE",
+ 10: "SOCKET_SO_OOBINLINE",
+ 13: "SOCKET_SO_LINGER",
+ 20: "SOCKET_SO_RCVTIMEO",
+ 21: "SOCKET_SO_SNDTIMEO",
+ // Duplicate value: 1: "SOCKET_IP_TOS",
+ // Duplicate value: 2: "SOCKET_IP_TTL",
+ // Duplicate value: 3: "SOCKET_IP_HDRINCL",
+ // Duplicate value: 4: "SOCKET_IP_OPTIONS",
+ // Duplicate value: 1: "SOCKET_TCP_NODELAY",
+ // Duplicate value: 2: "SOCKET_TCP_MAXSEG",
+ // Duplicate value: 3: "SOCKET_TCP_CORK",
+ // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE",
+ // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL",
+ // Duplicate value: 6: "SOCKET_TCP_KEEPCNT",
+ // Duplicate value: 7: "SOCKET_TCP_SYNCNT",
+ // Duplicate value: 8: "SOCKET_TCP_LINGER2",
+ // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT",
+ // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP",
+ 11: "SOCKET_TCP_INFO",
+ 12: "SOCKET_TCP_QUICKACK",
+}
+var SocketOption_SocketOptionName_value = map[string]int32{
+ "SOCKET_SO_DEBUG": 1,
+ "SOCKET_SO_REUSEADDR": 2,
+ "SOCKET_SO_TYPE": 3,
+ "SOCKET_SO_ERROR": 4,
+ "SOCKET_SO_DONTROUTE": 5,
+ "SOCKET_SO_BROADCAST": 6,
+ "SOCKET_SO_SNDBUF": 7,
+ "SOCKET_SO_RCVBUF": 8,
+ "SOCKET_SO_KEEPALIVE": 9,
+ "SOCKET_SO_OOBINLINE": 10,
+ "SOCKET_SO_LINGER": 13,
+ "SOCKET_SO_RCVTIMEO": 20,
+ "SOCKET_SO_SNDTIMEO": 21,
+ "SOCKET_IP_TOS": 1,
+ "SOCKET_IP_TTL": 2,
+ "SOCKET_IP_HDRINCL": 3,
+ "SOCKET_IP_OPTIONS": 4,
+ "SOCKET_TCP_NODELAY": 1,
+ "SOCKET_TCP_MAXSEG": 2,
+ "SOCKET_TCP_CORK": 3,
+ "SOCKET_TCP_KEEPIDLE": 4,
+ "SOCKET_TCP_KEEPINTVL": 5,
+ "SOCKET_TCP_KEEPCNT": 6,
+ "SOCKET_TCP_SYNCNT": 7,
+ "SOCKET_TCP_LINGER2": 8,
+ "SOCKET_TCP_DEFER_ACCEPT": 9,
+ "SOCKET_TCP_WINDOW_CLAMP": 10,
+ "SOCKET_TCP_INFO": 11,
+ "SOCKET_TCP_QUICKACK": 12,
+}
+
+func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName {
+ p := new(SocketOption_SocketOptionName)
+ *p = x
+ return p
+}
+func (x SocketOption_SocketOptionName) String() string {
+ return proto.EnumName(SocketOption_SocketOptionName_name, int32(x))
+}
+func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName")
+ if err != nil {
+ return err
+ }
+ *x = SocketOption_SocketOptionName(value)
+ return nil
+}
+func (SocketOption_SocketOptionName) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 1}
+}
+
+type ShutDownRequest_How int32
+
+const (
+ ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1
+ ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2
+ ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3
+)
+
+var ShutDownRequest_How_name = map[int32]string{
+ 1: "SOCKET_SHUT_RD",
+ 2: "SOCKET_SHUT_WR",
+ 3: "SOCKET_SHUT_RDWR",
+}
+var ShutDownRequest_How_value = map[string]int32{
+ "SOCKET_SHUT_RD": 1,
+ "SOCKET_SHUT_WR": 2,
+ "SOCKET_SHUT_RDWR": 3,
+}
+
+func (x ShutDownRequest_How) Enum() *ShutDownRequest_How {
+ p := new(ShutDownRequest_How)
+ *p = x
+ return p
+}
+func (x ShutDownRequest_How) String() string {
+ return proto.EnumName(ShutDownRequest_How_name, int32(x))
+}
+func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How")
+ if err != nil {
+ return err
+ }
+ *x = ShutDownRequest_How(value)
+ return nil
+}
+func (ShutDownRequest_How) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{21, 0}
+}
+
+type ReceiveRequest_Flags int32
+
+const (
+ ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1
+ ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2
+)
+
+var ReceiveRequest_Flags_name = map[int32]string{
+ 1: "MSG_OOB",
+ 2: "MSG_PEEK",
+}
+var ReceiveRequest_Flags_value = map[string]int32{
+ "MSG_OOB": 1,
+ "MSG_PEEK": 2,
+}
+
+func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags {
+ p := new(ReceiveRequest_Flags)
+ *p = x
+ return p
+}
+func (x ReceiveRequest_Flags) String() string {
+ return proto.EnumName(ReceiveRequest_Flags_name, int32(x))
+}
+func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags")
+ if err != nil {
+ return err
+ }
+ *x = ReceiveRequest_Flags(value)
+ return nil
+}
+func (ReceiveRequest_Flags) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{27, 0}
+}
+
+type PollEvent_PollEventFlag int32
+
+const (
+ PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0
+ PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1
+ PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2
+ PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4
+ PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8
+ PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16
+ PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32
+ PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64
+ PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128
+ PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256
+ PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512
+ PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024
+ PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096
+ PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192
+)
+
+var PollEvent_PollEventFlag_name = map[int32]string{
+ 0: "SOCKET_POLLNONE",
+ 1: "SOCKET_POLLIN",
+ 2: "SOCKET_POLLPRI",
+ 4: "SOCKET_POLLOUT",
+ 8: "SOCKET_POLLERR",
+ 16: "SOCKET_POLLHUP",
+ 32: "SOCKET_POLLNVAL",
+ 64: "SOCKET_POLLRDNORM",
+ 128: "SOCKET_POLLRDBAND",
+ 256: "SOCKET_POLLWRNORM",
+ 512: "SOCKET_POLLWRBAND",
+ 1024: "SOCKET_POLLMSG",
+ 4096: "SOCKET_POLLREMOVE",
+ 8192: "SOCKET_POLLRDHUP",
+}
+var PollEvent_PollEventFlag_value = map[string]int32{
+ "SOCKET_POLLNONE": 0,
+ "SOCKET_POLLIN": 1,
+ "SOCKET_POLLPRI": 2,
+ "SOCKET_POLLOUT": 4,
+ "SOCKET_POLLERR": 8,
+ "SOCKET_POLLHUP": 16,
+ "SOCKET_POLLNVAL": 32,
+ "SOCKET_POLLRDNORM": 64,
+ "SOCKET_POLLRDBAND": 128,
+ "SOCKET_POLLWRNORM": 256,
+ "SOCKET_POLLWRBAND": 512,
+ "SOCKET_POLLMSG": 1024,
+ "SOCKET_POLLREMOVE": 4096,
+ "SOCKET_POLLRDHUP": 8192,
+}
+
+func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag {
+ p := new(PollEvent_PollEventFlag)
+ *p = x
+ return p
+}
+func (x PollEvent_PollEventFlag) String() string {
+ return proto.EnumName(PollEvent_PollEventFlag_name, int32(x))
+}
+func (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag")
+ if err != nil {
+ return err
+ }
+ *x = PollEvent_PollEventFlag(value)
+ return nil
+}
+func (PollEvent_PollEventFlag) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{29, 0}
+}
+
+type ResolveReply_ErrorCode int32
+
+const (
+ ResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1
+ ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2
+ ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3
+ ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4
+ ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5
+ ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6
+ ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7
+ ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8
+ ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9
+ ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10
+ ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11
+ ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12
+ ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13
+ ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14
+ ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15
+)
+
+var ResolveReply_ErrorCode_name = map[int32]string{
+ 1: "SOCKET_EAI_ADDRFAMILY",
+ 2: "SOCKET_EAI_AGAIN",
+ 3: "SOCKET_EAI_BADFLAGS",
+ 4: "SOCKET_EAI_FAIL",
+ 5: "SOCKET_EAI_FAMILY",
+ 6: "SOCKET_EAI_MEMORY",
+ 7: "SOCKET_EAI_NODATA",
+ 8: "SOCKET_EAI_NONAME",
+ 9: "SOCKET_EAI_SERVICE",
+ 10: "SOCKET_EAI_SOCKTYPE",
+ 11: "SOCKET_EAI_SYSTEM",
+ 12: "SOCKET_EAI_BADHINTS",
+ 13: "SOCKET_EAI_PROTOCOL",
+ 14: "SOCKET_EAI_OVERFLOW",
+ 15: "SOCKET_EAI_MAX",
+}
+var ResolveReply_ErrorCode_value = map[string]int32{
+ "SOCKET_EAI_ADDRFAMILY": 1,
+ "SOCKET_EAI_AGAIN": 2,
+ "SOCKET_EAI_BADFLAGS": 3,
+ "SOCKET_EAI_FAIL": 4,
+ "SOCKET_EAI_FAMILY": 5,
+ "SOCKET_EAI_MEMORY": 6,
+ "SOCKET_EAI_NODATA": 7,
+ "SOCKET_EAI_NONAME": 8,
+ "SOCKET_EAI_SERVICE": 9,
+ "SOCKET_EAI_SOCKTYPE": 10,
+ "SOCKET_EAI_SYSTEM": 11,
+ "SOCKET_EAI_BADHINTS": 12,
+ "SOCKET_EAI_PROTOCOL": 13,
+ "SOCKET_EAI_OVERFLOW": 14,
+ "SOCKET_EAI_MAX": 15,
+}
+
+func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode {
+ p := new(ResolveReply_ErrorCode)
+ *p = x
+ return p
+}
+func (x ResolveReply_ErrorCode) String() string {
+ return proto.EnumName(ResolveReply_ErrorCode_name, int32(x))
+}
+func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, "ResolveReply_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ResolveReply_ErrorCode(value)
+ return nil
+}
+func (ResolveReply_ErrorCode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{33, 0}
+}
+
+type RemoteSocketServiceError struct {
+ SystemError *int32 `protobuf:"varint,1,opt,name=system_error,json=systemError,def=0" json:"system_error,omitempty"`
+ ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail,json=errorDetail" json:"error_detail,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} }
+func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) }
+func (*RemoteSocketServiceError) ProtoMessage() {}
+func (*RemoteSocketServiceError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{0}
+}
+func (m *RemoteSocketServiceError) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_RemoteSocketServiceError.Unmarshal(m, b)
+}
+func (m *RemoteSocketServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_RemoteSocketServiceError.Marshal(b, m, deterministic)
+}
+func (dst *RemoteSocketServiceError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RemoteSocketServiceError.Merge(dst, src)
+}
+func (m *RemoteSocketServiceError) XXX_Size() int {
+ return xxx_messageInfo_RemoteSocketServiceError.Size(m)
+}
+func (m *RemoteSocketServiceError) XXX_DiscardUnknown() {
+ xxx_messageInfo_RemoteSocketServiceError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RemoteSocketServiceError proto.InternalMessageInfo
+
+const Default_RemoteSocketServiceError_SystemError int32 = 0
+
+func (m *RemoteSocketServiceError) GetSystemError() int32 {
+ if m != nil && m.SystemError != nil {
+ return *m.SystemError
+ }
+ return Default_RemoteSocketServiceError_SystemError
+}
+
+func (m *RemoteSocketServiceError) GetErrorDetail() string {
+ if m != nil && m.ErrorDetail != nil {
+ return *m.ErrorDetail
+ }
+ return ""
+}
+
+type AddressPort struct {
+ Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"`
+ PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address,json=packedAddress" json:"packed_address,omitempty"`
+ HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint,json=hostnameHint" json:"hostname_hint,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AddressPort) Reset() { *m = AddressPort{} }
+func (m *AddressPort) String() string { return proto.CompactTextString(m) }
+func (*AddressPort) ProtoMessage() {}
+func (*AddressPort) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{1}
+}
+func (m *AddressPort) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AddressPort.Unmarshal(m, b)
+}
+func (m *AddressPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AddressPort.Marshal(b, m, deterministic)
+}
+func (dst *AddressPort) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AddressPort.Merge(dst, src)
+}
+func (m *AddressPort) XXX_Size() int {
+ return xxx_messageInfo_AddressPort.Size(m)
+}
+func (m *AddressPort) XXX_DiscardUnknown() {
+ xxx_messageInfo_AddressPort.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AddressPort proto.InternalMessageInfo
+
+func (m *AddressPort) GetPort() int32 {
+ if m != nil && m.Port != nil {
+ return *m.Port
+ }
+ return 0
+}
+
+func (m *AddressPort) GetPackedAddress() []byte {
+ if m != nil {
+ return m.PackedAddress
+ }
+ return nil
+}
+
+func (m *AddressPort) GetHostnameHint() string {
+ if m != nil && m.HostnameHint != nil {
+ return *m.HostnameHint
+ }
+ return ""
+}
+
+type CreateSocketRequest struct {
+ Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"`
+ Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"`
+ SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions" json:"socket_options,omitempty"`
+ ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"`
+ ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,json=listenBacklog,def=0" json:"listen_backlog,omitempty"`
+ RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"`
+ AppId *string `protobuf:"bytes,9,opt,name=app_id,json=appId" json:"app_id,omitempty"`
+ ProjectId *int64 `protobuf:"varint,10,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} }
+func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateSocketRequest) ProtoMessage() {}
+func (*CreateSocketRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{2}
+}
+func (m *CreateSocketRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CreateSocketRequest.Unmarshal(m, b)
+}
+func (m *CreateSocketRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CreateSocketRequest.Marshal(b, m, deterministic)
+}
+func (dst *CreateSocketRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CreateSocketRequest.Merge(dst, src)
+}
+func (m *CreateSocketRequest) XXX_Size() int {
+ return xxx_messageInfo_CreateSocketRequest.Size(m)
+}
+func (m *CreateSocketRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CreateSocketRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateSocketRequest proto.InternalMessageInfo
+
+const Default_CreateSocketRequest_ListenBacklog int32 = 0
+
+func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily {
+ if m != nil && m.Family != nil {
+ return *m.Family
+ }
+ return CreateSocketRequest_IPv4
+}
+
+func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol {
+ if m != nil && m.Protocol != nil {
+ return *m.Protocol
+ }
+ return CreateSocketRequest_TCP
+}
+
+func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption {
+ if m != nil {
+ return m.SocketOptions
+ }
+ return nil
+}
+
+func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+func (m *CreateSocketRequest) GetListenBacklog() int32 {
+ if m != nil && m.ListenBacklog != nil {
+ return *m.ListenBacklog
+ }
+ return Default_CreateSocketRequest_ListenBacklog
+}
+
+func (m *CreateSocketRequest) GetRemoteIp() *AddressPort {
+ if m != nil {
+ return m.RemoteIp
+ }
+ return nil
+}
+
+func (m *CreateSocketRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *CreateSocketRequest) GetProjectId() int64 {
+ if m != nil && m.ProjectId != nil {
+ return *m.ProjectId
+ }
+ return 0
+}
+
+type CreateSocketReply struct {
+ SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"`
+ ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address,json=serverAddress" json:"server_address,omitempty"`
+ ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} }
+func (m *CreateSocketReply) String() string { return proto.CompactTextString(m) }
+func (*CreateSocketReply) ProtoMessage() {}
+func (*CreateSocketReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{3}
+}
+
+var extRange_CreateSocketReply = []proto.ExtensionRange{
+ {Start: 1000, End: 536870911},
+}
+
+func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_CreateSocketReply
+}
+func (m *CreateSocketReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CreateSocketReply.Unmarshal(m, b)
+}
+func (m *CreateSocketReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CreateSocketReply.Marshal(b, m, deterministic)
+}
+func (dst *CreateSocketReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CreateSocketReply.Merge(dst, src)
+}
+func (m *CreateSocketReply) XXX_Size() int {
+ return xxx_messageInfo_CreateSocketReply.Size(m)
+}
+func (m *CreateSocketReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_CreateSocketReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateSocketReply proto.InternalMessageInfo
+
+func (m *CreateSocketReply) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *CreateSocketReply) GetServerAddress() *AddressPort {
+ if m != nil {
+ return m.ServerAddress
+ }
+ return nil
+}
+
+func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type BindRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"`
+ ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BindRequest) Reset() { *m = BindRequest{} }
+func (m *BindRequest) String() string { return proto.CompactTextString(m) }
+func (*BindRequest) ProtoMessage() {}
+func (*BindRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{4}
+}
+func (m *BindRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_BindRequest.Unmarshal(m, b)
+}
+func (m *BindRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_BindRequest.Marshal(b, m, deterministic)
+}
+func (dst *BindRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BindRequest.Merge(dst, src)
+}
+func (m *BindRequest) XXX_Size() int {
+ return xxx_messageInfo_BindRequest.Size(m)
+}
+func (m *BindRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_BindRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BindRequest proto.InternalMessageInfo
+
+func (m *BindRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *BindRequest) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type BindReply struct {
+ ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BindReply) Reset() { *m = BindReply{} }
+func (m *BindReply) String() string { return proto.CompactTextString(m) }
+func (*BindReply) ProtoMessage() {}
+func (*BindReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{5}
+}
+func (m *BindReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_BindReply.Unmarshal(m, b)
+}
+func (m *BindReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_BindReply.Marshal(b, m, deterministic)
+}
+func (dst *BindReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BindReply.Merge(dst, src)
+}
+func (m *BindReply) XXX_Size() int {
+ return xxx_messageInfo_BindReply.Size(m)
+}
+func (m *BindReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_BindReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BindReply proto.InternalMessageInfo
+
+func (m *BindReply) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type GetSocketNameRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} }
+func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetSocketNameRequest) ProtoMessage() {}
+func (*GetSocketNameRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{6}
+}
+func (m *GetSocketNameRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetSocketNameRequest.Unmarshal(m, b)
+}
+func (m *GetSocketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetSocketNameRequest.Marshal(b, m, deterministic)
+}
+func (dst *GetSocketNameRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetSocketNameRequest.Merge(dst, src)
+}
+func (m *GetSocketNameRequest) XXX_Size() int {
+ return xxx_messageInfo_GetSocketNameRequest.Size(m)
+}
+func (m *GetSocketNameRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetSocketNameRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetSocketNameRequest proto.InternalMessageInfo
+
+func (m *GetSocketNameRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+type GetSocketNameReply struct {
+ ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} }
+func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) }
+func (*GetSocketNameReply) ProtoMessage() {}
+func (*GetSocketNameReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{7}
+}
+func (m *GetSocketNameReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetSocketNameReply.Unmarshal(m, b)
+}
+func (m *GetSocketNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetSocketNameReply.Marshal(b, m, deterministic)
+}
+func (dst *GetSocketNameReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetSocketNameReply.Merge(dst, src)
+}
+func (m *GetSocketNameReply) XXX_Size() int {
+ return xxx_messageInfo_GetSocketNameReply.Size(m)
+}
+func (m *GetSocketNameReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetSocketNameReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetSocketNameReply proto.InternalMessageInfo
+
+func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type GetPeerNameRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} }
+func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetPeerNameRequest) ProtoMessage() {}
+func (*GetPeerNameRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{8}
+}
+func (m *GetPeerNameRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetPeerNameRequest.Unmarshal(m, b)
+}
+func (m *GetPeerNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetPeerNameRequest.Marshal(b, m, deterministic)
+}
+func (dst *GetPeerNameRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetPeerNameRequest.Merge(dst, src)
+}
+func (m *GetPeerNameRequest) XXX_Size() int {
+ return xxx_messageInfo_GetPeerNameRequest.Size(m)
+}
+func (m *GetPeerNameRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetPeerNameRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetPeerNameRequest proto.InternalMessageInfo
+
+func (m *GetPeerNameRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+type GetPeerNameReply struct {
+ PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip,json=peerIp" json:"peer_ip,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} }
+func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) }
+func (*GetPeerNameReply) ProtoMessage() {}
+func (*GetPeerNameReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{9}
+}
+func (m *GetPeerNameReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetPeerNameReply.Unmarshal(m, b)
+}
+func (m *GetPeerNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetPeerNameReply.Marshal(b, m, deterministic)
+}
+func (dst *GetPeerNameReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetPeerNameReply.Merge(dst, src)
+}
+func (m *GetPeerNameReply) XXX_Size() int {
+ return xxx_messageInfo_GetPeerNameReply.Size(m)
+}
+func (m *GetPeerNameReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetPeerNameReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetPeerNameReply proto.InternalMessageInfo
+
+func (m *GetPeerNameReply) GetPeerIp() *AddressPort {
+ if m != nil {
+ return m.PeerIp
+ }
+ return nil
+}
+
+type SocketOption struct {
+ Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"`
+ Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"`
+ Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SocketOption) Reset() { *m = SocketOption{} }
+func (m *SocketOption) String() string { return proto.CompactTextString(m) }
+func (*SocketOption) ProtoMessage() {}
+func (*SocketOption) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{10}
+}
+func (m *SocketOption) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SocketOption.Unmarshal(m, b)
+}
+func (m *SocketOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SocketOption.Marshal(b, m, deterministic)
+}
+func (dst *SocketOption) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SocketOption.Merge(dst, src)
+}
+func (m *SocketOption) XXX_Size() int {
+ return xxx_messageInfo_SocketOption.Size(m)
+}
+func (m *SocketOption) XXX_DiscardUnknown() {
+ xxx_messageInfo_SocketOption.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SocketOption proto.InternalMessageInfo
+
+func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return SocketOption_SOCKET_SOL_IP
+}
+
+func (m *SocketOption) GetOption() SocketOption_SocketOptionName {
+ if m != nil && m.Option != nil {
+ return *m.Option
+ }
+ return SocketOption_SOCKET_SO_DEBUG
+}
+
+func (m *SocketOption) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type SetSocketOptionsRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"`
+ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} }
+func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) }
+func (*SetSocketOptionsRequest) ProtoMessage() {}
+func (*SetSocketOptionsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{11}
+}
+func (m *SetSocketOptionsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetSocketOptionsRequest.Unmarshal(m, b)
+}
+func (m *SetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetSocketOptionsRequest.Marshal(b, m, deterministic)
+}
+func (dst *SetSocketOptionsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetSocketOptionsRequest.Merge(dst, src)
+}
+func (m *SetSocketOptionsRequest) XXX_Size() int {
+ return xxx_messageInfo_SetSocketOptionsRequest.Size(m)
+}
+func (m *SetSocketOptionsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetSocketOptionsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetSocketOptionsRequest proto.InternalMessageInfo
+
+func (m *SetSocketOptionsRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+type SetSocketOptionsReply struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} }
+func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) }
+func (*SetSocketOptionsReply) ProtoMessage() {}
+func (*SetSocketOptionsReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{12}
+}
+func (m *SetSocketOptionsReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetSocketOptionsReply.Unmarshal(m, b)
+}
+func (m *SetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetSocketOptionsReply.Marshal(b, m, deterministic)
+}
+func (dst *SetSocketOptionsReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetSocketOptionsReply.Merge(dst, src)
+}
+func (m *SetSocketOptionsReply) XXX_Size() int {
+ return xxx_messageInfo_SetSocketOptionsReply.Size(m)
+}
+func (m *SetSocketOptionsReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetSocketOptionsReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetSocketOptionsReply proto.InternalMessageInfo
+
+type GetSocketOptionsRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"`
+ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} }
+func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetSocketOptionsRequest) ProtoMessage() {}
+func (*GetSocketOptionsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{13}
+}
+func (m *GetSocketOptionsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetSocketOptionsRequest.Unmarshal(m, b)
+}
+func (m *GetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetSocketOptionsRequest.Marshal(b, m, deterministic)
+}
+func (dst *GetSocketOptionsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetSocketOptionsRequest.Merge(dst, src)
+}
+func (m *GetSocketOptionsRequest) XXX_Size() int {
+ return xxx_messageInfo_GetSocketOptionsRequest.Size(m)
+}
+func (m *GetSocketOptionsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetSocketOptionsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetSocketOptionsRequest proto.InternalMessageInfo
+
+func (m *GetSocketOptionsRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+type GetSocketOptionsReply struct {
+ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} }
+func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) }
+func (*GetSocketOptionsReply) ProtoMessage() {}
+func (*GetSocketOptionsReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{14}
+}
+func (m *GetSocketOptionsReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetSocketOptionsReply.Unmarshal(m, b)
+}
+func (m *GetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetSocketOptionsReply.Marshal(b, m, deterministic)
+}
+func (dst *GetSocketOptionsReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetSocketOptionsReply.Merge(dst, src)
+}
+func (m *GetSocketOptionsReply) XXX_Size() int {
+ return xxx_messageInfo_GetSocketOptionsReply.Size(m)
+}
+func (m *GetSocketOptionsReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetSocketOptionsReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetSocketOptionsReply proto.InternalMessageInfo
+
+func (m *GetSocketOptionsReply) GetOptions() []*SocketOption {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+type ConnectRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"`
+ RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ConnectRequest) Reset() { *m = ConnectRequest{} }
+func (m *ConnectRequest) String() string { return proto.CompactTextString(m) }
+func (*ConnectRequest) ProtoMessage() {}
+func (*ConnectRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{15}
+}
+func (m *ConnectRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ConnectRequest.Unmarshal(m, b)
+}
+func (m *ConnectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ConnectRequest.Marshal(b, m, deterministic)
+}
+func (dst *ConnectRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConnectRequest.Merge(dst, src)
+}
+func (m *ConnectRequest) XXX_Size() int {
+ return xxx_messageInfo_ConnectRequest.Size(m)
+}
+func (m *ConnectRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConnectRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConnectRequest proto.InternalMessageInfo
+
+const Default_ConnectRequest_TimeoutSeconds float64 = -1
+
+func (m *ConnectRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *ConnectRequest) GetRemoteIp() *AddressPort {
+ if m != nil {
+ return m.RemoteIp
+ }
+ return nil
+}
+
+func (m *ConnectRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_ConnectRequest_TimeoutSeconds
+}
+
+type ConnectReply struct {
+ ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ConnectReply) Reset() { *m = ConnectReply{} }
+func (m *ConnectReply) String() string { return proto.CompactTextString(m) }
+func (*ConnectReply) ProtoMessage() {}
+func (*ConnectReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{16}
+}
+
+var extRange_ConnectReply = []proto.ExtensionRange{
+ {Start: 1000, End: 536870911},
+}
+
+func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_ConnectReply
+}
+func (m *ConnectReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ConnectReply.Unmarshal(m, b)
+}
+func (m *ConnectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ConnectReply.Marshal(b, m, deterministic)
+}
+func (dst *ConnectReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConnectReply.Merge(dst, src)
+}
+func (m *ConnectReply) XXX_Size() int {
+ return xxx_messageInfo_ConnectReply.Size(m)
+}
+func (m *ConnectReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConnectReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConnectReply proto.InternalMessageInfo
+
+func (m *ConnectReply) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type ListenRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"`
+ Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListenRequest) Reset() { *m = ListenRequest{} }
+func (m *ListenRequest) String() string { return proto.CompactTextString(m) }
+func (*ListenRequest) ProtoMessage() {}
+func (*ListenRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{17}
+}
+func (m *ListenRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListenRequest.Unmarshal(m, b)
+}
+func (m *ListenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListenRequest.Marshal(b, m, deterministic)
+}
+func (dst *ListenRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListenRequest.Merge(dst, src)
+}
+func (m *ListenRequest) XXX_Size() int {
+ return xxx_messageInfo_ListenRequest.Size(m)
+}
+func (m *ListenRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListenRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListenRequest proto.InternalMessageInfo
+
+func (m *ListenRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *ListenRequest) GetBacklog() int32 {
+ if m != nil && m.Backlog != nil {
+ return *m.Backlog
+ }
+ return 0
+}
+
+type ListenReply struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListenReply) Reset() { *m = ListenReply{} }
+func (m *ListenReply) String() string { return proto.CompactTextString(m) }
+func (*ListenReply) ProtoMessage() {}
+func (*ListenReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{18}
+}
+func (m *ListenReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListenReply.Unmarshal(m, b)
+}
+func (m *ListenReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListenReply.Marshal(b, m, deterministic)
+}
+func (dst *ListenReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListenReply.Merge(dst, src)
+}
+func (m *ListenReply) XXX_Size() int {
+ return xxx_messageInfo_ListenReply.Size(m)
+}
+func (m *ListenReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListenReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListenReply proto.InternalMessageInfo
+
+type AcceptRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AcceptRequest) Reset() { *m = AcceptRequest{} }
+func (m *AcceptRequest) String() string { return proto.CompactTextString(m) }
+func (*AcceptRequest) ProtoMessage() {}
+func (*AcceptRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{19}
+}
+func (m *AcceptRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AcceptRequest.Unmarshal(m, b)
+}
+func (m *AcceptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AcceptRequest.Marshal(b, m, deterministic)
+}
+func (dst *AcceptRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AcceptRequest.Merge(dst, src)
+}
+func (m *AcceptRequest) XXX_Size() int {
+ return xxx_messageInfo_AcceptRequest.Size(m)
+}
+func (m *AcceptRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AcceptRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AcceptRequest proto.InternalMessageInfo
+
+const Default_AcceptRequest_TimeoutSeconds float64 = -1
+
+func (m *AcceptRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *AcceptRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_AcceptRequest_TimeoutSeconds
+}
+
+type AcceptReply struct {
+ NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor,json=newSocketDescriptor" json:"new_socket_descriptor,omitempty"`
+ RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address,json=remoteAddress" json:"remote_address,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AcceptReply) Reset() { *m = AcceptReply{} }
+func (m *AcceptReply) String() string { return proto.CompactTextString(m) }
+func (*AcceptReply) ProtoMessage() {}
+func (*AcceptReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{20}
+}
+func (m *AcceptReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AcceptReply.Unmarshal(m, b)
+}
+func (m *AcceptReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AcceptReply.Marshal(b, m, deterministic)
+}
+func (dst *AcceptReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AcceptReply.Merge(dst, src)
+}
+func (m *AcceptReply) XXX_Size() int {
+ return xxx_messageInfo_AcceptReply.Size(m)
+}
+func (m *AcceptReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_AcceptReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AcceptReply proto.InternalMessageInfo
+
+func (m *AcceptReply) GetNewSocketDescriptor() []byte {
+ if m != nil {
+ return m.NewSocketDescriptor
+ }
+ return nil
+}
+
+func (m *AcceptReply) GetRemoteAddress() *AddressPort {
+ if m != nil {
+ return m.RemoteAddress
+ }
+ return nil
+}
+
+type ShutDownRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"`
+ How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"`
+ SendOffset *int64 `protobuf:"varint,3,req,name=send_offset,json=sendOffset" json:"send_offset,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} }
+func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) }
+func (*ShutDownRequest) ProtoMessage() {}
+func (*ShutDownRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{21}
+}
+func (m *ShutDownRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ShutDownRequest.Unmarshal(m, b)
+}
+func (m *ShutDownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ShutDownRequest.Marshal(b, m, deterministic)
+}
+func (dst *ShutDownRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ShutDownRequest.Merge(dst, src)
+}
+func (m *ShutDownRequest) XXX_Size() int {
+ return xxx_messageInfo_ShutDownRequest.Size(m)
+}
+func (m *ShutDownRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ShutDownRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ShutDownRequest proto.InternalMessageInfo
+
+func (m *ShutDownRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *ShutDownRequest) GetHow() ShutDownRequest_How {
+ if m != nil && m.How != nil {
+ return *m.How
+ }
+ return ShutDownRequest_SOCKET_SHUT_RD
+}
+
+func (m *ShutDownRequest) GetSendOffset() int64 {
+ if m != nil && m.SendOffset != nil {
+ return *m.SendOffset
+ }
+ return 0
+}
+
+type ShutDownReply struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ShutDownReply) Reset() { *m = ShutDownReply{} }
+func (m *ShutDownReply) String() string { return proto.CompactTextString(m) }
+func (*ShutDownReply) ProtoMessage() {}
+func (*ShutDownReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{22}
+}
+func (m *ShutDownReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ShutDownReply.Unmarshal(m, b)
+}
+func (m *ShutDownReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ShutDownReply.Marshal(b, m, deterministic)
+}
+func (dst *ShutDownReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ShutDownReply.Merge(dst, src)
+}
+func (m *ShutDownReply) XXX_Size() int {
+ return xxx_messageInfo_ShutDownReply.Size(m)
+}
+func (m *ShutDownReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_ShutDownReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ShutDownReply proto.InternalMessageInfo
+
+type CloseRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"`
+ SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,json=sendOffset,def=-1" json:"send_offset,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CloseRequest) Reset() { *m = CloseRequest{} }
+func (m *CloseRequest) String() string { return proto.CompactTextString(m) }
+func (*CloseRequest) ProtoMessage() {}
+func (*CloseRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{23}
+}
+func (m *CloseRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CloseRequest.Unmarshal(m, b)
+}
+func (m *CloseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CloseRequest.Marshal(b, m, deterministic)
+}
+func (dst *CloseRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CloseRequest.Merge(dst, src)
+}
+func (m *CloseRequest) XXX_Size() int {
+ return xxx_messageInfo_CloseRequest.Size(m)
+}
+func (m *CloseRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CloseRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloseRequest proto.InternalMessageInfo
+
+const Default_CloseRequest_SendOffset int64 = -1
+
+func (m *CloseRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *CloseRequest) GetSendOffset() int64 {
+ if m != nil && m.SendOffset != nil {
+ return *m.SendOffset
+ }
+ return Default_CloseRequest_SendOffset
+}
+
+type CloseReply struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CloseReply) Reset() { *m = CloseReply{} }
+func (m *CloseReply) String() string { return proto.CompactTextString(m) }
+func (*CloseReply) ProtoMessage() {}
+func (*CloseReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{24}
+}
+func (m *CloseReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CloseReply.Unmarshal(m, b)
+}
+func (m *CloseReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CloseReply.Marshal(b, m, deterministic)
+}
+func (dst *CloseReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CloseReply.Merge(dst, src)
+}
+func (m *CloseReply) XXX_Size() int {
+ return xxx_messageInfo_CloseReply.Size(m)
+}
+func (m *CloseReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_CloseReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloseReply proto.InternalMessageInfo
+
+type SendRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"`
+ Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"`
+ StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"`
+ Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"`
+ SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to,json=sendTo" json:"send_to,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SendRequest) Reset() { *m = SendRequest{} }
+func (m *SendRequest) String() string { return proto.CompactTextString(m) }
+func (*SendRequest) ProtoMessage() {}
+func (*SendRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{25}
+}
+func (m *SendRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SendRequest.Unmarshal(m, b)
+}
+func (m *SendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SendRequest.Marshal(b, m, deterministic)
+}
+func (dst *SendRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SendRequest.Merge(dst, src)
+}
+func (m *SendRequest) XXX_Size() int {
+ return xxx_messageInfo_SendRequest.Size(m)
+}
+func (m *SendRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SendRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SendRequest proto.InternalMessageInfo
+
+const Default_SendRequest_Flags int32 = 0
+const Default_SendRequest_TimeoutSeconds float64 = -1
+
+func (m *SendRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *SendRequest) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *SendRequest) GetStreamOffset() int64 {
+ if m != nil && m.StreamOffset != nil {
+ return *m.StreamOffset
+ }
+ return 0
+}
+
+func (m *SendRequest) GetFlags() int32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return Default_SendRequest_Flags
+}
+
+func (m *SendRequest) GetSendTo() *AddressPort {
+ if m != nil {
+ return m.SendTo
+ }
+ return nil
+}
+
+func (m *SendRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_SendRequest_TimeoutSeconds
+}
+
+type SendReply struct {
+ DataSent *int32 `protobuf:"varint,1,opt,name=data_sent,json=dataSent" json:"data_sent,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SendReply) Reset() { *m = SendReply{} }
+func (m *SendReply) String() string { return proto.CompactTextString(m) }
+func (*SendReply) ProtoMessage() {}
+func (*SendReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{26}
+}
+func (m *SendReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SendReply.Unmarshal(m, b)
+}
+func (m *SendReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SendReply.Marshal(b, m, deterministic)
+}
+func (dst *SendReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SendReply.Merge(dst, src)
+}
+func (m *SendReply) XXX_Size() int {
+ return xxx_messageInfo_SendReply.Size(m)
+}
+func (m *SendReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_SendReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SendReply proto.InternalMessageInfo
+
+func (m *SendReply) GetDataSent() int32 {
+ if m != nil && m.DataSent != nil {
+ return *m.DataSent
+ }
+ return 0
+}
+
+type ReceiveRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"`
+ DataSize *int32 `protobuf:"varint,2,req,name=data_size,json=dataSize" json:"data_size,omitempty"`
+ Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} }
+func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) }
+func (*ReceiveRequest) ProtoMessage() {}
+func (*ReceiveRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{27}
+}
+func (m *ReceiveRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ReceiveRequest.Unmarshal(m, b)
+}
+func (m *ReceiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ReceiveRequest.Marshal(b, m, deterministic)
+}
+func (dst *ReceiveRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ReceiveRequest.Merge(dst, src)
+}
+func (m *ReceiveRequest) XXX_Size() int {
+ return xxx_messageInfo_ReceiveRequest.Size(m)
+}
+func (m *ReceiveRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ReceiveRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReceiveRequest proto.InternalMessageInfo
+
+const Default_ReceiveRequest_Flags int32 = 0
+const Default_ReceiveRequest_TimeoutSeconds float64 = -1
+
+func (m *ReceiveRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *ReceiveRequest) GetDataSize() int32 {
+ if m != nil && m.DataSize != nil {
+ return *m.DataSize
+ }
+ return 0
+}
+
+func (m *ReceiveRequest) GetFlags() int32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return Default_ReceiveRequest_Flags
+}
+
+func (m *ReceiveRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_ReceiveRequest_TimeoutSeconds
+}
+
+type ReceiveReply struct {
+ StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"`
+ Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
+ ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from,json=receivedFrom" json:"received_from,omitempty"`
+ BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size,json=bufferSize" json:"buffer_size,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ReceiveReply) Reset() { *m = ReceiveReply{} }
+func (m *ReceiveReply) String() string { return proto.CompactTextString(m) }
+func (*ReceiveReply) ProtoMessage() {}
+func (*ReceiveReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{28}
+}
+func (m *ReceiveReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ReceiveReply.Unmarshal(m, b)
+}
+func (m *ReceiveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ReceiveReply.Marshal(b, m, deterministic)
+}
+func (dst *ReceiveReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ReceiveReply.Merge(dst, src)
+}
+func (m *ReceiveReply) XXX_Size() int {
+ return xxx_messageInfo_ReceiveReply.Size(m)
+}
+func (m *ReceiveReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_ReceiveReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReceiveReply proto.InternalMessageInfo
+
+func (m *ReceiveReply) GetStreamOffset() int64 {
+ if m != nil && m.StreamOffset != nil {
+ return *m.StreamOffset
+ }
+ return 0
+}
+
+func (m *ReceiveReply) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *ReceiveReply) GetReceivedFrom() *AddressPort {
+ if m != nil {
+ return m.ReceivedFrom
+ }
+ return nil
+}
+
+func (m *ReceiveReply) GetBufferSize() int32 {
+ if m != nil && m.BufferSize != nil {
+ return *m.BufferSize
+ }
+ return 0
+}
+
+type PollEvent struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"`
+ RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events,json=requestedEvents" json:"requested_events,omitempty"`
+ ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events,json=observedEvents" json:"observed_events,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PollEvent) Reset() { *m = PollEvent{} }
+func (m *PollEvent) String() string { return proto.CompactTextString(m) }
+func (*PollEvent) ProtoMessage() {}
+func (*PollEvent) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{29}
+}
+func (m *PollEvent) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PollEvent.Unmarshal(m, b)
+}
+func (m *PollEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PollEvent.Marshal(b, m, deterministic)
+}
+func (dst *PollEvent) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PollEvent.Merge(dst, src)
+}
+func (m *PollEvent) XXX_Size() int {
+ return xxx_messageInfo_PollEvent.Size(m)
+}
+func (m *PollEvent) XXX_DiscardUnknown() {
+ xxx_messageInfo_PollEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PollEvent proto.InternalMessageInfo
+
+func (m *PollEvent) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *PollEvent) GetRequestedEvents() int32 {
+ if m != nil && m.RequestedEvents != nil {
+ return *m.RequestedEvents
+ }
+ return 0
+}
+
+func (m *PollEvent) GetObservedEvents() int32 {
+ if m != nil && m.ObservedEvents != nil {
+ return *m.ObservedEvents
+ }
+ return 0
+}
+
+type PollRequest struct {
+ Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PollRequest) Reset() { *m = PollRequest{} }
+func (m *PollRequest) String() string { return proto.CompactTextString(m) }
+func (*PollRequest) ProtoMessage() {}
+func (*PollRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{30}
+}
+func (m *PollRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PollRequest.Unmarshal(m, b)
+}
+func (m *PollRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PollRequest.Marshal(b, m, deterministic)
+}
+func (dst *PollRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PollRequest.Merge(dst, src)
+}
+func (m *PollRequest) XXX_Size() int {
+ return xxx_messageInfo_PollRequest.Size(m)
+}
+func (m *PollRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_PollRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PollRequest proto.InternalMessageInfo
+
+const Default_PollRequest_TimeoutSeconds float64 = -1
+
+func (m *PollRequest) GetEvents() []*PollEvent {
+ if m != nil {
+ return m.Events
+ }
+ return nil
+}
+
+func (m *PollRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_PollRequest_TimeoutSeconds
+}
+
+type PollReply struct {
+ Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PollReply) Reset() { *m = PollReply{} }
+func (m *PollReply) String() string { return proto.CompactTextString(m) }
+func (*PollReply) ProtoMessage() {}
+func (*PollReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{31}
+}
+func (m *PollReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PollReply.Unmarshal(m, b)
+}
+func (m *PollReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PollReply.Marshal(b, m, deterministic)
+}
+func (dst *PollReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PollReply.Merge(dst, src)
+}
+func (m *PollReply) XXX_Size() int {
+ return xxx_messageInfo_PollReply.Size(m)
+}
+func (m *PollReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_PollReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PollReply proto.InternalMessageInfo
+
+func (m *PollReply) GetEvents() []*PollEvent {
+ if m != nil {
+ return m.Events
+ }
+ return nil
+}
+
+type ResolveRequest struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,json=addressFamilies,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ResolveRequest) Reset() { *m = ResolveRequest{} }
+func (m *ResolveRequest) String() string { return proto.CompactTextString(m) }
+func (*ResolveRequest) ProtoMessage() {}
+func (*ResolveRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{32}
+}
+func (m *ResolveRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ResolveRequest.Unmarshal(m, b)
+}
+func (m *ResolveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ResolveRequest.Marshal(b, m, deterministic)
+}
+func (dst *ResolveRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResolveRequest.Merge(dst, src)
+}
+func (m *ResolveRequest) XXX_Size() int {
+ return xxx_messageInfo_ResolveRequest.Size(m)
+}
+func (m *ResolveRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResolveRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResolveRequest proto.InternalMessageInfo
+
+func (m *ResolveRequest) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily {
+ if m != nil {
+ return m.AddressFamilies
+ }
+ return nil
+}
+
+type ResolveReply struct {
+ PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address,json=packedAddress" json:"packed_address,omitempty"`
+ CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name,json=canonicalName" json:"canonical_name,omitempty"`
+ Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ResolveReply) Reset() { *m = ResolveReply{} }
+func (m *ResolveReply) String() string { return proto.CompactTextString(m) }
+func (*ResolveReply) ProtoMessage() {}
+func (*ResolveReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_socket_service_b5f8f233dc327808, []int{33}
+}
+func (m *ResolveReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ResolveReply.Unmarshal(m, b)
+}
+func (m *ResolveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ResolveReply.Marshal(b, m, deterministic)
+}
+func (dst *ResolveReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResolveReply.Merge(dst, src)
+}
+func (m *ResolveReply) XXX_Size() int {
+ return xxx_messageInfo_ResolveReply.Size(m)
+}
+func (m *ResolveReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResolveReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResolveReply proto.InternalMessageInfo
+
+func (m *ResolveReply) GetPackedAddress() [][]byte {
+ if m != nil {
+ return m.PackedAddress
+ }
+ return nil
+}
+
+func (m *ResolveReply) GetCanonicalName() string {
+ if m != nil && m.CanonicalName != nil {
+ return *m.CanonicalName
+ }
+ return ""
+}
+
+func (m *ResolveReply) GetAliases() []string {
+ if m != nil {
+ return m.Aliases
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*RemoteSocketServiceError)(nil), "appengine.RemoteSocketServiceError")
+ proto.RegisterType((*AddressPort)(nil), "appengine.AddressPort")
+ proto.RegisterType((*CreateSocketRequest)(nil), "appengine.CreateSocketRequest")
+ proto.RegisterType((*CreateSocketReply)(nil), "appengine.CreateSocketReply")
+ proto.RegisterType((*BindRequest)(nil), "appengine.BindRequest")
+ proto.RegisterType((*BindReply)(nil), "appengine.BindReply")
+ proto.RegisterType((*GetSocketNameRequest)(nil), "appengine.GetSocketNameRequest")
+ proto.RegisterType((*GetSocketNameReply)(nil), "appengine.GetSocketNameReply")
+ proto.RegisterType((*GetPeerNameRequest)(nil), "appengine.GetPeerNameRequest")
+ proto.RegisterType((*GetPeerNameReply)(nil), "appengine.GetPeerNameReply")
+ proto.RegisterType((*SocketOption)(nil), "appengine.SocketOption")
+ proto.RegisterType((*SetSocketOptionsRequest)(nil), "appengine.SetSocketOptionsRequest")
+ proto.RegisterType((*SetSocketOptionsReply)(nil), "appengine.SetSocketOptionsReply")
+ proto.RegisterType((*GetSocketOptionsRequest)(nil), "appengine.GetSocketOptionsRequest")
+ proto.RegisterType((*GetSocketOptionsReply)(nil), "appengine.GetSocketOptionsReply")
+ proto.RegisterType((*ConnectRequest)(nil), "appengine.ConnectRequest")
+ proto.RegisterType((*ConnectReply)(nil), "appengine.ConnectReply")
+ proto.RegisterType((*ListenRequest)(nil), "appengine.ListenRequest")
+ proto.RegisterType((*ListenReply)(nil), "appengine.ListenReply")
+ proto.RegisterType((*AcceptRequest)(nil), "appengine.AcceptRequest")
+ proto.RegisterType((*AcceptReply)(nil), "appengine.AcceptReply")
+ proto.RegisterType((*ShutDownRequest)(nil), "appengine.ShutDownRequest")
+ proto.RegisterType((*ShutDownReply)(nil), "appengine.ShutDownReply")
+ proto.RegisterType((*CloseRequest)(nil), "appengine.CloseRequest")
+ proto.RegisterType((*CloseReply)(nil), "appengine.CloseReply")
+ proto.RegisterType((*SendRequest)(nil), "appengine.SendRequest")
+ proto.RegisterType((*SendReply)(nil), "appengine.SendReply")
+ proto.RegisterType((*ReceiveRequest)(nil), "appengine.ReceiveRequest")
+ proto.RegisterType((*ReceiveReply)(nil), "appengine.ReceiveReply")
+ proto.RegisterType((*PollEvent)(nil), "appengine.PollEvent")
+ proto.RegisterType((*PollRequest)(nil), "appengine.PollRequest")
+ proto.RegisterType((*PollReply)(nil), "appengine.PollReply")
+ proto.RegisterType((*ResolveRequest)(nil), "appengine.ResolveRequest")
+ proto.RegisterType((*ResolveReply)(nil), "appengine.ResolveReply")
+}
+
+func init() {
+ proto.RegisterFile("google.golang.org/appengine/internal/socket/socket_service.proto", fileDescriptor_socket_service_b5f8f233dc327808)
+}
+
+var fileDescriptor_socket_service_b5f8f233dc327808 = []byte{
+ // 3088 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0x5f, 0x77, 0xe3, 0xc6,
+ 0x75, 0x37, 0x48, 0xfd, 0xe3, 0x90, 0x94, 0xee, 0x62, 0xa5, 0x5d, 0x25, 0x6e, 0x12, 0x05, 0x8e,
+ 0x1b, 0x25, 0x8e, 0x77, 0x6d, 0x39, 0x4d, 0x9b, 0xa4, 0x49, 0x16, 0x04, 0x86, 0x24, 0x4c, 0x00,
+ 0x03, 0xcd, 0x0c, 0x25, 0xd1, 0x6d, 0x8a, 0xd0, 0x22, 0xa4, 0x65, 0x4c, 0x11, 0x0c, 0xc9, 0xdd,
+ 0xf5, 0xba, 0x69, 0xaa, 0xfe, 0x39, 0xfd, 0x12, 0x7d, 0xe8, 0x73, 0x3f, 0x43, 0x4f, 0x4f, 0x5f,
+ 0xfa, 0xec, 0xc7, 0x7e, 0x84, 0x9e, 0xbe, 0xb4, 0x9f, 0xa1, 0x67, 0x06, 0xe0, 0x60, 0xc8, 0xd5,
+ 0xae, 0x77, 0x75, 0x72, 0x4e, 0x9e, 0xa4, 0xfb, 0xbb, 0x77, 0xee, 0xff, 0x99, 0xb9, 0x03, 0xa2,
+ 0x47, 0x97, 0x69, 0x7a, 0x39, 0x4a, 0x1e, 0x5c, 0xa6, 0xa3, 0xfe, 0xf8, 0xf2, 0x41, 0x3a, 0xbd,
+ 0x7c, 0xd8, 0x9f, 0x4c, 0x92, 0xf1, 0xe5, 0x70, 0x9c, 0x3c, 0x1c, 0x8e, 0xe7, 0xc9, 0x74, 0xdc,
+ 0x1f, 0x3d, 0x9c, 0xa5, 0xe7, 0x9f, 0x25, 0xf3, 0xfc, 0x4f, 0x3c, 0x4b, 0xa6, 0x4f, 0x87, 0xe7,
+ 0xc9, 0x83, 0xc9, 0x34, 0x9d, 0xa7, 0x66, 0x45, 0xc9, 0x5b, 0xff, 0xbc, 0x8b, 0xf6, 0x69, 0x72,
+ 0x95, 0xce, 0x13, 0x26, 0x25, 0x59, 0x26, 0x88, 0xa7, 0xd3, 0x74, 0x6a, 0x7e, 0x07, 0xd5, 0x66,
+ 0xcf, 0x67, 0xf3, 0xe4, 0x2a, 0x4e, 0x04, 0xbd, 0x6f, 0x1c, 0x18, 0x87, 0xeb, 0x3f, 0x31, 0x3e,
+ 0xa0, 0xd5, 0x0c, 0xce, 0xa4, 0xbe, 0x8d, 0x6a, 0x92, 0x1d, 0x0f, 0x92, 0x79, 0x7f, 0x38, 0xda,
+ 0x2f, 0x1d, 0x18, 0x87, 0x15, 0x5a, 0x95, 0x98, 0x2b, 0x21, 0xeb, 0x73, 0x54, 0x91, 0xb2, 0x4e,
+ 0x3a, 0x48, 0x4c, 0x40, 0x35, 0xd6, 0x63, 0x1c, 0x07, 0x31, 0xa6, 0x94, 0x50, 0x30, 0xcc, 0x3a,
+ 0xaa, 0xb4, 0x6c, 0x2f, 0x27, 0x4b, 0x66, 0x15, 0x6d, 0x36, 0x6d, 0xcf, 0xef, 0x52, 0x0c, 0x6b,
+ 0xe6, 0x1e, 0xba, 0x13, 0x61, 0x1a, 0x78, 0x8c, 0x79, 0x24, 0x8c, 0x5d, 0x1c, 0x7a, 0xd8, 0x85,
+ 0x75, 0xf3, 0x2e, 0xda, 0xf1, 0xc2, 0x13, 0xdb, 0xf7, 0xdc, 0x98, 0xe2, 0xe3, 0x2e, 0x66, 0x1c,
+ 0x36, 0xcc, 0x3b, 0xa8, 0xce, 0x88, 0xd3, 0xc1, 0x3c, 0x76, 0x7c, 0xc2, 0xb0, 0x0b, 0x9b, 0xd6,
+ 0xbf, 0x99, 0xa8, 0xca, 0x34, 0x67, 0x77, 0x50, 0x95, 0xf5, 0x58, 0xcc, 0xba, 0x8e, 0x83, 0x19,
+ 0x83, 0xb7, 0x84, 0x6d, 0x01, 0x60, 0x61, 0x04, 0x0c, 0x73, 0x1b, 0x21, 0x49, 0x86, 0x04, 0x87,
+ 0x1c, 0x4a, 0x8a, 0xcd, 0xa8, 0xd3, 0x86, 0xb2, 0x22, 0xbd, 0x90, 0x53, 0x58, 0x13, 0x9e, 0x66,
+ 0x24, 0x81, 0x75, 0xc5, 0x0b, 0xcf, 0x3c, 0x02, 0x1b, 0x8a, 0x3c, 0x6a, 0x78, 0x2d, 0xd8, 0x5c,
+ 0x18, 0x16, 0x8a, 0xcf, 0xb0, 0x03, 0x5b, 0x8a, 0xdf, 0xb0, 0xdd, 0x26, 0x54, 0x94, 0x61, 0xa7,
+ 0xed, 0xf9, 0x2e, 0x20, 0x45, 0xdb, 0x2d, 0xdb, 0x0b, 0xa1, 0x2a, 0x02, 0x96, 0xf4, 0x29, 0xe9,
+ 0xfa, 0x6e, 0xc3, 0x27, 0x4e, 0x07, 0xaa, 0x9a, 0xb7, 0x01, 0x0e, 0xa0, 0x56, 0x2c, 0x12, 0xd1,
+ 0x41, 0x5d, 0xd1, 0x4d, 0xbb, 0xeb, 0x73, 0xd8, 0xd6, 0x9c, 0xe0, 0x0d, 0xbf, 0x03, 0x3b, 0x85,
+ 0x13, 0x5d, 0xd6, 0x03, 0x50, 0xf2, 0xf8, 0xcc, 0x63, 0x1c, 0xee, 0x28, 0xf6, 0x99, 0x8b, 0x4f,
+ 0xc0, 0xd4, 0xcc, 0x09, 0xfa, 0xae, 0xae, 0xce, 0xf5, 0x28, 0xec, 0x2a, 0x01, 0x8f, 0x09, 0x7a,
+ 0xaf, 0xa0, 0x45, 0xa9, 0xe0, 0x5e, 0xa1, 0xa0, 0xe9, 0xf9, 0x18, 0xee, 0x2b, 0x3a, 0x90, 0xf4,
+ 0xbe, 0x66, 0x80, 0xf3, 0x1e, 0x7c, 0x4d, 0x19, 0xe0, 0x67, 0xbc, 0xc1, 0x7a, 0xf0, 0x75, 0xe5,
+ 0x50, 0x53, 0x24, 0xf5, 0x6d, 0x4d, 0x9e, 0x45, 0x0e, 0xfc, 0x91, 0xa2, 0x59, 0xe4, 0x45, 0x18,
+ 0xbe, 0xa1, 0xc4, 0x29, 0x69, 0x32, 0xf8, 0x66, 0x61, 0xce, 0xf7, 0xc2, 0x0e, 0x7c, 0xab, 0xa8,
+ 0xbd, 0x90, 0x3e, 0x30, 0x6b, 0x68, 0x4b, 0x92, 0x2e, 0x09, 0xe0, 0xdb, 0x4a, 0x98, 0xda, 0x61,
+ 0x0b, 0x83, 0xa5, 0x7c, 0x71, 0xb1, 0xed, 0xfa, 0x1d, 0x78, 0x47, 0x76, 0x9b, 0x02, 0x44, 0x3d,
+ 0xde, 0x31, 0x77, 0x11, 0x64, 0xfe, 0xd8, 0x01, 0xe6, 0x84, 0xf8, 0x24, 0x6c, 0xc1, 0x77, 0x34,
+ 0x2f, 0x7d, 0xa7, 0x03, 0xef, 0xea, 0x5e, 0xf7, 0x18, 0xfc, 0xb1, 0x52, 0x14, 0x12, 0x8e, 0x83,
+ 0x88, 0xf7, 0xe0, 0xbb, 0xca, 0x33, 0x9f, 0x90, 0x08, 0x0e, 0xf5, 0x3a, 0xb3, 0x16, 0x7c, 0xbf,
+ 0x68, 0x43, 0x97, 0x06, 0xf0, 0x9e, 0xd6, 0x3b, 0x34, 0x6c, 0xc1, 0x0f, 0xf2, 0x1d, 0x16, 0x63,
+ 0xff, 0x28, 0x64, 0xbd, 0xd0, 0x81, 0xf7, 0x95, 0x84, 0xff, 0x51, 0xdb, 0xe7, 0xf0, 0x40, 0xa3,
+ 0x29, 0xe3, 0xf0, 0xb0, 0xa0, 0x43, 0xa1, 0xe1, 0x03, 0x15, 0x6c, 0x37, 0xb4, 0xb9, 0xd3, 0x86,
+ 0x0f, 0x35, 0x0f, 0x1c, 0xe6, 0xc1, 0x51, 0xb1, 0xe0, 0x48, 0x28, 0xfc, 0x48, 0xef, 0x66, 0x0c,
+ 0x3f, 0xd4, 0x49, 0x0a, 0x7f, 0xa2, 0xa4, 0xcf, 0x9a, 0x5d, 0xdf, 0x87, 0x1f, 0x69, 0xda, 0xec,
+ 0x90, 0xc0, 0x9f, 0x2a, 0x73, 0x42, 0xfc, 0xd8, 0x81, 0x3f, 0xd3, 0x01, 0xe6, 0x73, 0xf8, 0xb1,
+ 0x5a, 0xd1, 0x68, 0x92, 0x90, 0xc3, 0x4f, 0xf5, 0x1c, 0x72, 0x0a, 0x7f, 0xae, 0xb5, 0xa2, 0x6b,
+ 0x73, 0x1b, 0x7e, 0xa6, 0x3c, 0xe0, 0x5e, 0x80, 0xe1, 0xe7, 0xc5, 0xe6, 0x24, 0x8c, 0xc2, 0x2f,
+ 0xb4, 0xe5, 0x21, 0xe6, 0xf0, 0x48, 0xa3, 0xa3, 0x4e, 0x0b, 0x6c, 0xa5, 0x8e, 0xe2, 0x80, 0x70,
+ 0x0c, 0x0d, 0x4d, 0xbf, 0xec, 0x1d, 0x47, 0x35, 0x8b, 0xed, 0x9e, 0x80, 0x5b, 0x34, 0x1e, 0x0d,
+ 0x42, 0x0e, 0x58, 0x99, 0x73, 0x48, 0x10, 0x40, 0x53, 0xb1, 0x23, 0x4a, 0x38, 0x81, 0x96, 0xaa,
+ 0x78, 0xd0, 0xf5, 0xb9, 0xd7, 0x26, 0x11, 0xb4, 0x8b, 0xf6, 0x22, 0xdc, 0x25, 0x1c, 0x3c, 0x3d,
+ 0x05, 0xa2, 0xe8, 0x1f, 0xab, 0x45, 0xe4, 0x04, 0xd3, 0xa6, 0x4f, 0x4e, 0xa1, 0xa3, 0x0a, 0x1d,
+ 0x12, 0xde, 0x0d, 0xbd, 0x63, 0xf0, 0x8b, 0x3c, 0xd9, 0x6e, 0xd3, 0x85, 0x40, 0x0f, 0xc4, 0x69,
+ 0xb7, 0x20, 0x54, 0x80, 0xef, 0x35, 0x6c, 0xc7, 0x01, 0xa2, 0x03, 0x0d, 0xdb, 0x85, 0x48, 0x07,
+ 0x98, 0x13, 0xc2, 0xb1, 0x0e, 0x04, 0xf6, 0x19, 0xd0, 0xa2, 0xbf, 0xbc, 0x86, 0x3c, 0xcc, 0x58,
+ 0xb1, 0xd1, 0x7d, 0x86, 0x8f, 0x81, 0x2b, 0x09, 0x8a, 0x19, 0xb7, 0x29, 0x87, 0xae, 0x42, 0x18,
+ 0xa7, 0x72, 0xbb, 0x9d, 0xa8, 0x35, 0x5d, 0x86, 0x29, 0x83, 0x53, 0x3d, 0x18, 0x71, 0x8a, 0xc3,
+ 0x99, 0xda, 0x4e, 0xae, 0xd0, 0xe2, 0xba, 0x94, 0xe2, 0x63, 0xe8, 0x29, 0xb9, 0x80, 0xb5, 0x98,
+ 0xf7, 0x09, 0x86, 0x4f, 0x4c, 0x13, 0x6d, 0x17, 0xe9, 0xe5, 0xbd, 0x08, 0xc3, 0x5f, 0xa8, 0xf3,
+ 0x32, 0x24, 0x12, 0x25, 0x11, 0x87, 0xbf, 0x34, 0xef, 0xa3, 0xbb, 0x85, 0x60, 0x48, 0x58, 0x37,
+ 0x8a, 0x08, 0xe5, 0xf0, 0x4b, 0xc5, 0x10, 0x86, 0x79, 0xc1, 0xf8, 0x2b, 0xa5, 0x9a, 0x44, 0xc2,
+ 0xad, 0x6e, 0x14, 0x41, 0xac, 0x1f, 0x7b, 0xac, 0x2b, 0x80, 0x85, 0x9f, 0x51, 0xb3, 0x58, 0xfa,
+ 0x2b, 0x85, 0xda, 0x1a, 0xda, 0x57, 0x0a, 0x45, 0x3c, 0x5e, 0xd8, 0x65, 0x18, 0x3e, 0x15, 0x77,
+ 0x9c, 0xc2, 0x42, 0xc2, 0xed, 0x13, 0xdb, 0xf3, 0xe1, 0xbc, 0x48, 0x08, 0xe6, 0x2e, 0x39, 0x0d,
+ 0x61, 0x50, 0x04, 0x85, 0x79, 0x37, 0xa4, 0xd8, 0x76, 0xda, 0x90, 0x14, 0xc7, 0x07, 0xe6, 0x14,
+ 0x33, 0xcc, 0xe1, 0x42, 0x99, 0x76, 0x48, 0x18, 0xda, 0x0d, 0x42, 0x39, 0x76, 0xe1, 0x52, 0x99,
+ 0x16, 0x68, 0x26, 0xf9, 0x58, 0x8b, 0xa5, 0xd1, 0x6d, 0x32, 0x18, 0x2a, 0xc0, 0x63, 0x42, 0x0c,
+ 0x7e, 0xad, 0x97, 0x45, 0x22, 0x9f, 0x29, 0x83, 0xac, 0xdd, 0xcd, 0x1c, 0x1b, 0x29, 0x83, 0x9c,
+ 0x90, 0xc0, 0x0e, 0x7b, 0x14, 0x37, 0x19, 0x5c, 0x29, 0x41, 0xb1, 0x07, 0x5d, 0xd2, 0xe5, 0x30,
+ 0x5e, 0xf2, 0x8c, 0xe2, 0x66, 0x57, 0xdc, 0xd2, 0xa9, 0x12, 0x6c, 0x13, 0x96, 0x69, 0x9c, 0x28,
+ 0x41, 0x01, 0x2d, 0x62, 0xfd, 0x8d, 0x72, 0xc6, 0xf6, 0x29, 0xb6, 0xdd, 0x1e, 0x4c, 0x55, 0x4a,
+ 0xbc, 0x30, 0xa2, 0xa4, 0x45, 0xc5, 0xa5, 0x3e, 0x2b, 0xb6, 0x23, 0xb7, 0x7d, 0x0c, 0xf3, 0xe2,
+ 0x38, 0x73, 0x7c, 0x6c, 0x87, 0xf0, 0x44, 0x2f, 0x61, 0x68, 0x07, 0xf0, 0xb4, 0x00, 0xb2, 0xe4,
+ 0x3f, 0xd3, 0xae, 0x32, 0x21, 0xf0, 0xb9, 0x72, 0x31, 0x3b, 0x11, 0x3c, 0x02, 0xcf, 0x95, 0x88,
+ 0x7b, 0xdc, 0x25, 0x1c, 0xbe, 0xd0, 0xce, 0xf1, 0x00, 0xbb, 0x5e, 0x37, 0x80, 0xbf, 0x56, 0xde,
+ 0x65, 0x80, 0x6c, 0xcd, 0xdf, 0x2a, 0x39, 0xc7, 0x0e, 0x1d, 0xec, 0x63, 0x17, 0xfe, 0x46, 0x3b,
+ 0x7f, 0x3a, 0xb8, 0x07, 0xbf, 0x53, 0xeb, 0x3a, 0xb8, 0x87, 0xcf, 0x22, 0x8f, 0x62, 0x17, 0xfe,
+ 0xd6, 0xdc, 0x2d, 0x40, 0x8a, 0x4f, 0x48, 0x07, 0xbb, 0x70, 0x6d, 0x98, 0x7b, 0x79, 0xa2, 0x24,
+ 0xfa, 0x31, 0x76, 0x44, 0xad, 0xff, 0xce, 0x30, 0xef, 0x2e, 0x1a, 0xf7, 0x34, 0xc4, 0x54, 0x5c,
+ 0x51, 0xf0, 0xf7, 0x86, 0xb9, 0x9f, 0xb7, 0x79, 0x48, 0x38, 0xc5, 0x8e, 0x38, 0x48, 0xec, 0x86,
+ 0x8f, 0xe1, 0x1f, 0x0c, 0x13, 0x16, 0xe7, 0x44, 0xb3, 0xe3, 0xf9, 0x3e, 0xfc, 0xa3, 0xf1, 0xf5,
+ 0x12, 0x18, 0xd6, 0x15, 0xaa, 0xda, 0x83, 0xc1, 0x34, 0x99, 0xcd, 0xa2, 0x74, 0x3a, 0x37, 0x4d,
+ 0xb4, 0x36, 0x49, 0xa7, 0xf3, 0x7d, 0xe3, 0xa0, 0x74, 0xb8, 0x4e, 0xe5, 0xff, 0xe6, 0xbb, 0x68,
+ 0x7b, 0xd2, 0x3f, 0xff, 0x2c, 0x19, 0xc4, 0xfd, 0x4c, 0x52, 0xce, 0x7f, 0x35, 0x5a, 0xcf, 0xd0,
+ 0x7c, 0xb9, 0xf9, 0x0e, 0xaa, 0x3f, 0x4e, 0x67, 0xf3, 0x71, 0xff, 0x2a, 0x89, 0x1f, 0x0f, 0xc7,
+ 0xf3, 0xfd, 0xb2, 0x9c, 0x12, 0x6b, 0x0b, 0xb0, 0x3d, 0x1c, 0xcf, 0xad, 0x7f, 0x5a, 0x43, 0x77,
+ 0x9d, 0x69, 0xd2, 0x5f, 0x0c, 0xa3, 0x34, 0xf9, 0xcd, 0x93, 0x64, 0x36, 0x37, 0x1d, 0xb4, 0x71,
+ 0xd1, 0xbf, 0x1a, 0x8e, 0x9e, 0x4b, 0xcb, 0xdb, 0x47, 0xef, 0x3d, 0x50, 0x03, 0xec, 0x83, 0x1b,
+ 0xe4, 0x1f, 0x64, 0x54, 0x53, 0x2e, 0xa1, 0xf9, 0x52, 0xd3, 0x43, 0x5b, 0x72, 0xfa, 0x3d, 0x4f,
+ 0xc5, 0x88, 0x2a, 0xd4, 0xbc, 0xff, 0x5a, 0x6a, 0xa2, 0x7c, 0x11, 0x55, 0xcb, 0xcd, 0x9f, 0xa3,
+ 0xed, 0x7c, 0xae, 0x4e, 0x27, 0xf3, 0x61, 0x3a, 0x9e, 0xed, 0x97, 0x0f, 0xca, 0x87, 0xd5, 0xa3,
+ 0xfb, 0x9a, 0xc2, 0x6c, 0x31, 0x91, 0x7c, 0x5a, 0x9f, 0x69, 0xd4, 0xcc, 0x6c, 0xa0, 0x3b, 0x93,
+ 0x69, 0xfa, 0xf9, 0xf3, 0x38, 0xf9, 0x3c, 0x9b, 0xd6, 0xe3, 0xe1, 0x64, 0x7f, 0xed, 0xc0, 0x38,
+ 0xac, 0x1e, 0xdd, 0xd3, 0x54, 0x68, 0xa9, 0xa7, 0x3b, 0x72, 0x01, 0xce, 0xe5, 0xbd, 0x89, 0x79,
+ 0x88, 0xb6, 0x47, 0xc3, 0xd9, 0x3c, 0x19, 0xc7, 0x9f, 0xf6, 0xcf, 0x3f, 0x1b, 0xa5, 0x97, 0xfb,
+ 0xeb, 0x8b, 0xe9, 0xbc, 0x9e, 0x31, 0x1a, 0x19, 0x6e, 0x7e, 0x84, 0x2a, 0x53, 0x39, 0xe1, 0x0b,
+ 0x2b, 0x1b, 0xaf, 0xb4, 0xb2, 0x95, 0x09, 0x7a, 0x13, 0x73, 0x0f, 0x6d, 0xf4, 0x27, 0x93, 0x78,
+ 0x38, 0xd8, 0xaf, 0xc8, 0x42, 0xad, 0xf7, 0x27, 0x13, 0x6f, 0x60, 0x7e, 0x03, 0xa1, 0xc9, 0x34,
+ 0xfd, 0x75, 0x72, 0x3e, 0x17, 0x2c, 0x74, 0x60, 0x1c, 0x96, 0x69, 0x25, 0x47, 0xbc, 0x81, 0x65,
+ 0xa1, 0x9a, 0x9e, 0x7b, 0x73, 0x0b, 0xad, 0x79, 0xd1, 0xd3, 0x1f, 0x82, 0x91, 0xff, 0xf7, 0x23,
+ 0x28, 0x59, 0x16, 0xda, 0x5e, 0x4e, 0xac, 0xb9, 0x89, 0xca, 0xdc, 0x89, 0xc0, 0x10, 0xff, 0x74,
+ 0xdd, 0x08, 0x4a, 0xd6, 0x97, 0x06, 0xba, 0xb3, 0x5c, 0x91, 0xc9, 0xe8, 0xb9, 0xf9, 0x1e, 0xba,
+ 0x93, 0xa7, 0x7d, 0x90, 0xcc, 0xce, 0xa7, 0xc3, 0xc9, 0x3c, 0x7f, 0x93, 0x54, 0x28, 0x64, 0x0c,
+ 0x57, 0xe1, 0xe6, 0xcf, 0xd0, 0xb6, 0x78, 0xf4, 0x24, 0x53, 0xd5, 0x97, 0xe5, 0x57, 0x86, 0x5e,
+ 0xcf, 0xa4, 0x17, 0xfd, 0xfa, 0x7b, 0x28, 0xd1, 0xf7, 0x2b, 0x5b, 0xff, 0xb3, 0x09, 0xd7, 0xd7,
+ 0xd7, 0xd7, 0x25, 0xeb, 0x77, 0xa8, 0xda, 0x18, 0x8e, 0x07, 0x8b, 0x86, 0x7e, 0x49, 0x24, 0xa5,
+ 0x1b, 0x23, 0xb9, 0xd1, 0x15, 0xd1, 0xc1, 0xaf, 0xef, 0x8a, 0x45, 0x50, 0x25, 0xb3, 0x2f, 0xf2,
+ 0x78, 0xa3, 0x42, 0xe3, 0x8d, 0x62, 0xb3, 0x1c, 0xb4, 0xdb, 0x4a, 0xe6, 0x59, 0x75, 0xc2, 0xfe,
+ 0x55, 0x72, 0x9b, 0xc8, 0xac, 0x33, 0x64, 0xae, 0x28, 0x79, 0xa9, 0x7b, 0xa5, 0x37, 0x73, 0xcf,
+ 0x96, 0x9a, 0xa3, 0x24, 0x99, 0xde, 0xda, 0x39, 0x07, 0xc1, 0x92, 0x0a, 0xe1, 0xda, 0x43, 0xb4,
+ 0x39, 0x49, 0x92, 0xe9, 0x57, 0x3b, 0xb4, 0x21, 0xc4, 0xbc, 0x89, 0xf5, 0xe5, 0xe6, 0x62, 0x47,
+ 0x64, 0x7b, 0xdf, 0xfc, 0x05, 0x5a, 0x1f, 0x25, 0x4f, 0x93, 0x51, 0x7e, 0x92, 0x7d, 0xef, 0x25,
+ 0x27, 0xc6, 0x12, 0xe1, 0x8b, 0x05, 0x34, 0x5b, 0x67, 0x3e, 0x42, 0x1b, 0xd9, 0xa1, 0x93, 0x1f,
+ 0x62, 0x87, 0xaf, 0xa3, 0x41, 0x46, 0x90, 0xaf, 0x33, 0x77, 0xd1, 0xfa, 0xd3, 0xfe, 0xe8, 0x49,
+ 0xb2, 0x5f, 0x3e, 0x28, 0x1d, 0xd6, 0x68, 0x46, 0x58, 0x09, 0xba, 0xf3, 0x82, 0x4d, 0xed, 0x41,
+ 0xcd, 0x88, 0x1f, 0x7b, 0x11, 0xbc, 0x25, 0x67, 0x95, 0x02, 0xca, 0xfe, 0x05, 0x43, 0xce, 0x16,
+ 0x05, 0x2c, 0xb6, 0xf3, 0xc6, 0x0a, 0x26, 0x76, 0xf6, 0x1d, 0xeb, 0xdf, 0xd7, 0x11, 0xac, 0x7a,
+ 0x26, 0x6f, 0xbb, 0x85, 0x60, 0xec, 0xe2, 0x46, 0xb7, 0x05, 0x86, 0x1c, 0xc9, 0x14, 0x48, 0xc5,
+ 0x94, 0x28, 0xc6, 0x23, 0x28, 0x2d, 0xa9, 0x8d, 0xe5, 0x95, 0x5a, 0x5e, 0xd6, 0x90, 0x7d, 0x47,
+ 0x58, 0x5b, 0xd6, 0xe0, 0x92, 0x90, 0x53, 0xd2, 0xe5, 0x18, 0xd6, 0x97, 0x19, 0x0d, 0x4a, 0x6c,
+ 0xd7, 0xb1, 0xe5, 0x07, 0x04, 0x31, 0x74, 0x28, 0x06, 0x0b, 0xdd, 0x46, 0xb7, 0x09, 0x9b, 0xcb,
+ 0x28, 0x75, 0x4e, 0x04, 0xba, 0xb5, 0xac, 0xa4, 0x83, 0x71, 0x64, 0xfb, 0xde, 0x09, 0x86, 0xca,
+ 0x32, 0x83, 0x90, 0x86, 0x17, 0xfa, 0x5e, 0x88, 0x01, 0x2d, 0xeb, 0xf1, 0xbd, 0xb0, 0x85, 0x29,
+ 0xd4, 0xcd, 0x7b, 0xc8, 0x5c, 0xd2, 0x2e, 0x86, 0x25, 0x02, 0xbb, 0xcb, 0x38, 0x0b, 0xdd, 0x0c,
+ 0xdf, 0xd3, 0x6a, 0xe2, 0x45, 0x31, 0x27, 0x0c, 0x8c, 0x15, 0x88, 0xfb, 0x50, 0xd2, 0xca, 0xe4,
+ 0x45, 0x71, 0x5b, 0x8c, 0x9a, 0x8e, 0x0f, 0xe5, 0x65, 0x98, 0x44, 0xdc, 0x23, 0x21, 0x83, 0x35,
+ 0xcd, 0x16, 0x77, 0xa2, 0x58, 0x3c, 0xef, 0x7d, 0xbb, 0x07, 0x86, 0x26, 0x2e, 0xf0, 0xc0, 0x3e,
+ 0x63, 0xb8, 0x05, 0x25, 0x2d, 0xdb, 0x02, 0x76, 0x08, 0xed, 0x40, 0x59, 0x0b, 0x5b, 0x80, 0x22,
+ 0x21, 0x9e, 0xeb, 0x63, 0x58, 0x33, 0xf7, 0xd1, 0xee, 0x2a, 0x23, 0xe4, 0x27, 0x3e, 0xac, 0xaf,
+ 0x98, 0x15, 0x1c, 0x27, 0x14, 0x65, 0x58, 0x36, 0x2b, 0x9e, 0xb0, 0x21, 0x87, 0xcd, 0x15, 0xf1,
+ 0x2c, 0x81, 0x47, 0xb0, 0x65, 0xbe, 0x8d, 0xee, 0x6b, 0xb8, 0x8b, 0x9b, 0x98, 0xc6, 0xb6, 0xe3,
+ 0xe0, 0x88, 0x43, 0x65, 0x85, 0x79, 0xea, 0x85, 0x2e, 0x39, 0x8d, 0x1d, 0xdf, 0x0e, 0x22, 0x40,
+ 0x2b, 0x81, 0x78, 0x61, 0x93, 0x40, 0x75, 0x25, 0x90, 0xe3, 0xae, 0xe7, 0x74, 0x6c, 0xa7, 0x03,
+ 0x35, 0x39, 0x11, 0x3d, 0x47, 0xf7, 0xd9, 0xe2, 0xc8, 0xca, 0xaf, 0xf3, 0x5b, 0x1d, 0xea, 0x1f,
+ 0xa2, 0xcd, 0xc5, 0xec, 0x50, 0x7a, 0xf5, 0xec, 0xb0, 0x90, 0xb3, 0xee, 0xa3, 0xbd, 0x17, 0x4d,
+ 0x4f, 0x46, 0xcf, 0x85, 0x4f, 0xad, 0x3f, 0x90, 0x4f, 0x1f, 0xa3, 0xbd, 0xd6, 0x4d, 0x3e, 0xdd,
+ 0x46, 0xd7, 0xbf, 0x18, 0x68, 0xdb, 0x49, 0xc7, 0xe3, 0xe4, 0x7c, 0x7e, 0x2b, 0xf7, 0x97, 0xe6,
+ 0x9c, 0x57, 0xdf, 0x8f, 0xc5, 0x9c, 0xf3, 0x1e, 0xda, 0x99, 0x0f, 0xaf, 0x92, 0xf4, 0xc9, 0x3c,
+ 0x9e, 0x25, 0xe7, 0xe9, 0x78, 0x90, 0xcd, 0x09, 0xc6, 0x4f, 0x4a, 0xef, 0x7f, 0x48, 0xb7, 0x73,
+ 0x16, 0xcb, 0x38, 0xd6, 0x2f, 0x51, 0x4d, 0x39, 0xf8, 0x7b, 0xba, 0x48, 0xf5, 0x21, 0xe1, 0x04,
+ 0xd5, 0x7d, 0x39, 0xb9, 0xdd, 0x2a, 0xfc, 0x7d, 0xb4, 0xb9, 0x98, 0x04, 0x4b, 0x72, 0x3e, 0x5f,
+ 0x90, 0x56, 0x1d, 0x55, 0x17, 0x7a, 0x45, 0xbb, 0x0c, 0x51, 0xdd, 0x3e, 0x3f, 0x4f, 0x26, 0xb7,
+ 0xcb, 0xf2, 0x0d, 0x09, 0x2b, 0xbd, 0x34, 0x61, 0xd7, 0x06, 0xaa, 0x2e, 0x6c, 0x89, 0x84, 0x1d,
+ 0xa1, 0xbd, 0x71, 0xf2, 0x2c, 0x7e, 0xd1, 0x5a, 0xf6, 0x66, 0xb8, 0x3b, 0x4e, 0x9e, 0xb1, 0x1b,
+ 0x06, 0xb9, 0xbc, 0xac, 0xaf, 0x39, 0xc8, 0x65, 0xd2, 0x39, 0x64, 0xfd, 0x97, 0x81, 0x76, 0xd8,
+ 0xe3, 0x27, 0x73, 0x37, 0x7d, 0x76, 0xbb, 0xbc, 0x7e, 0x80, 0xca, 0x8f, 0xd3, 0x67, 0xf9, 0x6d,
+ 0xfb, 0x4d, 0xbd, 0x8b, 0x97, 0xb5, 0x3e, 0x68, 0xa7, 0xcf, 0xa8, 0x10, 0x35, 0xbf, 0x85, 0xaa,
+ 0xb3, 0x64, 0x3c, 0x88, 0xd3, 0x8b, 0x8b, 0x59, 0x32, 0x97, 0xd7, 0x6c, 0x99, 0x22, 0x01, 0x11,
+ 0x89, 0x58, 0x0e, 0x2a, 0xb7, 0xd3, 0x67, 0xfa, 0x45, 0xd6, 0xee, 0xf2, 0x98, 0xba, 0xcb, 0xf7,
+ 0xa8, 0xc0, 0x4e, 0xc5, 0x85, 0xa7, 0xdd, 0x1b, 0x99, 0xdc, 0x29, 0x85, 0xb2, 0xb5, 0x83, 0xea,
+ 0x85, 0x07, 0xa2, 0xae, 0xbf, 0x42, 0x35, 0x67, 0x94, 0xce, 0x6e, 0x35, 0xed, 0x98, 0xef, 0x2c,
+ 0xfb, 0x2c, 0xea, 0x51, 0x96, 0x25, 0xd5, 0xfd, 0xae, 0x21, 0x94, 0x5b, 0x10, 0xf6, 0xfe, 0xcf,
+ 0x40, 0x55, 0x96, 0xdc, 0x72, 0xa8, 0xbd, 0x87, 0xd6, 0x06, 0xfd, 0x79, 0x5f, 0xa6, 0xb5, 0xd6,
+ 0x28, 0x6d, 0x19, 0x54, 0xd2, 0xe2, 0x9d, 0x38, 0x9b, 0x4f, 0x93, 0xfe, 0xd5, 0x72, 0xf6, 0x6a,
+ 0x19, 0x98, 0xf9, 0x61, 0xde, 0x47, 0xeb, 0x17, 0xa3, 0xfe, 0xe5, 0x4c, 0x0e, 0xe4, 0xf2, 0xc9,
+ 0x93, 0xd1, 0x62, 0x3e, 0x93, 0x51, 0xcc, 0x53, 0xf9, 0x1a, 0x7a, 0xc5, 0x7c, 0x26, 0xc4, 0x78,
+ 0x7a, 0x53, 0x37, 0x6f, 0xbc, 0xb4, 0x9b, 0x0f, 0x51, 0x25, 0x8b, 0x57, 0xb4, 0xf2, 0xdb, 0xa8,
+ 0x22, 0x1c, 0x8e, 0x67, 0xc9, 0x78, 0x9e, 0xfd, 0x30, 0x42, 0xb7, 0x04, 0xc0, 0x92, 0xf1, 0xdc,
+ 0xfa, 0x4f, 0x03, 0x6d, 0xd3, 0xe4, 0x3c, 0x19, 0x3e, 0xbd, 0x5d, 0x35, 0x94, 0xf2, 0xe1, 0x17,
+ 0x49, 0xbe, 0x9b, 0x33, 0xe5, 0xc3, 0x2f, 0x92, 0x22, 0xfa, 0xf2, 0x4a, 0xf4, 0x37, 0x04, 0xb3,
+ 0xfe, 0xd2, 0x60, 0x2c, 0xb4, 0xde, 0x94, 0xab, 0xaa, 0x68, 0x33, 0x60, 0x2d, 0x31, 0xa8, 0x80,
+ 0x61, 0xd6, 0xd0, 0x96, 0x20, 0x22, 0x8c, 0x3b, 0x50, 0xb2, 0xfe, 0xd5, 0x40, 0x35, 0x15, 0x86,
+ 0x08, 0xfa, 0x85, 0xea, 0xc8, 0x3e, 0x59, 0xa9, 0xce, 0xa2, 0xb4, 0xc2, 0x3d, 0xbd, 0xb4, 0x3f,
+ 0x45, 0xf5, 0x69, 0xa6, 0x6c, 0x10, 0x5f, 0x4c, 0xd3, 0xab, 0xaf, 0x78, 0x4e, 0xd5, 0x16, 0xc2,
+ 0xcd, 0x69, 0x7a, 0x25, 0xf6, 0xd4, 0xa7, 0x4f, 0x2e, 0x2e, 0x92, 0x69, 0x96, 0x13, 0xf9, 0xd6,
+ 0xa5, 0x28, 0x83, 0x44, 0x56, 0xac, 0x2f, 0xcb, 0xa8, 0x12, 0xa5, 0xa3, 0x11, 0x7e, 0x9a, 0x8c,
+ 0xdf, 0x30, 0xdb, 0xdf, 0x43, 0x30, 0xcd, 0xaa, 0x94, 0x0c, 0xe2, 0x44, 0xac, 0x9f, 0xe5, 0x49,
+ 0xdf, 0x51, 0xb8, 0x54, 0x3b, 0x33, 0xbf, 0x8b, 0x76, 0xd2, 0x4f, 0xe5, 0x4b, 0x51, 0x49, 0x96,
+ 0xa5, 0xe4, 0xf6, 0x02, 0xce, 0x04, 0xad, 0xff, 0x28, 0xa1, 0xba, 0x72, 0x47, 0x24, 0x5a, 0x9b,
+ 0x35, 0x22, 0xe2, 0xfb, 0x21, 0x09, 0x31, 0xbc, 0xa5, 0x4d, 0x6e, 0x02, 0xf4, 0xc2, 0xa5, 0x13,
+ 0x40, 0x40, 0x11, 0xf5, 0x96, 0x46, 0x5e, 0x81, 0x91, 0x2e, 0x87, 0xb5, 0x15, 0x0c, 0x53, 0x0a,
+ 0x5b, 0x2b, 0x58, 0xbb, 0x1b, 0x01, 0xac, 0xda, 0x3d, 0xb1, 0x7d, 0x38, 0xd0, 0x26, 0x2c, 0x01,
+ 0x52, 0x37, 0x24, 0x34, 0x80, 0x47, 0xe6, 0xbd, 0x15, 0xb8, 0x61, 0x87, 0xf2, 0x1b, 0xd3, 0x32,
+ 0x7e, 0x4a, 0xa5, 0xf8, 0x75, 0xe9, 0x05, 0x3c, 0x93, 0x5f, 0x93, 0x1f, 0x9f, 0x0a, 0x3c, 0x60,
+ 0x2d, 0xb8, 0xde, 0x5a, 0x55, 0x8e, 0x03, 0x72, 0x82, 0xe1, 0xfa, 0x40, 0x7e, 0xc0, 0xd2, 0x8d,
+ 0x0a, 0xb7, 0xaf, 0x1f, 0x59, 0x8f, 0x51, 0x55, 0x24, 0x70, 0xb1, 0x7f, 0x7e, 0x80, 0x36, 0xf2,
+ 0x84, 0x1b, 0x72, 0x9e, 0xd8, 0xd5, 0xda, 0x46, 0x25, 0x9a, 0xe6, 0x32, 0x6f, 0x76, 0x4b, 0xfd,
+ 0x38, 0xeb, 0x9c, 0xac, 0xc5, 0x0b, 0x3b, 0xa5, 0xaf, 0xb6, 0x63, 0xfd, 0x56, 0xec, 0xf3, 0x59,
+ 0x3a, 0x2a, 0xf6, 0xb9, 0x89, 0xd6, 0xc6, 0xfd, 0xab, 0x24, 0x6f, 0x36, 0xf9, 0xbf, 0x79, 0x82,
+ 0x20, 0xbf, 0xbb, 0x62, 0xf9, 0x31, 0x6a, 0x98, 0x64, 0xda, 0xdf, 0xf0, 0x4b, 0xd6, 0x4e, 0xae,
+ 0xa4, 0x99, 0xeb, 0xb0, 0xfe, 0xbb, 0x2c, 0xf6, 0x67, 0x6e, 0x5e, 0x38, 0x7f, 0xd3, 0xc7, 0xb8,
+ 0xf2, 0x8b, 0x1f, 0xe3, 0xde, 0x45, 0xdb, 0xe7, 0xfd, 0x71, 0x3a, 0x1e, 0x9e, 0xf7, 0x47, 0xb1,
+ 0xf4, 0x36, 0xfb, 0x1a, 0x57, 0x57, 0xa8, 0x7c, 0x96, 0xed, 0xa3, 0xcd, 0xfe, 0x68, 0xd8, 0x9f,
+ 0x25, 0xe2, 0xa0, 0x2d, 0x1f, 0x56, 0xe8, 0x82, 0xb4, 0xfe, 0xb7, 0xa4, 0xff, 0xa0, 0xfb, 0x35,
+ 0xb4, 0x97, 0x17, 0x10, 0xdb, 0x5e, 0x2c, 0x5e, 0x69, 0x4d, 0x3b, 0xf0, 0x7c, 0xf1, 0x80, 0x28,
+ 0xae, 0x2e, 0xc9, 0x92, 0xbf, 0x65, 0x96, 0xb4, 0x09, 0x5b, 0xa0, 0x0d, 0xdb, 0x6d, 0xfa, 0x76,
+ 0x8b, 0x2d, 0x3d, 0xe3, 0x04, 0xa3, 0x69, 0x7b, 0x7e, 0xf6, 0x0b, 0xf0, 0x12, 0x28, 0x55, 0xaf,
+ 0xaf, 0xc0, 0x01, 0x0e, 0x08, 0xed, 0x2d, 0xbd, 0x1d, 0x04, 0x9c, 0xff, 0x1c, 0xb4, 0xf9, 0x02,
+ 0x1c, 0xda, 0x01, 0x86, 0x2d, 0xed, 0x49, 0x21, 0x60, 0x86, 0xe9, 0x89, 0xe7, 0x2c, 0xbf, 0xe1,
+ 0x24, 0x4e, 0x9c, 0x8e, 0x7c, 0x68, 0xa2, 0x15, 0x3d, 0xd9, 0xef, 0xd8, 0x4b, 0x6f, 0x86, 0x3c,
+ 0xa2, 0xb6, 0x17, 0x72, 0x06, 0xb5, 0x15, 0x86, 0xfc, 0xdd, 0xc1, 0x21, 0x3e, 0xd4, 0x57, 0x18,
+ 0xea, 0x37, 0x9d, 0x6d, 0x6d, 0x0f, 0xcb, 0xb8, 0xec, 0x33, 0xd8, 0x69, 0x6c, 0x7d, 0xb2, 0x91,
+ 0x9d, 0x5a, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x31, 0x03, 0x4e, 0xbd, 0xfd, 0x1f, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto
new file mode 100644
index 00000000..2fcc7953
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto
@@ -0,0 +1,460 @@
+syntax = "proto2";
+option go_package = "socket";
+
+package appengine;
+
+message RemoteSocketServiceError {
+ enum ErrorCode {
+ SYSTEM_ERROR = 1;
+ GAI_ERROR = 2;
+ FAILURE = 4;
+ PERMISSION_DENIED = 5;
+ INVALID_REQUEST = 6;
+ SOCKET_CLOSED = 7;
+ }
+
+ enum SystemError {
+ option allow_alias = true;
+
+ SYS_SUCCESS = 0;
+ SYS_EPERM = 1;
+ SYS_ENOENT = 2;
+ SYS_ESRCH = 3;
+ SYS_EINTR = 4;
+ SYS_EIO = 5;
+ SYS_ENXIO = 6;
+ SYS_E2BIG = 7;
+ SYS_ENOEXEC = 8;
+ SYS_EBADF = 9;
+ SYS_ECHILD = 10;
+ SYS_EAGAIN = 11;
+ SYS_EWOULDBLOCK = 11;
+ SYS_ENOMEM = 12;
+ SYS_EACCES = 13;
+ SYS_EFAULT = 14;
+ SYS_ENOTBLK = 15;
+ SYS_EBUSY = 16;
+ SYS_EEXIST = 17;
+ SYS_EXDEV = 18;
+ SYS_ENODEV = 19;
+ SYS_ENOTDIR = 20;
+ SYS_EISDIR = 21;
+ SYS_EINVAL = 22;
+ SYS_ENFILE = 23;
+ SYS_EMFILE = 24;
+ SYS_ENOTTY = 25;
+ SYS_ETXTBSY = 26;
+ SYS_EFBIG = 27;
+ SYS_ENOSPC = 28;
+ SYS_ESPIPE = 29;
+ SYS_EROFS = 30;
+ SYS_EMLINK = 31;
+ SYS_EPIPE = 32;
+ SYS_EDOM = 33;
+ SYS_ERANGE = 34;
+ SYS_EDEADLK = 35;
+ SYS_EDEADLOCK = 35;
+ SYS_ENAMETOOLONG = 36;
+ SYS_ENOLCK = 37;
+ SYS_ENOSYS = 38;
+ SYS_ENOTEMPTY = 39;
+ SYS_ELOOP = 40;
+ SYS_ENOMSG = 42;
+ SYS_EIDRM = 43;
+ SYS_ECHRNG = 44;
+ SYS_EL2NSYNC = 45;
+ SYS_EL3HLT = 46;
+ SYS_EL3RST = 47;
+ SYS_ELNRNG = 48;
+ SYS_EUNATCH = 49;
+ SYS_ENOCSI = 50;
+ SYS_EL2HLT = 51;
+ SYS_EBADE = 52;
+ SYS_EBADR = 53;
+ SYS_EXFULL = 54;
+ SYS_ENOANO = 55;
+ SYS_EBADRQC = 56;
+ SYS_EBADSLT = 57;
+ SYS_EBFONT = 59;
+ SYS_ENOSTR = 60;
+ SYS_ENODATA = 61;
+ SYS_ETIME = 62;
+ SYS_ENOSR = 63;
+ SYS_ENONET = 64;
+ SYS_ENOPKG = 65;
+ SYS_EREMOTE = 66;
+ SYS_ENOLINK = 67;
+ SYS_EADV = 68;
+ SYS_ESRMNT = 69;
+ SYS_ECOMM = 70;
+ SYS_EPROTO = 71;
+ SYS_EMULTIHOP = 72;
+ SYS_EDOTDOT = 73;
+ SYS_EBADMSG = 74;
+ SYS_EOVERFLOW = 75;
+ SYS_ENOTUNIQ = 76;
+ SYS_EBADFD = 77;
+ SYS_EREMCHG = 78;
+ SYS_ELIBACC = 79;
+ SYS_ELIBBAD = 80;
+ SYS_ELIBSCN = 81;
+ SYS_ELIBMAX = 82;
+ SYS_ELIBEXEC = 83;
+ SYS_EILSEQ = 84;
+ SYS_ERESTART = 85;
+ SYS_ESTRPIPE = 86;
+ SYS_EUSERS = 87;
+ SYS_ENOTSOCK = 88;
+ SYS_EDESTADDRREQ = 89;
+ SYS_EMSGSIZE = 90;
+ SYS_EPROTOTYPE = 91;
+ SYS_ENOPROTOOPT = 92;
+ SYS_EPROTONOSUPPORT = 93;
+ SYS_ESOCKTNOSUPPORT = 94;
+ SYS_EOPNOTSUPP = 95;
+ SYS_ENOTSUP = 95;
+ SYS_EPFNOSUPPORT = 96;
+ SYS_EAFNOSUPPORT = 97;
+ SYS_EADDRINUSE = 98;
+ SYS_EADDRNOTAVAIL = 99;
+ SYS_ENETDOWN = 100;
+ SYS_ENETUNREACH = 101;
+ SYS_ENETRESET = 102;
+ SYS_ECONNABORTED = 103;
+ SYS_ECONNRESET = 104;
+ SYS_ENOBUFS = 105;
+ SYS_EISCONN = 106;
+ SYS_ENOTCONN = 107;
+ SYS_ESHUTDOWN = 108;
+ SYS_ETOOMANYREFS = 109;
+ SYS_ETIMEDOUT = 110;
+ SYS_ECONNREFUSED = 111;
+ SYS_EHOSTDOWN = 112;
+ SYS_EHOSTUNREACH = 113;
+ SYS_EALREADY = 114;
+ SYS_EINPROGRESS = 115;
+ SYS_ESTALE = 116;
+ SYS_EUCLEAN = 117;
+ SYS_ENOTNAM = 118;
+ SYS_ENAVAIL = 119;
+ SYS_EISNAM = 120;
+ SYS_EREMOTEIO = 121;
+ SYS_EDQUOT = 122;
+ SYS_ENOMEDIUM = 123;
+ SYS_EMEDIUMTYPE = 124;
+ SYS_ECANCELED = 125;
+ SYS_ENOKEY = 126;
+ SYS_EKEYEXPIRED = 127;
+ SYS_EKEYREVOKED = 128;
+ SYS_EKEYREJECTED = 129;
+ SYS_EOWNERDEAD = 130;
+ SYS_ENOTRECOVERABLE = 131;
+ SYS_ERFKILL = 132;
+ }
+
+ optional int32 system_error = 1 [default=0];
+ optional string error_detail = 2;
+}
+
+message AddressPort {
+ required int32 port = 1;
+ optional bytes packed_address = 2;
+
+ optional string hostname_hint = 3;
+}
+
+
+
+message CreateSocketRequest {
+ enum SocketFamily {
+ IPv4 = 1;
+ IPv6 = 2;
+ }
+
+ enum SocketProtocol {
+ TCP = 1;
+ UDP = 2;
+ }
+
+ required SocketFamily family = 1;
+ required SocketProtocol protocol = 2;
+
+ repeated SocketOption socket_options = 3;
+
+ optional AddressPort proxy_external_ip = 4;
+
+ optional int32 listen_backlog = 5 [default=0];
+
+ optional AddressPort remote_ip = 6;
+
+ optional string app_id = 9;
+
+ optional int64 project_id = 10;
+}
+
+message CreateSocketReply {
+ optional string socket_descriptor = 1;
+
+ optional AddressPort server_address = 3;
+
+ optional AddressPort proxy_external_ip = 4;
+
+ extensions 1000 to max;
+}
+
+
+
+message BindRequest {
+ required string socket_descriptor = 1;
+ required AddressPort proxy_external_ip = 2;
+}
+
+message BindReply {
+ optional AddressPort proxy_external_ip = 1;
+}
+
+
+
+message GetSocketNameRequest {
+ required string socket_descriptor = 1;
+}
+
+message GetSocketNameReply {
+ optional AddressPort proxy_external_ip = 2;
+}
+
+
+
+message GetPeerNameRequest {
+ required string socket_descriptor = 1;
+}
+
+message GetPeerNameReply {
+ optional AddressPort peer_ip = 2;
+}
+
+
+message SocketOption {
+
+ enum SocketOptionLevel {
+ SOCKET_SOL_IP = 0;
+ SOCKET_SOL_SOCKET = 1;
+ SOCKET_SOL_TCP = 6;
+ SOCKET_SOL_UDP = 17;
+ }
+
+ enum SocketOptionName {
+ option allow_alias = true;
+
+ SOCKET_SO_DEBUG = 1;
+ SOCKET_SO_REUSEADDR = 2;
+ SOCKET_SO_TYPE = 3;
+ SOCKET_SO_ERROR = 4;
+ SOCKET_SO_DONTROUTE = 5;
+ SOCKET_SO_BROADCAST = 6;
+ SOCKET_SO_SNDBUF = 7;
+ SOCKET_SO_RCVBUF = 8;
+ SOCKET_SO_KEEPALIVE = 9;
+ SOCKET_SO_OOBINLINE = 10;
+ SOCKET_SO_LINGER = 13;
+ SOCKET_SO_RCVTIMEO = 20;
+ SOCKET_SO_SNDTIMEO = 21;
+
+ SOCKET_IP_TOS = 1;
+ SOCKET_IP_TTL = 2;
+ SOCKET_IP_HDRINCL = 3;
+ SOCKET_IP_OPTIONS = 4;
+
+ SOCKET_TCP_NODELAY = 1;
+ SOCKET_TCP_MAXSEG = 2;
+ SOCKET_TCP_CORK = 3;
+ SOCKET_TCP_KEEPIDLE = 4;
+ SOCKET_TCP_KEEPINTVL = 5;
+ SOCKET_TCP_KEEPCNT = 6;
+ SOCKET_TCP_SYNCNT = 7;
+ SOCKET_TCP_LINGER2 = 8;
+ SOCKET_TCP_DEFER_ACCEPT = 9;
+ SOCKET_TCP_WINDOW_CLAMP = 10;
+ SOCKET_TCP_INFO = 11;
+ SOCKET_TCP_QUICKACK = 12;
+ }
+
+ required SocketOptionLevel level = 1;
+ required SocketOptionName option = 2;
+ required bytes value = 3;
+}
+
+
+message SetSocketOptionsRequest {
+ required string socket_descriptor = 1;
+ repeated SocketOption options = 2;
+}
+
+message SetSocketOptionsReply {
+}
+
+message GetSocketOptionsRequest {
+ required string socket_descriptor = 1;
+ repeated SocketOption options = 2;
+}
+
+message GetSocketOptionsReply {
+ repeated SocketOption options = 2;
+}
+
+
+message ConnectRequest {
+ required string socket_descriptor = 1;
+ required AddressPort remote_ip = 2;
+ optional double timeout_seconds = 3 [default=-1];
+}
+
+message ConnectReply {
+ optional AddressPort proxy_external_ip = 1;
+
+ extensions 1000 to max;
+}
+
+
+message ListenRequest {
+ required string socket_descriptor = 1;
+ required int32 backlog = 2;
+}
+
+message ListenReply {
+}
+
+
+message AcceptRequest {
+ required string socket_descriptor = 1;
+ optional double timeout_seconds = 2 [default=-1];
+}
+
+message AcceptReply {
+ optional bytes new_socket_descriptor = 2;
+ optional AddressPort remote_address = 3;
+}
+
+
+
+message ShutDownRequest {
+ enum How {
+ SOCKET_SHUT_RD = 1;
+ SOCKET_SHUT_WR = 2;
+ SOCKET_SHUT_RDWR = 3;
+ }
+ required string socket_descriptor = 1;
+ required How how = 2;
+ required int64 send_offset = 3;
+}
+
+message ShutDownReply {
+}
+
+
+
+message CloseRequest {
+ required string socket_descriptor = 1;
+ optional int64 send_offset = 2 [default=-1];
+}
+
+message CloseReply {
+}
+
+
+
+message SendRequest {
+ required string socket_descriptor = 1;
+ required bytes data = 2 [ctype=CORD];
+ required int64 stream_offset = 3;
+ optional int32 flags = 4 [default=0];
+ optional AddressPort send_to = 5;
+ optional double timeout_seconds = 6 [default=-1];
+}
+
+message SendReply {
+ optional int32 data_sent = 1;
+}
+
+
+message ReceiveRequest {
+ enum Flags {
+ MSG_OOB = 1;
+ MSG_PEEK = 2;
+ }
+ required string socket_descriptor = 1;
+ required int32 data_size = 2;
+ optional int32 flags = 3 [default=0];
+ optional double timeout_seconds = 5 [default=-1];
+}
+
+message ReceiveReply {
+ optional int64 stream_offset = 2;
+ optional bytes data = 3 [ctype=CORD];
+ optional AddressPort received_from = 4;
+ optional int32 buffer_size = 5;
+}
+
+
+
+message PollEvent {
+
+ enum PollEventFlag {
+ SOCKET_POLLNONE = 0;
+ SOCKET_POLLIN = 1;
+ SOCKET_POLLPRI = 2;
+ SOCKET_POLLOUT = 4;
+ SOCKET_POLLERR = 8;
+ SOCKET_POLLHUP = 16;
+ SOCKET_POLLNVAL = 32;
+ SOCKET_POLLRDNORM = 64;
+ SOCKET_POLLRDBAND = 128;
+ SOCKET_POLLWRNORM = 256;
+ SOCKET_POLLWRBAND = 512;
+ SOCKET_POLLMSG = 1024;
+ SOCKET_POLLREMOVE = 4096;
+ SOCKET_POLLRDHUP = 8192;
+ };
+
+ required string socket_descriptor = 1;
+ required int32 requested_events = 2;
+ required int32 observed_events = 3;
+}
+
+message PollRequest {
+ repeated PollEvent events = 1;
+ optional double timeout_seconds = 2 [default=-1];
+}
+
+message PollReply {
+ repeated PollEvent events = 2;
+}
+
+message ResolveRequest {
+ required string name = 1;
+ repeated CreateSocketRequest.SocketFamily address_families = 2;
+}
+
+message ResolveReply {
+ enum ErrorCode {
+ SOCKET_EAI_ADDRFAMILY = 1;
+ SOCKET_EAI_AGAIN = 2;
+ SOCKET_EAI_BADFLAGS = 3;
+ SOCKET_EAI_FAIL = 4;
+ SOCKET_EAI_FAMILY = 5;
+ SOCKET_EAI_MEMORY = 6;
+ SOCKET_EAI_NODATA = 7;
+ SOCKET_EAI_NONAME = 8;
+ SOCKET_EAI_SERVICE = 9;
+ SOCKET_EAI_SOCKTYPE = 10;
+ SOCKET_EAI_SYSTEM = 11;
+ SOCKET_EAI_BADHINTS = 12;
+ SOCKET_EAI_PROTOCOL = 13;
+ SOCKET_EAI_OVERFLOW = 14;
+ SOCKET_EAI_MAX = 15;
+ };
+
+ repeated bytes packed_address = 2;
+ optional string canonical_name = 3;
+ repeated string aliases = 4;
+}
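
The SocketOption message above is how the remote_socket service models setsockopt-style tuning. As a purely illustrative sketch (not part of this diff), the Go below shows what one such option could look like with the generated bindings; the SocketOption_* constant names and the byte encoding of the value are assumptions, inferred from the usual protoc-gen-go prefixing that the CreateSocketRequest_* constants in socket_classic.go (added later in this diff) already follow.

package socketoptexample

import (
	pb "google.golang.org/appengine/internal/socket"
)

// keepAliveOption expresses the equivalent of
// setsockopt(SOL_SOCKET, SO_KEEPALIVE, 1) as a SocketOption message.
// The SocketOption_* constants are assumed names following standard
// protoc-gen-go generation for nested proto2 enums.
func keepAliveOption() *pb.SocketOption {
	return &pb.SocketOption{
		Level:  pb.SocketOption_SOCKET_SOL_SOCKET.Enum(),
		Option: pb.SocketOption_SOCKET_SO_KEEPALIVE.Enum(),
		// Option values are opaque bytes; for integer options the
		// remote_socket service presumably expects the host encoding
		// of the int (assumption).
		Value: []byte{1, 0, 0, 0},
	}
}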
diff --git a/vendor/google.golang.org/appengine/socket/doc.go b/vendor/google.golang.org/appengine/socket/doc.go
new file mode 100644
index 00000000..3de46df8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/socket/doc.go
@@ -0,0 +1,10 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package socket provides outbound network sockets.
+//
+// This package is only required in the classic App Engine environment.
+// Applications running only in App Engine "flexible environment" should
+// use the standard library's net package.
+package socket
diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go
new file mode 100644
index 00000000..0ad50e2d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/socket/socket_classic.go
@@ -0,0 +1,290 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package socket
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strconv"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/internal"
+
+ pb "google.golang.org/appengine/internal/socket"
+)
+
+// Dial connects to the address addr on the network protocol.
+// The address format is host:port, where host may be a hostname or an IP address.
+// Known protocols are "tcp" and "udp".
+// The returned connection satisfies net.Conn, and is valid while ctx is valid;
+// if the connection is to be used after ctx becomes invalid, invoke SetContext
+// with the new context.
+func Dial(ctx context.Context, protocol, addr string) (*Conn, error) {
+ return DialTimeout(ctx, protocol, addr, 0)
+}
+
+var ipFamilies = []pb.CreateSocketRequest_SocketFamily{
+ pb.CreateSocketRequest_IPv4,
+ pb.CreateSocketRequest_IPv6,
+}
+
+// DialTimeout is like Dial but takes a timeout.
+// The timeout includes name resolution, if required.
+func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) {
+ dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn.
+ if timeout > 0 {
+ var cancel context.CancelFunc
+ dialCtx, cancel = context.WithTimeout(ctx, timeout)
+ defer cancel()
+ }
+
+ host, portStr, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ port, err := strconv.Atoi(portStr)
+ if err != nil {
+ return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err)
+ }
+
+ var prot pb.CreateSocketRequest_SocketProtocol
+ switch protocol {
+ case "tcp":
+ prot = pb.CreateSocketRequest_TCP
+ case "udp":
+ prot = pb.CreateSocketRequest_UDP
+ default:
+ return nil, fmt.Errorf("socket: unknown protocol %q", protocol)
+ }
+
+ packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host)
+ if err != nil {
+ return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err)
+ }
+ if len(packedAddrs) == 0 {
+ return nil, fmt.Errorf("no addresses for %q", host)
+ }
+
+ packedAddr := packedAddrs[0] // use first address
+ fam := pb.CreateSocketRequest_IPv4
+ if len(packedAddr) == net.IPv6len {
+ fam = pb.CreateSocketRequest_IPv6
+ }
+
+ req := &pb.CreateSocketRequest{
+ Family: fam.Enum(),
+ Protocol: prot.Enum(),
+ RemoteIp: &pb.AddressPort{
+ Port: proto.Int32(int32(port)),
+ PackedAddress: packedAddr,
+ },
+ }
+ if resolved {
+ req.RemoteIp.HostnameHint = &host
+ }
+ res := &pb.CreateSocketReply{}
+ if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil {
+ return nil, err
+ }
+
+ return &Conn{
+ ctx: ctx,
+ desc: res.GetSocketDescriptor(),
+ prot: prot,
+ local: res.ProxyExternalIp,
+ remote: req.RemoteIp,
+ }, nil
+}
+
+// LookupIP returns the given host's IP addresses.
+func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) {
+ packedAddrs, _, err := resolve(ctx, ipFamilies, host)
+ if err != nil {
+ return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err)
+ }
+ addrs = make([]net.IP, len(packedAddrs))
+ for i, pa := range packedAddrs {
+ addrs[i] = net.IP(pa)
+ }
+ return addrs, nil
+}
+
+func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) {
+ // Check if it's an IP address.
+ if ip := net.ParseIP(host); ip != nil {
+ if ip := ip.To4(); ip != nil {
+ return [][]byte{ip}, false, nil
+ }
+ return [][]byte{ip}, false, nil
+ }
+
+ req := &pb.ResolveRequest{
+ Name: &host,
+ AddressFamilies: fams,
+ }
+ res := &pb.ResolveReply{}
+ if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil {
+ // XXX: need to map to pb.ResolveReply_ErrorCode?
+ return nil, false, err
+ }
+ return res.PackedAddress, true, nil
+}
+
+// withDeadline is like context.WithDeadline, except it ignores the zero deadline.
+func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) {
+ if deadline.IsZero() {
+ return parent, func() {}
+ }
+ return context.WithDeadline(parent, deadline)
+}
+
+// Conn represents a socket connection.
+// It implements net.Conn.
+type Conn struct {
+ ctx context.Context
+ desc string
+ offset int64
+
+ prot pb.CreateSocketRequest_SocketProtocol
+ local, remote *pb.AddressPort
+
+ readDeadline, writeDeadline time.Time // optional
+}
+
+// SetContext sets the context that is used by this Conn.
+// It is usually used only when using a Conn that was created in a different context,
+// such as when a connection is created during a warmup request but used while
+// servicing a user request.
+func (cn *Conn) SetContext(ctx context.Context) {
+ cn.ctx = ctx
+}
+
+func (cn *Conn) Read(b []byte) (n int, err error) {
+ const maxRead = 1 << 20
+ if len(b) > maxRead {
+ b = b[:maxRead]
+ }
+
+ req := &pb.ReceiveRequest{
+ SocketDescriptor: &cn.desc,
+ DataSize: proto.Int32(int32(len(b))),
+ }
+ res := &pb.ReceiveReply{}
+ if !cn.readDeadline.IsZero() {
+ req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds())
+ }
+ ctx, cancel := withDeadline(cn.ctx, cn.readDeadline)
+ defer cancel()
+ if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil {
+ return 0, err
+ }
+ if len(res.Data) == 0 {
+ return 0, io.EOF
+ }
+ if len(res.Data) > len(b) {
+ return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b))
+ }
+ return copy(b, res.Data), nil
+}
+
+func (cn *Conn) Write(b []byte) (n int, err error) {
+ const lim = 1 << 20 // max per chunk
+
+ for n < len(b) {
+ chunk := b[n:]
+ if len(chunk) > lim {
+ chunk = chunk[:lim]
+ }
+
+ req := &pb.SendRequest{
+ SocketDescriptor: &cn.desc,
+ Data: chunk,
+ StreamOffset: &cn.offset,
+ }
+ res := &pb.SendReply{}
+ if !cn.writeDeadline.IsZero() {
+ req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds())
+ }
+ ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline)
+ defer cancel()
+ if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil {
+ // assume zero bytes were sent in this RPC
+ break
+ }
+ n += int(res.GetDataSent())
+ cn.offset += int64(res.GetDataSent())
+ }
+
+ return
+}
+
+func (cn *Conn) Close() error {
+ req := &pb.CloseRequest{
+ SocketDescriptor: &cn.desc,
+ }
+ res := &pb.CloseReply{}
+ if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil {
+ return err
+ }
+ cn.desc = "CLOSED"
+ return nil
+}
+
+func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr {
+ if ap == nil {
+ return nil
+ }
+ switch prot {
+ case pb.CreateSocketRequest_TCP:
+ return &net.TCPAddr{
+ IP: net.IP(ap.PackedAddress),
+ Port: int(*ap.Port),
+ }
+ case pb.CreateSocketRequest_UDP:
+ return &net.UDPAddr{
+ IP: net.IP(ap.PackedAddress),
+ Port: int(*ap.Port),
+ }
+ }
+ panic("unknown protocol " + prot.String())
+}
+
+func (cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) }
+func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) }
+
+func (cn *Conn) SetDeadline(t time.Time) error {
+ cn.readDeadline = t
+ cn.writeDeadline = t
+ return nil
+}
+
+func (cn *Conn) SetReadDeadline(t time.Time) error {
+ cn.readDeadline = t
+ return nil
+}
+
+func (cn *Conn) SetWriteDeadline(t time.Time) error {
+ cn.writeDeadline = t
+ return nil
+}
+
+// KeepAlive signals that the connection is still in use.
+// It may be called to prevent the socket being closed due to inactivity.
+func (cn *Conn) KeepAlive() error {
+ req := &pb.GetSocketNameRequest{
+ SocketDescriptor: &cn.desc,
+ }
+ res := &pb.GetSocketNameReply{}
+ return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res)
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name)
+}
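
Taken together, Dial/DialTimeout and the Conn methods above give the vendored socket package the shape of the standard net package plus KeepAlive and SetContext. The following is a minimal, hypothetical caller, included only for orientation (it is not part of the diff); it relies solely on the exported API shown in this file and in socket_vm.go below, and the "PING" payload is arbitrary.

package socketexample

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/appengine/socket"
)

// probe dials addr over TCP, performs one write/read round trip, and keeps
// the connection alive. *socket.Conn satisfies net.Conn, so deadlines and
// I/O work exactly as with the standard library.
func probe(ctx context.Context, addr string) error {
	// DialTimeout bounds name resolution plus the connect itself.
	conn, err := socket.DialTimeout(ctx, "tcp", addr, 5*time.Second)
	if err != nil {
		return err
	}
	defer conn.Close()

	if err := conn.SetDeadline(time.Now().Add(10 * time.Second)); err != nil {
		return err
	}
	if _, err := fmt.Fprint(conn, "PING\r\n"); err != nil {
		return err
	}
	buf := make([]byte, 512)
	n, err := conn.Read(buf)
	if err != nil {
		return err
	}
	fmt.Printf("read %d bytes: %q\n", n, buf[:n])

	// On classic App Engine this pings the remote_socket service so the idle
	// connection is not reclaimed; on the flexible environment it is a no-op.
	return conn.KeepAlive()
}

Because the two implementations sit behind the appengine build tag, a caller like this compiles unchanged in both the classic and flexible environments.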
diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go
new file mode 100644
index 00000000..c804169a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/socket/socket_vm.go
@@ -0,0 +1,64 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package socket
+
+import (
+ "net"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// Dial connects to the address addr on the network protocol.
+// The address format is host:port, where host may be a hostname or an IP address.
+// Known protocols are "tcp" and "udp".
+// The returned connection satisfies net.Conn, and is valid while ctx is valid;
+// if the connection is to be used after ctx becomes invalid, invoke SetContext
+// with the new context.
+func Dial(ctx context.Context, protocol, addr string) (*Conn, error) {
+ conn, err := net.Dial(protocol, addr)
+ if err != nil {
+ return nil, err
+ }
+ return &Conn{conn}, nil
+}
+
+// DialTimeout is like Dial but takes a timeout.
+// The timeout includes name resolution, if required.
+func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) {
+ conn, err := net.DialTimeout(protocol, addr, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return &Conn{conn}, nil
+}
+
+// LookupIP returns the given host's IP addresses.
+func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) {
+ return net.LookupIP(host)
+}
+
+// Conn represents a socket connection.
+// It implements net.Conn.
+type Conn struct {
+ net.Conn
+}
+
+// SetContext sets the context that is used by this Conn.
+// It is usually used only when using a Conn that was created in a different context,
+// such as when a connection is created during a warmup request but used while
+// servicing a user request.
+func (cn *Conn) SetContext(ctx context.Context) {
+ // This function is not required in App Engine "flexible environment".
+}
+
+// KeepAlive signals that the connection is still in use.
+// It may be called to prevent the socket being closed due to inactivity.
+func (cn *Conn) KeepAlive() error {
+ // This function is not required in App Engine "flexible environment".
+ return nil
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go
index 3ce7eb74..66fb9ffb 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go
@@ -1,22 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/monitored_resource.proto
-/*
-Package monitoredres is a generated protocol buffer package.
-
-It is generated from these files:
- google/api/monitored_resource.proto
-
-It has these top-level messages:
- MonitoredResourceDescriptor
- MonitoredResource
-*/
-package monitoredres
+package monitoredres // import "google.golang.org/genproto/googleapis/api/monitoredres"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
-import google_api "google.golang.org/genproto/googleapis/api/label"
+import _struct "github.com/golang/protobuf/ptypes/struct"
+import label "google.golang.org/genproto/googleapis/api/label"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -29,9 +20,10 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-// An object that describes the schema of a [MonitoredResource][google.api.MonitoredResource] object using a
-// type name and a set of labels. For example, the monitored resource
-// descriptor for Google Compute Engine VM instances has a type of
+// An object that describes the schema of a
+// [MonitoredResource][google.api.MonitoredResource] object using a type name
+// and a set of labels. For example, the monitored resource descriptor for
+// Google Compute Engine VM instances has a type of
// `"gce_instance"` and specifies the use of the labels `"instance_id"` and
// `"zone"` to identify particular VM instances.
//
@@ -45,29 +37,51 @@ type MonitoredResourceDescriptor struct {
// {project_id} is a project ID that provides API-specific context for
// accessing the type. APIs that do not use project information can use the
// resource name format `"monitoredResourceDescriptors/{type}"`.
- Name string `protobuf:"bytes,5,opt,name=name" json:"name,omitempty"`
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
// Required. The monitored resource type. For example, the type
// `"cloudsql_database"` represents databases in Google Cloud SQL.
// The maximum length of this value is 256 characters.
- Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
// Optional. A concise name for the monitored resource type that might be
// displayed in user interfaces. It should be a Title Cased Noun Phrase,
// without any article or other determiners. For example,
// `"Google Cloud SQL Database"`.
- DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
// Optional. A detailed description of the monitored resource type that might
// be used in documentation.
- Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
+ Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
// Required. A set of labels used to describe instances of this monitored
// resource type. For example, an individual Google Cloud SQL database is
// identified by values for the labels `"database_id"` and `"zone"`.
- Labels []*google_api.LabelDescriptor `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty"`
+ Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MonitoredResourceDescriptor) Reset() { *m = MonitoredResourceDescriptor{} }
+func (m *MonitoredResourceDescriptor) String() string { return proto.CompactTextString(m) }
+func (*MonitoredResourceDescriptor) ProtoMessage() {}
+func (*MonitoredResourceDescriptor) Descriptor() ([]byte, []int) {
+ return fileDescriptor_monitored_resource_fb985cf78be5ad98, []int{0}
+}
+func (m *MonitoredResourceDescriptor) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MonitoredResourceDescriptor.Unmarshal(m, b)
+}
+func (m *MonitoredResourceDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MonitoredResourceDescriptor.Marshal(b, m, deterministic)
+}
+func (dst *MonitoredResourceDescriptor) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MonitoredResourceDescriptor.Merge(dst, src)
+}
+func (m *MonitoredResourceDescriptor) XXX_Size() int {
+ return xxx_messageInfo_MonitoredResourceDescriptor.Size(m)
+}
+func (m *MonitoredResourceDescriptor) XXX_DiscardUnknown() {
+ xxx_messageInfo_MonitoredResourceDescriptor.DiscardUnknown(m)
}
-func (m *MonitoredResourceDescriptor) Reset() { *m = MonitoredResourceDescriptor{} }
-func (m *MonitoredResourceDescriptor) String() string { return proto.CompactTextString(m) }
-func (*MonitoredResourceDescriptor) ProtoMessage() {}
-func (*MonitoredResourceDescriptor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+var xxx_messageInfo_MonitoredResourceDescriptor proto.InternalMessageInfo
func (m *MonitoredResourceDescriptor) GetName() string {
if m != nil {
@@ -97,7 +111,7 @@ func (m *MonitoredResourceDescriptor) GetDescription() string {
return ""
}
-func (m *MonitoredResourceDescriptor) GetLabels() []*google_api.LabelDescriptor {
+func (m *MonitoredResourceDescriptor) GetLabels() []*label.LabelDescriptor {
if m != nil {
return m.Labels
}
@@ -107,11 +121,13 @@ func (m *MonitoredResourceDescriptor) GetLabels() []*google_api.LabelDescriptor
// An object representing a resource that can be used for monitoring, logging,
// billing, or other purposes. Examples include virtual machine instances,
// databases, and storage devices such as disks. The `type` field identifies a
-// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object that describes the resource's
-// schema. Information in the `labels` field identifies the actual resource and
-// its attributes according to the schema. For example, a particular Compute
-// Engine VM instance could be represented by the following object, because the
-// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for `"gce_instance"` has labels
+// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object
+// that describes the resource's schema. Information in the `labels` field
+// identifies the actual resource and its attributes according to the schema.
+// For example, a particular Compute Engine VM instance could be represented by
+// the following object, because the
+// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for
+// `"gce_instance"` has labels
// `"instance_id"` and `"zone"`:
//
// { "type": "gce_instance",
@@ -119,19 +135,43 @@ func (m *MonitoredResourceDescriptor) GetLabels() []*google_api.LabelDescriptor
// "zone": "us-central1-a" }}
type MonitoredResource struct {
// Required. The monitored resource type. This field must match
- // the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For
- // example, the type of a Cloud SQL database is `"cloudsql_database"`.
- Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
+ // the `type` field of a
+ // [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor]
+ // object. For example, the type of a Compute Engine VM instance is
+ // `gce_instance`.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
// Required. Values for all of the labels listed in the associated monitored
- // resource descriptor. For example, Cloud SQL databases use the labels
- // `"database_id"` and `"zone"`.
- Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ // resource descriptor. For example, Compute Engine VM instances use the
+ // labels `"project_id"`, `"instance_id"`, and `"zone"`.
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MonitoredResource) Reset() { *m = MonitoredResource{} }
+func (m *MonitoredResource) String() string { return proto.CompactTextString(m) }
+func (*MonitoredResource) ProtoMessage() {}
+func (*MonitoredResource) Descriptor() ([]byte, []int) {
+ return fileDescriptor_monitored_resource_fb985cf78be5ad98, []int{1}
+}
+func (m *MonitoredResource) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MonitoredResource.Unmarshal(m, b)
+}
+func (m *MonitoredResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MonitoredResource.Marshal(b, m, deterministic)
+}
+func (dst *MonitoredResource) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MonitoredResource.Merge(dst, src)
+}
+func (m *MonitoredResource) XXX_Size() int {
+ return xxx_messageInfo_MonitoredResource.Size(m)
+}
+func (m *MonitoredResource) XXX_DiscardUnknown() {
+ xxx_messageInfo_MonitoredResource.DiscardUnknown(m)
}
-func (m *MonitoredResource) Reset() { *m = MonitoredResource{} }
-func (m *MonitoredResource) String() string { return proto.CompactTextString(m) }
-func (*MonitoredResource) ProtoMessage() {}
-func (*MonitoredResource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+var xxx_messageInfo_MonitoredResource proto.InternalMessageInfo
func (m *MonitoredResource) GetType() string {
if m != nil {
@@ -147,34 +187,107 @@ func (m *MonitoredResource) GetLabels() map[string]string {
return nil
}
+// Auxiliary metadata for a [MonitoredResource][google.api.MonitoredResource]
+// object. [MonitoredResource][google.api.MonitoredResource] objects contain the
+// minimum set of information to uniquely identify a monitored resource
+// instance. There is some other useful auxiliary metadata. Monitoring and
+// Logging use an ingestion pipeline to extract metadata for cloud resources of
+// all types, and store the metadata in this message.
+type MonitoredResourceMetadata struct {
+ // Output only. Values for predefined system metadata labels.
+ // System labels are a kind of metadata extracted by Google, including
+ // "machine_image", "vpc", "subnet_id",
+ // "security_group", "name", etc.
+ // System label values can be only strings, Boolean values, or a list of
+ // strings. For example:
+ //
+ // { "name": "my-test-instance",
+ // "security_group": ["a", "b", "c"],
+ // "spot_instance": false }
+ SystemLabels *_struct.Struct `protobuf:"bytes,1,opt,name=system_labels,json=systemLabels,proto3" json:"system_labels,omitempty"`
+ // Output only. A map of user-defined metadata labels.
+ UserLabels map[string]string `protobuf:"bytes,2,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MonitoredResourceMetadata) Reset() { *m = MonitoredResourceMetadata{} }
+func (m *MonitoredResourceMetadata) String() string { return proto.CompactTextString(m) }
+func (*MonitoredResourceMetadata) ProtoMessage() {}
+func (*MonitoredResourceMetadata) Descriptor() ([]byte, []int) {
+ return fileDescriptor_monitored_resource_fb985cf78be5ad98, []int{2}
+}
+func (m *MonitoredResourceMetadata) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MonitoredResourceMetadata.Unmarshal(m, b)
+}
+func (m *MonitoredResourceMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MonitoredResourceMetadata.Marshal(b, m, deterministic)
+}
+func (dst *MonitoredResourceMetadata) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MonitoredResourceMetadata.Merge(dst, src)
+}
+func (m *MonitoredResourceMetadata) XXX_Size() int {
+ return xxx_messageInfo_MonitoredResourceMetadata.Size(m)
+}
+func (m *MonitoredResourceMetadata) XXX_DiscardUnknown() {
+ xxx_messageInfo_MonitoredResourceMetadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MonitoredResourceMetadata proto.InternalMessageInfo
+
+func (m *MonitoredResourceMetadata) GetSystemLabels() *_struct.Struct {
+ if m != nil {
+ return m.SystemLabels
+ }
+ return nil
+}
+
+func (m *MonitoredResourceMetadata) GetUserLabels() map[string]string {
+ if m != nil {
+ return m.UserLabels
+ }
+ return nil
+}
+
func init() {
proto.RegisterType((*MonitoredResourceDescriptor)(nil), "google.api.MonitoredResourceDescriptor")
proto.RegisterType((*MonitoredResource)(nil), "google.api.MonitoredResource")
+ proto.RegisterMapType((map[string]string)(nil), "google.api.MonitoredResource.LabelsEntry")
+ proto.RegisterType((*MonitoredResourceMetadata)(nil), "google.api.MonitoredResourceMetadata")
+ proto.RegisterMapType((map[string]string)(nil), "google.api.MonitoredResourceMetadata.UserLabelsEntry")
+}
+
+func init() {
+ proto.RegisterFile("google/api/monitored_resource.proto", fileDescriptor_monitored_resource_fb985cf78be5ad98)
}
-func init() { proto.RegisterFile("google/api/monitored_resource.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
- // 321 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0x4b, 0x4b, 0x3b, 0x31,
- 0x10, 0x27, 0xdb, 0x07, 0xfc, 0x67, 0xff, 0x88, 0x06, 0x29, 0x4b, 0x7b, 0xa9, 0xf5, 0x52, 0x2f,
- 0xbb, 0x60, 0x2f, 0x3e, 0x4e, 0xad, 0x8a, 0x08, 0x2a, 0xa5, 0x47, 0x2f, 0x25, 0x6d, 0xc3, 0x12,
- 0xdc, 0x66, 0x42, 0xb2, 0x15, 0xf6, 0xeb, 0x08, 0x7e, 0x0e, 0xbf, 0x96, 0x47, 0xc9, 0xa3, 0x76,
- 0xa5, 0xde, 0x26, 0xbf, 0xf9, 0x3d, 0x66, 0x32, 0x70, 0x9a, 0x23, 0xe6, 0x05, 0xcf, 0x98, 0x12,
- 0xd9, 0x1a, 0xa5, 0x28, 0x51, 0xf3, 0xd5, 0x5c, 0x73, 0x83, 0x1b, 0xbd, 0xe4, 0xa9, 0xd2, 0x58,
- 0x22, 0x05, 0x4f, 0x4a, 0x99, 0x12, 0xdd, 0x4e, 0x4d, 0x50, 0xb0, 0x05, 0x2f, 0x3c, 0x67, 0xf0,
- 0x49, 0xa0, 0xf7, 0xb4, 0x35, 0x98, 0x05, 0xfd, 0x2d, 0x37, 0x4b, 0x2d, 0x54, 0x89, 0x9a, 0x52,
- 0x68, 0x4a, 0xb6, 0xe6, 0x49, 0xab, 0x4f, 0x86, 0xff, 0x66, 0xae, 0xb6, 0x58, 0x59, 0x29, 0x9e,
- 0x10, 0x8f, 0xd9, 0x9a, 0x9e, 0xc0, 0xff, 0x95, 0x30, 0xaa, 0x60, 0xd5, 0xdc, 0xf1, 0x23, 0xd7,
- 0x8b, 0x03, 0xf6, 0x6c, 0x65, 0x7d, 0x88, 0x57, 0xc1, 0x58, 0xa0, 0x4c, 0x1a, 0x81, 0xb1, 0x83,
- 0xe8, 0x08, 0xda, 0x6e, 0x36, 0x93, 0x34, 0xfb, 0x8d, 0x61, 0x7c, 0xde, 0x4b, 0x77, 0x1b, 0xa4,
- 0x8f, 0xb6, 0xb3, 0x9b, 0x6c, 0x16, 0xa8, 0x83, 0x0f, 0x02, 0x47, 0x7b, 0x1b, 0xfc, 0x39, 0xe3,
- 0xf8, 0xc7, 0x3e, 0x72, 0xf6, 0x67, 0x75, 0xfb, 0x3d, 0x0b, 0x1f, 0x68, 0xee, 0x64, 0xa9, 0xab,
- 0x6d, 0x58, 0xf7, 0x12, 0xe2, 0x1a, 0x4c, 0x0f, 0xa1, 0xf1, 0xca, 0xab, 0x10, 0x62, 0x4b, 0x7a,
- 0x0c, 0xad, 0x37, 0x56, 0x6c, 0xb6, 0x1f, 0xe0, 0x1f, 0x57, 0xd1, 0x05, 0x99, 0x54, 0x70, 0xb0,
- 0xc4, 0x75, 0x2d, 0x72, 0xd2, 0xd9, 0xcb, 0x9c, 0xda, 0x9b, 0x4c, 0xc9, 0xcb, 0x4d, 0x60, 0xe5,
- 0x58, 0x30, 0x99, 0xa7, 0xa8, 0xf3, 0x2c, 0xe7, 0xd2, 0x5d, 0x2c, 0xf3, 0x2d, 0xa6, 0x84, 0xf9,
- 0x7d, 0x7d, 0xcd, 0xcd, 0x75, 0xfd, 0xf1, 0x45, 0xc8, 0x7b, 0xd4, 0xbc, 0x1f, 0x4f, 0x1f, 0x16,
- 0x6d, 0xa7, 0x1c, 0x7d, 0x07, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xfb, 0xfb, 0x11, 0x36, 0x02, 0x00,
- 0x00,
+var fileDescriptor_monitored_resource_fb985cf78be5ad98 = []byte{
+ // 415 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x4d, 0xab, 0xd3, 0x40,
+ 0x14, 0x65, 0xd2, 0x0f, 0xf0, 0xa6, 0x7e, 0x0d, 0x52, 0x63, 0xea, 0xa2, 0xd6, 0x4d, 0xdd, 0x24,
+ 0xd0, 0x22, 0xf8, 0xb9, 0x68, 0x55, 0x44, 0xb0, 0x52, 0x22, 0xba, 0x70, 0x13, 0xa6, 0xc9, 0x18,
+ 0x82, 0x49, 0x26, 0xcc, 0x4c, 0x84, 0xfc, 0x1d, 0xc1, 0xdf, 0xe1, 0x5f, 0x72, 0xe9, 0x52, 0x32,
+ 0x33, 0x69, 0xd3, 0x97, 0xc7, 0x83, 0xb7, 0xbb, 0xf7, 0xdc, 0x73, 0xcf, 0x3d, 0x27, 0x43, 0xe0,
+ 0x71, 0xc2, 0x58, 0x92, 0x51, 0x9f, 0x94, 0xa9, 0x9f, 0xb3, 0x22, 0x95, 0x8c, 0xd3, 0x38, 0xe4,
+ 0x54, 0xb0, 0x8a, 0x47, 0xd4, 0x2b, 0x39, 0x93, 0x0c, 0x83, 0x26, 0x79, 0xa4, 0x4c, 0xdd, 0x69,
+ 0x67, 0x21, 0x23, 0x07, 0x9a, 0x69, 0x8e, 0xfb, 0xd0, 0xe0, 0xaa, 0x3b, 0x54, 0xdf, 0x7d, 0x21,
+ 0x79, 0x15, 0x49, 0x3d, 0x5d, 0xfc, 0x41, 0x30, 0xdb, 0xb5, 0xf2, 0x81, 0x51, 0x7f, 0x4b, 0x45,
+ 0xc4, 0xd3, 0x52, 0x32, 0x8e, 0x31, 0x0c, 0x0b, 0x92, 0x53, 0x67, 0x34, 0x47, 0xcb, 0x1b, 0x81,
+ 0xaa, 0x1b, 0x4c, 0xd6, 0x25, 0x75, 0x90, 0xc6, 0x9a, 0x1a, 0x3f, 0x82, 0x49, 0x9c, 0x8a, 0x32,
+ 0x23, 0x75, 0xa8, 0xf8, 0x96, 0x9a, 0xd9, 0x06, 0xfb, 0xd4, 0xac, 0xcd, 0xc1, 0x8e, 0x8d, 0x70,
+ 0xca, 0x0a, 0x67, 0x60, 0x18, 0x27, 0x08, 0xaf, 0x61, 0xac, 0x9c, 0x0b, 0x67, 0x38, 0x1f, 0x2c,
+ 0xed, 0xd5, 0xcc, 0x3b, 0xe5, 0xf3, 0x3e, 0x36, 0x93, 0x93, 0xb3, 0xc0, 0x50, 0x17, 0xbf, 0x11,
+ 0xdc, 0xed, 0x25, 0xb8, 0xd4, 0xe3, 0xe6, 0x28, 0x6f, 0x29, 0xf9, 0x27, 0x5d, 0xf9, 0x9e, 0x84,
+ 0x3e, 0x28, 0xde, 0x15, 0x92, 0xd7, 0xed, 0x31, 0xf7, 0x39, 0xd8, 0x1d, 0x18, 0xdf, 0x81, 0xc1,
+ 0x0f, 0x5a, 0x9b, 0x23, 0x4d, 0x89, 0xef, 0xc1, 0xe8, 0x27, 0xc9, 0xaa, 0xf6, 0x03, 0xe8, 0xe6,
+ 0x85, 0xf5, 0x0c, 0x2d, 0xfe, 0x22, 0x78, 0xd0, 0x3b, 0xb2, 0xa3, 0x92, 0xc4, 0x44, 0x12, 0xfc,
+ 0x0a, 0x6e, 0x8a, 0x5a, 0x48, 0x9a, 0x87, 0xc6, 0x62, 0xa3, 0x69, 0xaf, 0xee, 0xb7, 0x16, 0xdb,
+ 0xd7, 0xf3, 0x3e, 0xab, 0xd7, 0x0b, 0x26, 0x9a, 0xad, 0xcd, 0xe0, 0xaf, 0x60, 0x57, 0x82, 0xf2,
+ 0xf0, 0x2c, 0xde, 0xd3, 0x2b, 0xe3, 0xb5, 0x97, 0xbd, 0x2f, 0x82, 0xf2, 0x6e, 0x54, 0xa8, 0x8e,
+ 0x80, 0xfb, 0x1a, 0x6e, 0x5f, 0x18, 0x5f, 0x27, 0xf2, 0xb6, 0x86, 0x5b, 0x11, 0xcb, 0x3b, 0x36,
+ 0xb6, 0xd3, 0x9e, 0x8f, 0x7d, 0x13, 0x6c, 0x8f, 0xbe, 0xbd, 0x31, 0xac, 0x84, 0x65, 0xa4, 0x48,
+ 0x3c, 0xc6, 0x13, 0x3f, 0xa1, 0x85, 0x8a, 0xed, 0xeb, 0x11, 0x29, 0x53, 0x71, 0xfe, 0x3b, 0x70,
+ 0x2a, 0x5e, 0x76, 0x9b, 0x7f, 0x08, 0xfd, 0xb2, 0x86, 0xef, 0x37, 0xfb, 0x0f, 0x87, 0xb1, 0xda,
+ 0x5c, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x10, 0x16, 0x7c, 0xe9, 0x47, 0x03, 0x00, 0x00,
}
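
For reference, the gce_instance example in the MonitoredResource comment above translates directly into the regenerated Go types. A small hypothetical helper, shown only for orientation (not part of this diff):

package monitoredresexample

import (
	monitoredres "google.golang.org/genproto/googleapis/api/monitoredres"
)

// gceInstanceResource builds the example from the MonitoredResource comment:
// a Compute Engine VM identified by the labels its descriptor declares.
func gceInstanceResource(projectID, instanceID, zone string) *monitoredres.MonitoredResource {
	return &monitoredres.MonitoredResource{
		Type: "gce_instance",
		Labels: map[string]string{
			"project_id":  projectID,
			"instance_id": instanceID,
			"zone":        zone,
		},
	}
}

The new MonitoredResourceMetadata message introduced by this update carries auxiliary system and user labels separately; it is not needed simply to identify a resource.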
diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go
new file mode 100644
index 00000000..9e79ae7d
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go
@@ -0,0 +1,1391 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/devtools/cloudtrace/v2/trace.proto
+
+package cloudtrace // import "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2"
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import timestamp "github.com/golang/protobuf/ptypes/timestamp"
+import wrappers "github.com/golang/protobuf/ptypes/wrappers"
+import _ "google.golang.org/genproto/googleapis/api/annotations"
+import status "google.golang.org/genproto/googleapis/rpc/status"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Indicates whether the message was sent or received.
+type Span_TimeEvent_MessageEvent_Type int32
+
+const (
+ // Unknown event type.
+ Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0
+ // Indicates a sent message.
+ Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1
+ // Indicates a received message.
+ Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2
+)
+
+var Span_TimeEvent_MessageEvent_Type_name = map[int32]string{
+ 0: "TYPE_UNSPECIFIED",
+ 1: "SENT",
+ 2: "RECEIVED",
+}
+var Span_TimeEvent_MessageEvent_Type_value = map[string]int32{
+ "TYPE_UNSPECIFIED": 0,
+ "SENT": 1,
+ "RECEIVED": 2,
+}
+
+func (x Span_TimeEvent_MessageEvent_Type) String() string {
+ return proto.EnumName(Span_TimeEvent_MessageEvent_Type_name, int32(x))
+}
+func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_trace_3b0c20aef2ae619c, []int{0, 1, 1, 0}
+}
+
+// The relationship of the current span relative to the linked span: child,
+// parent, or unspecified.
+type Span_Link_Type int32
+
+const (
+ // The relationship of the two spans is unknown.
+ Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0
+ // The linked span is a child of the current span.
+ Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1
+ // The linked span is a parent of the current span.
+ Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2
+)
+
+var Span_Link_Type_name = map[int32]string{
+ 0: "TYPE_UNSPECIFIED",
+ 1: "CHILD_LINKED_SPAN",
+ 2: "PARENT_LINKED_SPAN",
+}
+var Span_Link_Type_value = map[string]int32{
+ "TYPE_UNSPECIFIED": 0,
+ "CHILD_LINKED_SPAN": 1,
+ "PARENT_LINKED_SPAN": 2,
+}
+
+func (x Span_Link_Type) String() string {
+ return proto.EnumName(Span_Link_Type_name, int32(x))
+}
+func (Span_Link_Type) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_trace_3b0c20aef2ae619c, []int{0, 3, 0}
+}
+
+// A span represents a single operation within a trace. Spans can be
+// nested to form a trace tree. Often, a trace contains a root span
+// that describes the end-to-end latency, and one or more subspans for
+// its sub-operations. A trace can also contain multiple root spans,
+// or none at all. Spans do not need to be contiguous—there may be
+// gaps or overlaps between spans in a trace.
+type Span struct {
+ // The resource name of the span in the following format:
+ //
+ // projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID]
+ //
+ // [TRACE_ID] is a unique identifier for a trace within a project;
+ // it is a 32-character hexadecimal encoding of a 16-byte array.
+ //
+ // [SPAN_ID] is a unique identifier for a span within a trace; it
+ // is a 16-character hexadecimal encoding of an 8-byte array.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The [SPAN_ID] portion of the span's resource name.
+ SpanId string `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+ // The [SPAN_ID] of this span's parent span. If this is a root span,
+ // then this field must be empty.
+ ParentSpanId string `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"`
+ // A description of the span's operation (up to 128 bytes).
+ // Stackdriver Trace displays the description in the
+ // {% dynamic print site_values.console_name %}.
+ // For example, the display name can be a qualified method name or a file name
+ // and a line number where the operation is called. A best practice is to use
+ // the same display name within an application and at the same call point.
+ // This makes it easier to correlate spans in different traces.
+ DisplayName *TruncatableString `protobuf:"bytes,4,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // The start time of the span. On the client side, this is the time kept by
+ // the local machine where the span execution starts. On the server side, this
+ // is the time when the server's application handler starts running.
+ StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+ // The end time of the span. On the client side, this is the time kept by
+ // the local machine where the span execution ends. On the server side, this
+ // is the time when the server application handler stops running.
+ EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+ // A set of attributes on the span. You can have up to 32 attributes per
+ // span.
+ Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"`
+ // Stack trace captured at the start of the span.
+ StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"`
+ // A set of time events. You can have up to 32 annotations and 128 message
+ // events per span.
+ TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"`
+ // Links associated with the span. You can have up to 128 links per Span.
+ Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"`
+ // An optional final status for this span.
+ Status *status.Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"`
+ // (Optional) Set this parameter to indicate whether this span is in
+ // the same process as its parent. If you do not set this parameter,
+ // Stackdriver Trace is unable to take advantage of this helpful
+ // information.
+ SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"`
+ // An optional number of child spans that were generated while this span
+ // was active. If set, allows implementation to detect missing child spans.
+ ChildSpanCount *wrappers.Int32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span) Reset() { *m = Span{} }
+func (m *Span) String() string { return proto.CompactTextString(m) }
+func (*Span) ProtoMessage() {}
+func (*Span) Descriptor() ([]byte, []int) {
+ return fileDescriptor_trace_3b0c20aef2ae619c, []int{0}
+}
+func (m *Span) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span.Unmarshal(m, b)
+}
+func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span.Marshal(b, m, deterministic)
+}
+func (dst *Span) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span.Merge(dst, src)
+}
+func (m *Span) XXX_Size() int {
+ return xxx_messageInfo_Span.Size(m)
+}
+func (m *Span) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span proto.InternalMessageInfo
+
+func (m *Span) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Span) GetSpanId() string {
+ if m != nil {
+ return m.SpanId
+ }
+ return ""
+}
+
+func (m *Span) GetParentSpanId() string {
+ if m != nil {
+ return m.ParentSpanId
+ }
+ return ""
+}
+
+func (m *Span) GetDisplayName() *TruncatableString {
+ if m != nil {
+ return m.DisplayName
+ }
+ return nil
+}
+
+func (m *Span) GetStartTime() *timestamp.Timestamp {
+ if m != nil {
+ return m.StartTime
+ }
+ return nil
+}
+
+func (m *Span) GetEndTime() *timestamp.Timestamp {
+ if m != nil {
+ return m.EndTime
+ }
+ return nil
+}
+
+func (m *Span) GetAttributes() *Span_Attributes {
+ if m != nil {
+ return m.Attributes
+ }
+ return nil
+}
+
+func (m *Span) GetStackTrace() *StackTrace {
+ if m != nil {
+ return m.StackTrace
+ }
+ return nil
+}
+
+func (m *Span) GetTimeEvents() *Span_TimeEvents {
+ if m != nil {
+ return m.TimeEvents
+ }
+ return nil
+}
+
+func (m *Span) GetLinks() *Span_Links {
+ if m != nil {
+ return m.Links
+ }
+ return nil
+}
+
+func (m *Span) GetStatus() *status.Status {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue {
+ if m != nil {
+ return m.SameProcessAsParentSpan
+ }
+ return nil
+}
+
+func (m *Span) GetChildSpanCount() *wrappers.Int32Value {
+ if m != nil {
+ return m.ChildSpanCount
+ }
+ return nil
+}
+
+// A set of attributes, each in the format `[KEY]:[VALUE]`.
+type Span_Attributes struct {
+ // The set of attributes. Each attribute's key can be up to 128 bytes
+ // long. The value can be a string up to 256 bytes, an integer, or the
+ // Boolean values `true` and `false`. For example:
+ //
+ // "/instance_id": "my-instance"
+ // "/http/user_agent": ""
+ // "/http/request_bytes": 300
+ // "abc.com/myattribute": true
+ AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // The number of attributes that were discarded. Attributes can be discarded
+ // because their keys are too long or because there are too many attributes.
+ // If this value is 0 then all attributes are valid.
+ DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_Attributes) Reset() { *m = Span_Attributes{} }
+func (m *Span_Attributes) String() string { return proto.CompactTextString(m) }
+func (*Span_Attributes) ProtoMessage() {}
+func (*Span_Attributes) Descriptor() ([]byte, []int) {
+ return fileDescriptor_trace_3b0c20aef2ae619c, []int{0, 0}
+}
+func (m *Span_Attributes) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_Attributes.Unmarshal(m, b)
+}
+func (m *Span_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_Attributes.Marshal(b, m, deterministic)
+}
+func (dst *Span_Attributes) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_Attributes.Merge(dst, src)
+}
+func (m *Span_Attributes) XXX_Size() int {
+ return xxx_messageInfo_Span_Attributes.Size(m)
+}
+func (m *Span_Attributes) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_Attributes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_Attributes proto.InternalMessageInfo
+
+func (m *Span_Attributes) GetAttributeMap() map[string]*AttributeValue {
+ if m != nil {
+ return m.AttributeMap
+ }
+ return nil
+}
+
+func (m *Span_Attributes) GetDroppedAttributesCount() int32 {
+ if m != nil {
+ return m.DroppedAttributesCount
+ }
+ return 0
+}
+
+// A time-stamped annotation or message event in the Span.
+type Span_TimeEvent struct {
+ // The timestamp indicating the time the event occurred.
+ Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
+ // A `TimeEvent` can contain either an `Annotation` object or a
+ // `MessageEvent` object, but not both.
+ //
+ // Types that are valid to be assigned to Value:
+ // *Span_TimeEvent_Annotation_
+ // *Span_TimeEvent_MessageEvent_
+ Value isSpan_TimeEvent_Value `protobuf_oneof:"value"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} }
+func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) }
+func (*Span_TimeEvent) ProtoMessage() {}
+func (*Span_TimeEvent) Descriptor() ([]byte, []int) {
+ return fileDescriptor_trace_3b0c20aef2ae619c, []int{0, 1}
+}
+func (m *Span_TimeEvent) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_TimeEvent.Unmarshal(m, b)
+}
+func (m *Span_TimeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_TimeEvent.Marshal(b, m, deterministic)
+}
+func (dst *Span_TimeEvent) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_TimeEvent.Merge(dst, src)
+}
+func (m *Span_TimeEvent) XXX_Size() int {
+ return xxx_messageInfo_Span_TimeEvent.Size(m)
+}
+func (m *Span_TimeEvent) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_TimeEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_TimeEvent proto.InternalMessageInfo
+
+func (m *Span_TimeEvent) GetTime() *timestamp.Timestamp {
+ if m != nil {
+ return m.Time
+ }
+ return nil
+}
+
+type isSpan_TimeEvent_Value interface {
+ isSpan_TimeEvent_Value()
+}
+
+type Span_TimeEvent_Annotation_ struct {
+ Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"`
+}
+
+type Span_TimeEvent_MessageEvent_ struct {
+ MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"`
+}
+
+func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {}
+
+func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {}
+
+func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation {
+ if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok {
+ return x.Annotation
+ }
+ return nil
+}
+
+func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent {
+ if x, ok := m.GetValue().(*Span_TimeEvent_MessageEvent_); ok {
+ return x.MessageEvent
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Span_TimeEvent) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Span_TimeEvent_OneofMarshaler, _Span_TimeEvent_OneofUnmarshaler, _Span_TimeEvent_OneofSizer, []interface{}{
+ (*Span_TimeEvent_Annotation_)(nil),
+ (*Span_TimeEvent_MessageEvent_)(nil),
+ }
+}
+
+func _Span_TimeEvent_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Span_TimeEvent)
+ // value
+ switch x := m.Value.(type) {
+ case *Span_TimeEvent_Annotation_:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Annotation); err != nil {
+ return err
+ }
+ case *Span_TimeEvent_MessageEvent_:
+ b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.MessageEvent); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("Span_TimeEvent.Value has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Span_TimeEvent_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Span_TimeEvent)
+ switch tag {
+ case 2: // value.annotation
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Span_TimeEvent_Annotation)
+ err := b.DecodeMessage(msg)
+ m.Value = &Span_TimeEvent_Annotation_{msg}
+ return true, err
+ case 3: // value.message_event
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Span_TimeEvent_MessageEvent)
+ err := b.DecodeMessage(msg)
+ m.Value = &Span_TimeEvent_MessageEvent_{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Span_TimeEvent_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Span_TimeEvent)
+ // value
+ switch x := m.Value.(type) {
+ case *Span_TimeEvent_Annotation_:
+ s := proto.Size(x.Annotation)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Span_TimeEvent_MessageEvent_:
+ s := proto.Size(x.MessageEvent)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// Text annotation with a set of attributes.
+type Span_TimeEvent_Annotation struct {
+ // A user-supplied message describing the event. The maximum length for
+ // the description is 256 bytes.
+ Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ // A set of attributes on the annotation. You can have up to 4 attributes
+ // per Annotation.
+ Attributes *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_TimeEvent_Annotation) Reset() { *m = Span_TimeEvent_Annotation{} }
+func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) }
+func (*Span_TimeEvent_Annotation) ProtoMessage() {}
+func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_trace_3b0c20aef2ae619c, []int{0, 1, 0}
+}
+func (m *Span_TimeEvent_Annotation) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_TimeEvent_Annotation.Unmarshal(m, b)
+}
+func (m *Span_TimeEvent_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_TimeEvent_Annotation.Marshal(b, m, deterministic)
+}
+func (dst *Span_TimeEvent_Annotation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_TimeEvent_Annotation.Merge(dst, src)
+}
+func (m *Span_TimeEvent_Annotation) XXX_Size() int {
+ return xxx_messageInfo_Span_TimeEvent_Annotation.Size(m)
+}
+func (m *Span_TimeEvent_Annotation) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_TimeEvent_Annotation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_TimeEvent_Annotation proto.InternalMessageInfo
+
+func (m *Span_TimeEvent_Annotation) GetDescription() *TruncatableString {
+ if m != nil {
+ return m.Description
+ }
+ return nil
+}
+
+func (m *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes {
+ if m != nil {
+ return m.Attributes
+ }
+ return nil
+}
+
+// An event describing a message sent/received between Spans.
+type Span_TimeEvent_MessageEvent struct {
+ // Type of MessageEvent. Indicates whether the message was sent or
+ // received.
+ Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=google.devtools.cloudtrace.v2.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"`
+ // An identifier for the MessageEvent's message that can be used to match
+ // SENT and RECEIVED MessageEvents. It is recommended to be unique within
+ // a Span.
+ Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
+ // The number of uncompressed bytes sent or received.
+ UncompressedSizeBytes int64 `protobuf:"varint,3,opt,name=uncompressed_size_bytes,json=uncompressedSizeBytes,proto3" json:"uncompressed_size_bytes,omitempty"`
+	// The number of compressed bytes sent or received. If missing, assumed to
+ // be the same size as uncompressed.
+ CompressedSizeBytes int64 `protobuf:"varint,4,opt,name=compressed_size_bytes,json=compressedSizeBytes,proto3" json:"compressed_size_bytes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_TimeEvent_MessageEvent) Reset() { *m = Span_TimeEvent_MessageEvent{} }
+func (m *Span_TimeEvent_MessageEvent) String() string { return proto.CompactTextString(m) }
+func (*Span_TimeEvent_MessageEvent) ProtoMessage() {}
+func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) {
+ return fileDescriptor_trace_3b0c20aef2ae619c, []int{0, 1, 1}
+}
+func (m *Span_TimeEvent_MessageEvent) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_TimeEvent_MessageEvent.Unmarshal(m, b)
+}
+func (m *Span_TimeEvent_MessageEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_TimeEvent_MessageEvent.Marshal(b, m, deterministic)
+}
+func (dst *Span_TimeEvent_MessageEvent) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_TimeEvent_MessageEvent.Merge(dst, src)
+}
+func (m *Span_TimeEvent_MessageEvent) XXX_Size() int {
+ return xxx_messageInfo_Span_TimeEvent_MessageEvent.Size(m)
+}
+func (m *Span_TimeEvent_MessageEvent) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_TimeEvent_MessageEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_TimeEvent_MessageEvent proto.InternalMessageInfo
+
+func (m *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type {
+ if m != nil {
+ return m.Type
+ }
+ return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED
+}
+
+func (m *Span_TimeEvent_MessageEvent) GetId() int64 {
+ if m != nil {
+ return m.Id
+ }
+ return 0
+}
+
+func (m *Span_TimeEvent_MessageEvent) GetUncompressedSizeBytes() int64 {
+ if m != nil {
+ return m.UncompressedSizeBytes
+ }
+ return 0
+}
+
+func (m *Span_TimeEvent_MessageEvent) GetCompressedSizeBytes() int64 {
+ if m != nil {
+ return m.CompressedSizeBytes
+ }
+ return 0
+}
+
+// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation
+// on the span, consisting of either user-supplied key:value pairs, or
+// details of a message sent/received between Spans.
+type Span_TimeEvents struct {
+ // A collection of `TimeEvent`s.
+ TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"`
+ // The number of dropped annotations in all the included time events.
+ // If the value is 0, then no annotations were dropped.
+ DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"`
+ // The number of dropped message events in all the included time events.
+ // If the value is 0, then no message events were dropped.
+ DroppedMessageEventsCount int32 `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_TimeEvents) Reset() { *m = Span_TimeEvents{} }
+func (m *Span_TimeEvents) String() string { return proto.CompactTextString(m) }
+func (*Span_TimeEvents) ProtoMessage() {}
+func (*Span_TimeEvents) Descriptor() ([]byte, []int) {
+ return fileDescriptor_trace_3b0c20aef2ae619c, []int{0, 2}
+}
+func (m *Span_TimeEvents) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_TimeEvents.Unmarshal(m, b)
+}
+func (m *Span_TimeEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_TimeEvents.Marshal(b, m, deterministic)
+}
+func (dst *Span_TimeEvents) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_TimeEvents.Merge(dst, src)
+}
+func (m *Span_TimeEvents) XXX_Size() int {
+ return xxx_messageInfo_Span_TimeEvents.Size(m)
+}
+func (m *Span_TimeEvents) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_TimeEvents.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_TimeEvents proto.InternalMessageInfo
+
+func (m *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent {
+ if m != nil {
+ return m.TimeEvent
+ }
+ return nil
+}
+
+func (m *Span_TimeEvents) GetDroppedAnnotationsCount() int32 {
+ if m != nil {
+ return m.DroppedAnnotationsCount
+ }
+ return 0
+}
+
+func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 {
+ if m != nil {
+ return m.DroppedMessageEventsCount
+ }
+ return 0
+}
+
+// A pointer from the current span to another span in the same trace or in a
+// different trace. For example, this can be used in batching operations,
+// where a single batch handler processes multiple requests from different
+// traces or when the handler receives a request from a different project.
+type Span_Link struct {
+ // The [TRACE_ID] for a trace within a project.
+ TraceId string `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+ // The [SPAN_ID] for a span within a trace.
+ SpanId string `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+ // The relationship of the current span relative to the linked span.
+ Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=google.devtools.cloudtrace.v2.Span_Link_Type" json:"type,omitempty"`
+	// A set of attributes on the link. You can have up to 32 attributes per
+ // link.
+ Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_Link) Reset() { *m = Span_Link{} }
+func (m *Span_Link) String() string { return proto.CompactTextString(m) }
+func (*Span_Link) ProtoMessage() {}
+func (*Span_Link) Descriptor() ([]byte, []int) {
+ return fileDescriptor_trace_3b0c20aef2ae619c, []int{0, 3}
+}
+func (m *Span_Link) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_Link.Unmarshal(m, b)
+}
+func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic)
+}
+func (dst *Span_Link) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_Link.Merge(dst, src)
+}
+func (m *Span_Link) XXX_Size() int {
+ return xxx_messageInfo_Span_Link.Size(m)
+}
+func (m *Span_Link) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_Link.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_Link proto.InternalMessageInfo
+
+func (m *Span_Link) GetTraceId() string {
+ if m != nil {
+ return m.TraceId
+ }
+ return ""
+}
+
+func (m *Span_Link) GetSpanId() string {
+ if m != nil {
+ return m.SpanId
+ }
+ return ""
+}
+
+func (m *Span_Link) GetType() Span_Link_Type {
+ if m != nil {
+ return m.Type
+ }
+ return Span_Link_TYPE_UNSPECIFIED
+}
+
+func (m *Span_Link) GetAttributes() *Span_Attributes {
+ if m != nil {
+ return m.Attributes
+ }
+ return nil
+}
+
+// A collection of links, which are references from this span to a span
+// in the same or different trace.
+type Span_Links struct {
+ // A collection of links.
+ Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"`
+ // The number of dropped links after the maximum size was enforced. If
+ // this value is 0, then no links were dropped.
+ DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_Links) Reset() { *m = Span_Links{} }
+func (m *Span_Links) String() string { return proto.CompactTextString(m) }
+func (*Span_Links) ProtoMessage() {}
+func (*Span_Links) Descriptor() ([]byte, []int) {
+ return fileDescriptor_trace_3b0c20aef2ae619c, []int{0, 4}
+}
+func (m *Span_Links) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_Links.Unmarshal(m, b)
+}
+func (m *Span_Links) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_Links.Marshal(b, m, deterministic)
+}
+func (dst *Span_Links) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_Links.Merge(dst, src)
+}
+func (m *Span_Links) XXX_Size() int {
+ return xxx_messageInfo_Span_Links.Size(m)
+}
+func (m *Span_Links) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_Links.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_Links proto.InternalMessageInfo
+
+func (m *Span_Links) GetLink() []*Span_Link {
+ if m != nil {
+ return m.Link
+ }
+ return nil
+}
+
+func (m *Span_Links) GetDroppedLinksCount() int32 {
+ if m != nil {
+ return m.DroppedLinksCount
+ }
+ return 0
+}
+
+// The allowed types for [VALUE] in a `[KEY]:[VALUE]` attribute.
+type AttributeValue struct {
+ // The type of the value.
+ //
+ // Types that are valid to be assigned to Value:
+ // *AttributeValue_StringValue
+ // *AttributeValue_IntValue
+ // *AttributeValue_BoolValue
+ Value isAttributeValue_Value `protobuf_oneof:"value"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AttributeValue) Reset() { *m = AttributeValue{} }
+func (m *AttributeValue) String() string { return proto.CompactTextString(m) }
+func (*AttributeValue) ProtoMessage() {}
+func (*AttributeValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_trace_3b0c20aef2ae619c, []int{1}
+}
+func (m *AttributeValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AttributeValue.Unmarshal(m, b)
+}
+func (m *AttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AttributeValue.Marshal(b, m, deterministic)
+}
+func (dst *AttributeValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AttributeValue.Merge(dst, src)
+}
+func (m *AttributeValue) XXX_Size() int {
+ return xxx_messageInfo_AttributeValue.Size(m)
+}
+func (m *AttributeValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_AttributeValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AttributeValue proto.InternalMessageInfo
+
+type isAttributeValue_Value interface {
+ isAttributeValue_Value()
+}
+
+type AttributeValue_StringValue struct {
+ StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type AttributeValue_IntValue struct {
+ IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"`
+}
+
+type AttributeValue_BoolValue struct {
+ BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+func (*AttributeValue_StringValue) isAttributeValue_Value() {}
+
+func (*AttributeValue_IntValue) isAttributeValue_Value() {}
+
+func (*AttributeValue_BoolValue) isAttributeValue_Value() {}
+
+func (m *AttributeValue) GetValue() isAttributeValue_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *AttributeValue) GetStringValue() *TruncatableString {
+ if x, ok := m.GetValue().(*AttributeValue_StringValue); ok {
+ return x.StringValue
+ }
+ return nil
+}
+
+func (m *AttributeValue) GetIntValue() int64 {
+ if x, ok := m.GetValue().(*AttributeValue_IntValue); ok {
+ return x.IntValue
+ }
+ return 0
+}
+
+func (m *AttributeValue) GetBoolValue() bool {
+ if x, ok := m.GetValue().(*AttributeValue_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*AttributeValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _AttributeValue_OneofMarshaler, _AttributeValue_OneofUnmarshaler, _AttributeValue_OneofSizer, []interface{}{
+ (*AttributeValue_StringValue)(nil),
+ (*AttributeValue_IntValue)(nil),
+ (*AttributeValue_BoolValue)(nil),
+ }
+}
+
+func _AttributeValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*AttributeValue)
+ // value
+ switch x := m.Value.(type) {
+ case *AttributeValue_StringValue:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.StringValue); err != nil {
+ return err
+ }
+ case *AttributeValue_IntValue:
+ b.EncodeVarint(2<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.IntValue))
+ case *AttributeValue_BoolValue:
+ t := uint64(0)
+ if x.BoolValue {
+ t = 1
+ }
+ b.EncodeVarint(3<<3 | proto.WireVarint)
+ b.EncodeVarint(t)
+ case nil:
+ default:
+ return fmt.Errorf("AttributeValue.Value has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _AttributeValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*AttributeValue)
+ switch tag {
+ case 1: // value.string_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(TruncatableString)
+ err := b.DecodeMessage(msg)
+ m.Value = &AttributeValue_StringValue{msg}
+ return true, err
+ case 2: // value.int_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Value = &AttributeValue_IntValue{int64(x)}
+ return true, err
+ case 3: // value.bool_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Value = &AttributeValue_BoolValue{x != 0}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _AttributeValue_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*AttributeValue)
+ // value
+ switch x := m.Value.(type) {
+ case *AttributeValue_StringValue:
+ s := proto.Size(x.StringValue)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *AttributeValue_IntValue:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(x.IntValue))
+ case *AttributeValue_BoolValue:
+ n += 1 // tag and wire
+ n += 1
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// A call stack appearing in a trace.
+type StackTrace struct {
+ // Stack frames in this stack trace. A maximum of 128 frames are allowed.
+ StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"`
+ // The hash ID is used to conserve network bandwidth for duplicate
+ // stack traces within a single trace.
+ //
+ // Often multiple spans will have identical stack traces.
+ // The first occurrence of a stack trace should contain both the
+ // `stackFrame` content and a value in `stackTraceHashId`.
+ //
+ // Subsequent spans within the same request can refer
+ // to that stack trace by only setting `stackTraceHashId`.
+ StackTraceHashId int64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StackTrace) Reset() { *m = StackTrace{} }
+func (m *StackTrace) String() string { return proto.CompactTextString(m) }
+func (*StackTrace) ProtoMessage() {}
+func (*StackTrace) Descriptor() ([]byte, []int) {
+ return fileDescriptor_trace_3b0c20aef2ae619c, []int{2}
+}
+func (m *StackTrace) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StackTrace.Unmarshal(m, b)
+}
+func (m *StackTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StackTrace.Marshal(b, m, deterministic)
+}
+func (dst *StackTrace) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StackTrace.Merge(dst, src)
+}
+func (m *StackTrace) XXX_Size() int {
+ return xxx_messageInfo_StackTrace.Size(m)
+}
+func (m *StackTrace) XXX_DiscardUnknown() {
+ xxx_messageInfo_StackTrace.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StackTrace proto.InternalMessageInfo
+
+func (m *StackTrace) GetStackFrames() *StackTrace_StackFrames {
+ if m != nil {
+ return m.StackFrames
+ }
+ return nil
+}
+
+func (m *StackTrace) GetStackTraceHashId() int64 {
+ if m != nil {
+ return m.StackTraceHashId
+ }
+ return 0
+}
+
+// Represents a single stack frame in a stack trace.
+type StackTrace_StackFrame struct {
+ // The fully-qualified name that uniquely identifies the function or
+ // method that is active in this frame (up to 1024 bytes).
+ FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"`
+ // An un-mangled function name, if `function_name` is
+ // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can
+ // be fully-qualified (up to 1024 bytes).
+ OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"`
+ // The name of the source file where the function call appears (up to 256
+ // bytes).
+ FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"`
+ // The line number in `file_name` where the function call appears.
+ LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"`
+ // The column number where the function call appears, if available.
+ // This is important in JavaScript because of its anonymous functions.
+ ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"`
+ // The binary module from where the code was loaded.
+ LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"`
+ // The version of the deployed source code (up to 128 bytes).
+ SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} }
+func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) }
+func (*StackTrace_StackFrame) ProtoMessage() {}
+func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) {
+ return fileDescriptor_trace_3b0c20aef2ae619c, []int{2, 0}
+}
+func (m *StackTrace_StackFrame) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StackTrace_StackFrame.Unmarshal(m, b)
+}
+func (m *StackTrace_StackFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StackTrace_StackFrame.Marshal(b, m, deterministic)
+}
+func (dst *StackTrace_StackFrame) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StackTrace_StackFrame.Merge(dst, src)
+}
+func (m *StackTrace_StackFrame) XXX_Size() int {
+ return xxx_messageInfo_StackTrace_StackFrame.Size(m)
+}
+func (m *StackTrace_StackFrame) XXX_DiscardUnknown() {
+ xxx_messageInfo_StackTrace_StackFrame.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StackTrace_StackFrame proto.InternalMessageInfo
+
+func (m *StackTrace_StackFrame) GetFunctionName() *TruncatableString {
+ if m != nil {
+ return m.FunctionName
+ }
+ return nil
+}
+
+func (m *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString {
+ if m != nil {
+ return m.OriginalFunctionName
+ }
+ return nil
+}
+
+func (m *StackTrace_StackFrame) GetFileName() *TruncatableString {
+ if m != nil {
+ return m.FileName
+ }
+ return nil
+}
+
+func (m *StackTrace_StackFrame) GetLineNumber() int64 {
+ if m != nil {
+ return m.LineNumber
+ }
+ return 0
+}
+
+func (m *StackTrace_StackFrame) GetColumnNumber() int64 {
+ if m != nil {
+ return m.ColumnNumber
+ }
+ return 0
+}
+
+func (m *StackTrace_StackFrame) GetLoadModule() *Module {
+ if m != nil {
+ return m.LoadModule
+ }
+ return nil
+}
+
+func (m *StackTrace_StackFrame) GetSourceVersion() *TruncatableString {
+ if m != nil {
+ return m.SourceVersion
+ }
+ return nil
+}
+
+// A collection of stack frames, which can be truncated.
+type StackTrace_StackFrames struct {
+ // Stack frames in this call stack.
+ Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"`
+ // The number of stack frames that were dropped because there
+ // were too many stack frames.
+ // If this value is 0, then no stack frames were dropped.
+ DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StackTrace_StackFrames) Reset() { *m = StackTrace_StackFrames{} }
+func (m *StackTrace_StackFrames) String() string { return proto.CompactTextString(m) }
+func (*StackTrace_StackFrames) ProtoMessage() {}
+func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) {
+ return fileDescriptor_trace_3b0c20aef2ae619c, []int{2, 1}
+}
+func (m *StackTrace_StackFrames) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StackTrace_StackFrames.Unmarshal(m, b)
+}
+func (m *StackTrace_StackFrames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StackTrace_StackFrames.Marshal(b, m, deterministic)
+}
+func (dst *StackTrace_StackFrames) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StackTrace_StackFrames.Merge(dst, src)
+}
+func (m *StackTrace_StackFrames) XXX_Size() int {
+ return xxx_messageInfo_StackTrace_StackFrames.Size(m)
+}
+func (m *StackTrace_StackFrames) XXX_DiscardUnknown() {
+ xxx_messageInfo_StackTrace_StackFrames.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StackTrace_StackFrames proto.InternalMessageInfo
+
+func (m *StackTrace_StackFrames) GetFrame() []*StackTrace_StackFrame {
+ if m != nil {
+ return m.Frame
+ }
+ return nil
+}
+
+func (m *StackTrace_StackFrames) GetDroppedFramesCount() int32 {
+ if m != nil {
+ return m.DroppedFramesCount
+ }
+ return 0
+}
+
+// Binary module.
+type Module struct {
+ // For example: main binary, kernel modules, and dynamic libraries
+ // such as libc.so, sharedlib.so (up to 256 bytes).
+ Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"`
+ // A unique identifier for the module, usually a hash of its
+ // contents (up to 128 bytes).
+ BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Module) Reset() { *m = Module{} }
+func (m *Module) String() string { return proto.CompactTextString(m) }
+func (*Module) ProtoMessage() {}
+func (*Module) Descriptor() ([]byte, []int) {
+ return fileDescriptor_trace_3b0c20aef2ae619c, []int{3}
+}
+func (m *Module) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Module.Unmarshal(m, b)
+}
+func (m *Module) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Module.Marshal(b, m, deterministic)
+}
+func (dst *Module) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Module.Merge(dst, src)
+}
+func (m *Module) XXX_Size() int {
+ return xxx_messageInfo_Module.Size(m)
+}
+func (m *Module) XXX_DiscardUnknown() {
+ xxx_messageInfo_Module.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Module proto.InternalMessageInfo
+
+func (m *Module) GetModule() *TruncatableString {
+ if m != nil {
+ return m.Module
+ }
+ return nil
+}
+
+func (m *Module) GetBuildId() *TruncatableString {
+ if m != nil {
+ return m.BuildId
+ }
+ return nil
+}
+
+// Represents a string that might be shortened to a specified length.
+type TruncatableString struct {
+ // The shortened string. For example, if the original string is 500
+ // bytes long and the limit of the string is 128 bytes, then
+ // `value` contains the first 128 bytes of the 500-byte string.
+ //
+ // Truncation always happens on a UTF8 character boundary. If there
+ // are multi-byte characters in the string, then the length of the
+ // shortened string might be less than the size limit.
+ Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ // The number of bytes removed from the original string. If this
+ // value is 0, then the string was not shortened.
+ TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TruncatableString) Reset() { *m = TruncatableString{} }
+func (m *TruncatableString) String() string { return proto.CompactTextString(m) }
+func (*TruncatableString) ProtoMessage() {}
+func (*TruncatableString) Descriptor() ([]byte, []int) {
+ return fileDescriptor_trace_3b0c20aef2ae619c, []int{4}
+}
+func (m *TruncatableString) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TruncatableString.Unmarshal(m, b)
+}
+func (m *TruncatableString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TruncatableString.Marshal(b, m, deterministic)
+}
+func (dst *TruncatableString) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TruncatableString.Merge(dst, src)
+}
+func (m *TruncatableString) XXX_Size() int {
+ return xxx_messageInfo_TruncatableString.Size(m)
+}
+func (m *TruncatableString) XXX_DiscardUnknown() {
+ xxx_messageInfo_TruncatableString.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TruncatableString proto.InternalMessageInfo
+
+func (m *TruncatableString) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
+func (m *TruncatableString) GetTruncatedByteCount() int32 {
+ if m != nil {
+ return m.TruncatedByteCount
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*Span)(nil), "google.devtools.cloudtrace.v2.Span")
+ proto.RegisterType((*Span_Attributes)(nil), "google.devtools.cloudtrace.v2.Span.Attributes")
+ proto.RegisterMapType((map[string]*AttributeValue)(nil), "google.devtools.cloudtrace.v2.Span.Attributes.AttributeMapEntry")
+ proto.RegisterType((*Span_TimeEvent)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvent")
+ proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvent.Annotation")
+ proto.RegisterType((*Span_TimeEvent_MessageEvent)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent")
+ proto.RegisterType((*Span_TimeEvents)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvents")
+ proto.RegisterType((*Span_Link)(nil), "google.devtools.cloudtrace.v2.Span.Link")
+ proto.RegisterType((*Span_Links)(nil), "google.devtools.cloudtrace.v2.Span.Links")
+ proto.RegisterType((*AttributeValue)(nil), "google.devtools.cloudtrace.v2.AttributeValue")
+ proto.RegisterType((*StackTrace)(nil), "google.devtools.cloudtrace.v2.StackTrace")
+ proto.RegisterType((*StackTrace_StackFrame)(nil), "google.devtools.cloudtrace.v2.StackTrace.StackFrame")
+ proto.RegisterType((*StackTrace_StackFrames)(nil), "google.devtools.cloudtrace.v2.StackTrace.StackFrames")
+ proto.RegisterType((*Module)(nil), "google.devtools.cloudtrace.v2.Module")
+ proto.RegisterType((*TruncatableString)(nil), "google.devtools.cloudtrace.v2.TruncatableString")
+ proto.RegisterEnum("google.devtools.cloudtrace.v2.Span_TimeEvent_MessageEvent_Type", Span_TimeEvent_MessageEvent_Type_name, Span_TimeEvent_MessageEvent_Type_value)
+ proto.RegisterEnum("google.devtools.cloudtrace.v2.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value)
+}
+
+func init() {
+ proto.RegisterFile("google/devtools/cloudtrace/v2/trace.proto", fileDescriptor_trace_3b0c20aef2ae619c)
+}
+
+var fileDescriptor_trace_3b0c20aef2ae619c = []byte{
+ // 1425 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4b, 0x6f, 0xdb, 0xc6,
+ 0x16, 0x36, 0xf5, 0xd6, 0x91, 0x6c, 0xc8, 0x13, 0x3b, 0x56, 0x94, 0xe4, 0x26, 0xd7, 0xf7, 0x16,
+ 0x70, 0x0a, 0x98, 0x0a, 0x94, 0xa4, 0x48, 0xd3, 0x02, 0xa9, 0x1f, 0x72, 0xa4, 0xc4, 0x56, 0x05,
+ 0x4a, 0x71, 0xd3, 0x34, 0x00, 0x31, 0x22, 0xc7, 0x32, 0x11, 0x8a, 0x24, 0x38, 0x43, 0x17, 0xce,
+ 0xae, 0xeb, 0xae, 0xbb, 0x29, 0x50, 0x74, 0x59, 0x20, 0xab, 0xfc, 0x8e, 0x2e, 0xba, 0xed, 0x7f,
+ 0xe9, 0xaa, 0x98, 0x07, 0x49, 0x29, 0x2f, 0xdb, 0xca, 0x6e, 0x66, 0xce, 0xf9, 0x3e, 0x9e, 0x33,
+ 0x73, 0x5e, 0x84, 0x5b, 0x63, 0xdf, 0x1f, 0xbb, 0xa4, 0x69, 0x93, 0x13, 0xe6, 0xfb, 0x2e, 0x6d,
+ 0x5a, 0xae, 0x1f, 0xd9, 0x2c, 0xc4, 0x16, 0x69, 0x9e, 0xb4, 0x9a, 0x62, 0xa1, 0x07, 0xa1, 0xcf,
+ 0x7c, 0x74, 0x5d, 0xaa, 0xea, 0xb1, 0xaa, 0x9e, 0xaa, 0xea, 0x27, 0xad, 0xc6, 0x35, 0xc5, 0x84,
+ 0x03, 0xa7, 0x89, 0x3d, 0xcf, 0x67, 0x98, 0x39, 0xbe, 0x47, 0x25, 0xb8, 0x71, 0x43, 0x49, 0xc5,
+ 0x6e, 0x14, 0x1d, 0x35, 0x99, 0x33, 0x21, 0x94, 0xe1, 0x49, 0xa0, 0x14, 0xfe, 0xf3, 0xb6, 0xc2,
+ 0x8f, 0x21, 0x0e, 0x02, 0x12, 0xc6, 0x04, 0x6b, 0x4a, 0x1e, 0x06, 0x56, 0x93, 0x32, 0xcc, 0x22,
+ 0x25, 0x58, 0xff, 0x07, 0x41, 0x6e, 0x10, 0x60, 0x0f, 0x21, 0xc8, 0x79, 0x78, 0x42, 0xea, 0xda,
+ 0x4d, 0x6d, 0xa3, 0x6c, 0x88, 0x35, 0x5a, 0x83, 0x22, 0x0d, 0xb0, 0x67, 0x3a, 0x76, 0x3d, 0x23,
+ 0x8e, 0x0b, 0x7c, 0xdb, 0xb5, 0xd1, 0xff, 0x61, 0x29, 0xc0, 0x21, 0xf1, 0x98, 0x19, 0xcb, 0xb3,
+ 0x42, 0x5e, 0x95, 0xa7, 0x03, 0xa9, 0x35, 0x80, 0xaa, 0xed, 0xd0, 0xc0, 0xc5, 0xa7, 0xa6, 0xa0,
+ 0xce, 0xdd, 0xd4, 0x36, 0x2a, 0xad, 0xdb, 0xfa, 0x47, 0x6f, 0x42, 0x1f, 0x86, 0x91, 0x67, 0x61,
+ 0x86, 0x47, 0x2e, 0x19, 0xb0, 0xd0, 0xf1, 0xc6, 0x46, 0x45, 0xb1, 0xf4, 0xb8, 0x4d, 0x5f, 0x02,
+ 0x50, 0x86, 0x43, 0x66, 0xf2, 0x2b, 0xa8, 0xe7, 0x05, 0x65, 0x23, 0xa6, 0x8c, 0xdd, 0xd7, 0x87,
+ 0xf1, 0xfd, 0x18, 0x65, 0xa1, 0xcd, 0xf7, 0xe8, 0x1e, 0x94, 0x88, 0x67, 0x4b, 0x60, 0xe1, 0x4c,
+ 0x60, 0x91, 0x78, 0xb6, 0x80, 0xf5, 0x00, 0x30, 0x63, 0xa1, 0x33, 0x8a, 0x18, 0xa1, 0xf5, 0xa2,
+ 0x00, 0xea, 0x67, 0x38, 0xc1, 0x6f, 0x40, 0xdf, 0x4a, 0x50, 0xc6, 0x14, 0x03, 0x7a, 0x0c, 0x15,
+ 0xca, 0xb0, 0xf5, 0xd2, 0x14, 0xda, 0xf5, 0x92, 0x20, 0xbc, 0x75, 0x16, 0x21, 0x47, 0x0c, 0xf9,
+ 0xce, 0x00, 0x9a, 0xac, 0xd1, 0xb7, 0x50, 0xe1, 0xee, 0x98, 0xe4, 0x84, 0x78, 0x8c, 0xd6, 0xcb,
+ 0xe7, 0x37, 0x8e, 0xbb, 0xd6, 0x16, 0x28, 0x03, 0x58, 0xb2, 0x46, 0x0f, 0x21, 0xef, 0x3a, 0xde,
+ 0x4b, 0x5a, 0x87, 0xf3, 0x99, 0xc5, 0xa9, 0xf6, 0x39, 0xc0, 0x90, 0x38, 0xf4, 0x39, 0x14, 0x64,
+ 0x80, 0xd5, 0x2b, 0x82, 0x01, 0xc5, 0x0c, 0x61, 0x60, 0x71, 0x2f, 0x58, 0x44, 0x0d, 0xa5, 0x81,
+ 0x9e, 0xc1, 0x55, 0x8a, 0x27, 0xc4, 0x0c, 0x42, 0xdf, 0x22, 0x94, 0x9a, 0x98, 0x9a, 0x53, 0x61,
+ 0x55, 0xaf, 0x7e, 0xe0, 0x8d, 0xb6, 0x7d, 0xdf, 0x3d, 0xc4, 0x6e, 0x44, 0x8c, 0x35, 0x0e, 0xef,
+ 0x4b, 0xf4, 0x16, 0xed, 0x27, 0xc1, 0x87, 0xda, 0x50, 0xb3, 0x8e, 0x1d, 0xd7, 0x96, 0xf1, 0x69,
+ 0xf9, 0x91, 0xc7, 0xea, 0x8b, 0x82, 0xee, 0xea, 0x3b, 0x74, 0x5d, 0x8f, 0xdd, 0x69, 0x49, 0xbe,
+ 0x25, 0x01, 0xe2, 0x0c, 0x3b, 0x1c, 0xd2, 0xf8, 0x2d, 0x03, 0x90, 0xbe, 0x22, 0x22, 0xb0, 0x98,
+ 0xbc, 0xa3, 0x39, 0xc1, 0x41, 0x5d, 0xbb, 0x99, 0xdd, 0xa8, 0xb4, 0xbe, 0xb9, 0x58, 0x30, 0xa4,
+ 0xcb, 0x03, 0x1c, 0xb4, 0x3d, 0x16, 0x9e, 0x1a, 0x55, 0x3c, 0x75, 0x84, 0xee, 0x43, 0xdd, 0x0e,
+ 0xfd, 0x20, 0x20, 0xb6, 0x99, 0x86, 0x8d, 0x72, 0x82, 0xe7, 0x61, 0xde, 0xb8, 0xac, 0xe4, 0x29,
+ 0xa9, 0xb4, 0xd7, 0x83, 0xe5, 0x77, 0xc8, 0x51, 0x0d, 0xb2, 0x2f, 0xc9, 0xa9, 0x4a, 0x6c, 0xbe,
+ 0x44, 0x3b, 0x90, 0x3f, 0xe1, 0xfe, 0x0a, 0xb6, 0x4a, 0x6b, 0xf3, 0x0c, 0xfb, 0x13, 0x4a, 0x79,
+ 0x49, 0x12, 0xfb, 0x20, 0x73, 0x5f, 0x6b, 0xfc, 0x95, 0x87, 0x72, 0x12, 0x48, 0x48, 0x87, 0x9c,
+ 0xc8, 0x2d, 0xed, 0xcc, 0xdc, 0x12, 0x7a, 0xe8, 0x39, 0x40, 0x5a, 0xea, 0x94, 0x2d, 0xf7, 0x2f,
+ 0x14, 0xbb, 0xfa, 0x56, 0x82, 0xef, 0x2c, 0x18, 0x53, 0x6c, 0x08, 0xc3, 0xe2, 0x84, 0x50, 0x8a,
+ 0xc7, 0x2a, 0x37, 0x44, 0x81, 0xaa, 0xb4, 0x1e, 0x5c, 0x8c, 0xfe, 0x40, 0x52, 0x88, 0x4d, 0x67,
+ 0xc1, 0xa8, 0x4e, 0xa6, 0xf6, 0x8d, 0x37, 0x1a, 0x40, 0xfa, 0x7d, 0x64, 0x40, 0xc5, 0x26, 0xd4,
+ 0x0a, 0x9d, 0x40, 0xb8, 0xa3, 0xcd, 0x5d, 0xec, 0x52, 0x92, 0xb7, 0x4a, 0x4f, 0xe6, 0x53, 0x4b,
+ 0x4f, 0xe3, 0x97, 0x0c, 0x54, 0xa7, 0x7d, 0x42, 0x03, 0xc8, 0xb1, 0xd3, 0x40, 0x3e, 0xd9, 0x52,
+ 0xeb, 0xe1, 0xfc, 0xb7, 0xa3, 0x0f, 0x4f, 0x03, 0x62, 0x08, 0x32, 0xb4, 0x04, 0x19, 0xd5, 0x31,
+ 0xb2, 0x46, 0xc6, 0xb1, 0xd1, 0x17, 0xb0, 0x16, 0x79, 0x96, 0x3f, 0x09, 0x42, 0x42, 0x29, 0xb1,
+ 0x4d, 0xea, 0xbc, 0x22, 0xe6, 0xe8, 0x94, 0xbb, 0x94, 0x15, 0x4a, 0xab, 0xd3, 0xe2, 0x81, 0xf3,
+ 0x8a, 0x6c, 0x73, 0x21, 0x6a, 0xc1, 0xea, 0xfb, 0x51, 0x39, 0x81, 0xba, 0xf4, 0x1e, 0xcc, 0xfa,
+ 0x5d, 0xc8, 0x71, 0x4b, 0xd0, 0x0a, 0xd4, 0x86, 0xdf, 0xf7, 0xdb, 0xe6, 0xd3, 0xde, 0xa0, 0xdf,
+ 0xde, 0xe9, 0xee, 0x75, 0xdb, 0xbb, 0xb5, 0x05, 0x54, 0x82, 0xdc, 0xa0, 0xdd, 0x1b, 0xd6, 0x34,
+ 0x54, 0x85, 0x92, 0xd1, 0xde, 0x69, 0x77, 0x0f, 0xdb, 0xbb, 0xb5, 0xcc, 0x76, 0x51, 0x25, 0x44,
+ 0xe3, 0x6f, 0x0d, 0x20, 0xad, 0x8c, 0x68, 0x1f, 0x20, 0x2d, 0xaf, 0x2a, 0xdb, 0x37, 0x2f, 0x74,
+ 0x49, 0x46, 0x39, 0x29, 0xae, 0xe8, 0x01, 0x5c, 0x49, 0xf2, 0x3a, 0x6d, 0xf1, 0x33, 0x89, 0xbd,
+ 0x16, 0x27, 0x76, 0x2a, 0x17, 0x99, 0x8d, 0x1e, 0xc2, 0xb5, 0x18, 0x3b, 0x13, 0xd7, 0x31, 0x3c,
+ 0x2b, 0xe0, 0x31, 0xff, 0xf4, 0xcb, 0xa8, 0xd2, 0xf0, 0x6b, 0x06, 0x72, 0xbc, 0x50, 0xa3, 0x2b,
+ 0x50, 0x12, 0xb6, 0xf2, 0xae, 0x2d, 0x6b, 0x42, 0x51, 0xec, 0xbb, 0xf6, 0x87, 0xfb, 0xfd, 0x96,
+ 0x0a, 0x93, 0xac, 0x08, 0x93, 0xcd, 0xf3, 0x36, 0x85, 0xe9, 0xa0, 0x98, 0x0d, 0xe5, 0xdc, 0xa7,
+ 0x86, 0xf2, 0xfa, 0x93, 0x8f, 0x3e, 0xf4, 0x2a, 0x2c, 0xef, 0x74, 0xba, 0xfb, 0xbb, 0xe6, 0x7e,
+ 0xb7, 0xf7, 0xa4, 0xbd, 0x6b, 0x0e, 0xfa, 0x5b, 0xbd, 0x9a, 0x86, 0x2e, 0x03, 0xea, 0x6f, 0x19,
+ 0xed, 0xde, 0x70, 0xe6, 0x3c, 0xd3, 0x88, 0x20, 0x2f, 0x9a, 0x18, 0xfa, 0x1a, 0x72, 0xbc, 0x8d,
+ 0xa9, 0xa7, 0xde, 0x38, 0xaf, 0xa3, 0x86, 0x40, 0x21, 0x1d, 0x2e, 0xc5, 0x8f, 0x24, 0x9a, 0xe1,
+ 0xcc, 0xd3, 0x2e, 0x2b, 0x91, 0xf8, 0x90, 0x78, 0x93, 0xf5, 0x37, 0x1a, 0x2c, 0xcd, 0x16, 0x57,
+ 0xf4, 0x14, 0xaa, 0x54, 0x14, 0x02, 0x53, 0x56, 0xe8, 0x39, 0xcb, 0x48, 0x67, 0xc1, 0xa8, 0x48,
+ 0x1e, 0x49, 0x7b, 0x1d, 0xca, 0x8e, 0xc7, 0xcc, 0xb4, 0xea, 0x67, 0x3b, 0x0b, 0x46, 0xc9, 0xf1,
+ 0x98, 0x14, 0xdf, 0x00, 0x18, 0xf9, 0xbe, 0xab, 0xe4, 0xfc, 0x95, 0x4b, 0x9d, 0x05, 0xa3, 0x3c,
+ 0x8a, 0x1b, 0x6d, 0x92, 0x20, 0xeb, 0x7f, 0x14, 0x00, 0xd2, 0x59, 0x04, 0x3d, 0xe3, 0xe6, 0xf2,
+ 0x59, 0xe6, 0x28, 0xc4, 0x13, 0x42, 0x95, 0xb9, 0xf7, 0xce, 0x3d, 0xcc, 0xc8, 0xe5, 0x9e, 0x00,
+ 0x1b, 0x72, 0x2c, 0x92, 0x1b, 0xb4, 0x09, 0x97, 0xa6, 0xa6, 0x24, 0xf3, 0x18, 0xd3, 0x63, 0x33,
+ 0xa9, 0x2a, 0xb5, 0x74, 0x04, 0xea, 0x60, 0x7a, 0xdc, 0xb5, 0x1b, 0x3f, 0xe5, 0x94, 0x5d, 0x02,
+ 0x8e, 0x9e, 0xc2, 0xe2, 0x51, 0xe4, 0x59, 0x3c, 0x81, 0xcc, 0x64, 0xac, 0x9d, 0xa7, 0x1c, 0x57,
+ 0x63, 0x1a, 0x31, 0x7c, 0x1e, 0xc1, 0x65, 0x3f, 0x74, 0xc6, 0x8e, 0x87, 0x5d, 0x73, 0x96, 0x3f,
+ 0x33, 0x27, 0xff, 0x4a, 0xcc, 0xb7, 0x37, 0xfd, 0x9d, 0x03, 0x28, 0x1f, 0x39, 0x2e, 0x91, 0xd4,
+ 0xd9, 0x39, 0xa9, 0x4b, 0x9c, 0x42, 0xd0, 0xdd, 0x80, 0x8a, 0xeb, 0x78, 0xc4, 0xf4, 0xa2, 0xc9,
+ 0x88, 0x84, 0xaa, 0x7c, 0x02, 0x3f, 0xea, 0x89, 0x13, 0xf4, 0x3f, 0x58, 0xb4, 0x7c, 0x37, 0x9a,
+ 0x78, 0xb1, 0x4a, 0x5e, 0xa8, 0x54, 0xe5, 0xa1, 0x52, 0xda, 0x83, 0x8a, 0xeb, 0x63, 0xdb, 0x9c,
+ 0xf8, 0x76, 0xe4, 0xc6, 0x13, 0xf4, 0x67, 0x67, 0x98, 0x75, 0x20, 0x94, 0x0d, 0xe0, 0x48, 0xb9,
+ 0x46, 0xdf, 0xc1, 0x12, 0xf5, 0xa3, 0xd0, 0x22, 0xe6, 0x09, 0x09, 0x29, 0xef, 0x95, 0xc5, 0x39,
+ 0x3d, 0x5c, 0x94, 0x3c, 0x87, 0x92, 0xa6, 0xf1, 0xb3, 0x06, 0x95, 0xa9, 0x78, 0x42, 0x8f, 0x21,
+ 0x2f, 0xc2, 0x52, 0x65, 0xf3, 0xdd, 0x79, 0xa2, 0xd2, 0x90, 0x14, 0xe8, 0x36, 0xac, 0xc4, 0xa9,
+ 0x2d, 0x43, 0x7d, 0x26, 0xb7, 0x91, 0x92, 0xc9, 0x0f, 0xcb, 0xe4, 0xfe, 0x5d, 0x83, 0x82, 0xf2,
+ 0xb8, 0x03, 0x05, 0x75, 0x69, 0xf3, 0x86, 0xa1, 0xc2, 0xa3, 0x27, 0x50, 0x1a, 0x45, 0x7c, 0xae,
+ 0x55, 0xa9, 0x30, 0x0f, 0x57, 0x51, 0x30, 0x74, 0xed, 0xf5, 0x1f, 0x60, 0xf9, 0x1d, 0x29, 0x5a,
+ 0x89, 0x67, 0x43, 0xd9, 0x1b, 0xe4, 0x86, 0xbb, 0xcf, 0xa4, 0x2a, 0xb1, 0x45, 0x13, 0x9e, 0x75,
+ 0x3f, 0x91, 0xf1, 0x26, 0x2c, 0xdc, 0xdf, 0x7e, 0xad, 0xc1, 0x7f, 0x2d, 0x7f, 0xf2, 0x71, 0xeb,
+ 0xb6, 0x41, 0xdc, 0x77, 0x9f, 0x4f, 0x88, 0x7d, 0xed, 0xf9, 0x23, 0xa5, 0x3c, 0xf6, 0x5d, 0xec,
+ 0x8d, 0x75, 0x3f, 0x1c, 0x37, 0xc7, 0xc4, 0x13, 0xf3, 0x63, 0x53, 0x8a, 0x70, 0xe0, 0xd0, 0x0f,
+ 0xfc, 0x6d, 0x7f, 0x95, 0xee, 0x5e, 0x67, 0x56, 0x1f, 0x49, 0xa6, 0x1d, 0x7e, 0xa6, 0xcb, 0x47,
+ 0x3d, 0x6c, 0xfd, 0x19, 0x9f, 0xbf, 0x10, 0xe7, 0x2f, 0xc4, 0xf9, 0x8b, 0xc3, 0xd6, 0xa8, 0x20,
+ 0xbe, 0x71, 0xe7, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x3c, 0x8a, 0x77, 0xd0, 0x0f, 0x00,
+ 0x00,
+}
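
The generated trace.pb.go above only defines the message types; it does not show how they compose. The following is a minimal, hypothetical sketch (not part of the vendored file) of building a Span from those types. The import alias, project ID, and trace/span IDs are placeholders, not values taken from this change.

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	cloudtrace "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2"
)

func main() {
	// Hypothetical IDs: a TRACE_ID is 32 hex characters, a SPAN_ID is 16.
	const name = "projects/my-project/traces/0123456789abcdef0123456789abcdef/spans/0123456789abcdef"

	span := &cloudtrace.Span{
		Name:        name,
		SpanId:      "0123456789abcdef",
		DisplayName: &cloudtrace.TruncatableString{Value: "my-operation"},
		StartTime:   ptypes.TimestampNow(),
		EndTime:     ptypes.TimestampNow(),
		Attributes: &cloudtrace.Span_Attributes{
			AttributeMap: map[string]*cloudtrace.AttributeValue{
				// AttributeValue is a oneof: string, integer, or boolean value.
				"/http/status_code": {Value: &cloudtrace.AttributeValue_IntValue{IntValue: 200}},
			},
		},
	}
	fmt.Println(span.GetDisplayName().GetValue())
}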
diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go
new file mode 100644
index 00000000..6fe2b44c
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go
@@ -0,0 +1,227 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/devtools/cloudtrace/v2/tracing.proto
+
+package cloudtrace // import "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2"
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import empty "github.com/golang/protobuf/ptypes/empty"
+import _ "github.com/golang/protobuf/ptypes/timestamp"
+import _ "google.golang.org/genproto/googleapis/api/annotations"
+
+import (
+ context "golang.org/x/net/context"
+ grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The request message for the `BatchWriteSpans` method.
+type BatchWriteSpansRequest struct {
+ // Required. The name of the project where the spans belong. The format is
+ // `projects/[PROJECT_ID]`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A list of new spans. The span names must not match existing
+ // spans, or the results are undefined.
+ Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BatchWriteSpansRequest) Reset() { *m = BatchWriteSpansRequest{} }
+func (m *BatchWriteSpansRequest) String() string { return proto.CompactTextString(m) }
+func (*BatchWriteSpansRequest) ProtoMessage() {}
+func (*BatchWriteSpansRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_tracing_b9cda2b913ee5eaf, []int{0}
+}
+func (m *BatchWriteSpansRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_BatchWriteSpansRequest.Unmarshal(m, b)
+}
+func (m *BatchWriteSpansRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_BatchWriteSpansRequest.Marshal(b, m, deterministic)
+}
+func (dst *BatchWriteSpansRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BatchWriteSpansRequest.Merge(dst, src)
+}
+func (m *BatchWriteSpansRequest) XXX_Size() int {
+ return xxx_messageInfo_BatchWriteSpansRequest.Size(m)
+}
+func (m *BatchWriteSpansRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_BatchWriteSpansRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BatchWriteSpansRequest proto.InternalMessageInfo
+
+func (m *BatchWriteSpansRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *BatchWriteSpansRequest) GetSpans() []*Span {
+ if m != nil {
+ return m.Spans
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*BatchWriteSpansRequest)(nil), "google.devtools.cloudtrace.v2.BatchWriteSpansRequest")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// TraceServiceClient is the client API for TraceService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type TraceServiceClient interface {
+ // Sends new spans to new or existing traces. You cannot update
+ // existing spans.
+ BatchWriteSpans(ctx context.Context, in *BatchWriteSpansRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+ // Creates a new span.
+ CreateSpan(ctx context.Context, in *Span, opts ...grpc.CallOption) (*Span, error)
+}
+
+type traceServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient {
+ return &traceServiceClient{cc}
+}
+
+func (c *traceServiceClient) BatchWriteSpans(ctx context.Context, in *BatchWriteSpansRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
+ out := new(empty.Empty)
+ err := c.cc.Invoke(ctx, "/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *traceServiceClient) CreateSpan(ctx context.Context, in *Span, opts ...grpc.CallOption) (*Span, error) {
+ out := new(Span)
+ err := c.cc.Invoke(ctx, "/google.devtools.cloudtrace.v2.TraceService/CreateSpan", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// TraceServiceServer is the server API for TraceService service.
+type TraceServiceServer interface {
+ // Sends new spans to new or existing traces. You cannot update
+ // existing spans.
+ BatchWriteSpans(context.Context, *BatchWriteSpansRequest) (*empty.Empty, error)
+ // Creates a new span.
+ CreateSpan(context.Context, *Span) (*Span, error)
+}
+
+func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) {
+ s.RegisterService(&_TraceService_serviceDesc, srv)
+}
+
+func _TraceService_BatchWriteSpans_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(BatchWriteSpansRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(TraceServiceServer).BatchWriteSpans(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(TraceServiceServer).BatchWriteSpans(ctx, req.(*BatchWriteSpansRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _TraceService_CreateSpan_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Span)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(TraceServiceServer).CreateSpan(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.devtools.cloudtrace.v2.TraceService/CreateSpan",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(TraceServiceServer).CreateSpan(ctx, req.(*Span))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _TraceService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.devtools.cloudtrace.v2.TraceService",
+ HandlerType: (*TraceServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "BatchWriteSpans",
+ Handler: _TraceService_BatchWriteSpans_Handler,
+ },
+ {
+ MethodName: "CreateSpan",
+ Handler: _TraceService_CreateSpan_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/devtools/cloudtrace/v2/tracing.proto",
+}
+
+func init() {
+ proto.RegisterFile("google/devtools/cloudtrace/v2/tracing.proto", fileDescriptor_tracing_b9cda2b913ee5eaf)
+}
+
+var fileDescriptor_tracing_b9cda2b913ee5eaf = []byte{
+ // 404 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0xdd, 0x6a, 0xdb, 0x30,
+ 0x14, 0x46, 0xde, 0x0f, 0x4c, 0x1b, 0x0c, 0x04, 0x0b, 0xc1, 0xdb, 0x58, 0xe6, 0x0d, 0x96, 0x64,
+ 0x43, 0x02, 0x8f, 0x5d, 0x2c, 0x63, 0x37, 0x09, 0x23, 0xb7, 0x21, 0x19, 0x19, 0x8c, 0xdc, 0x28,
+ 0x8e, 0xa6, 0x69, 0xd8, 0x92, 0x67, 0x29, 0x86, 0x52, 0x7a, 0xd3, 0x9b, 0x3e, 0x40, 0xfb, 0x14,
+ 0xa5, 0xd0, 0xf7, 0xe8, 0x6d, 0x5f, 0xa1, 0x0f, 0x52, 0x24, 0xd9, 0x0d, 0x84, 0x34, 0xc9, 0x9d,
+ 0xce, 0x39, 0xdf, 0xf9, 0xce, 0xf7, 0x7d, 0x36, 0xfc, 0xc8, 0x95, 0xe2, 0x29, 0x23, 0x0b, 0x56,
+ 0x1a, 0xa5, 0x52, 0x4d, 0x92, 0x54, 0x2d, 0x17, 0xa6, 0xa0, 0x09, 0x23, 0x65, 0x4c, 0xec, 0x43,
+ 0x48, 0x8e, 0xf3, 0x42, 0x19, 0x85, 0x5e, 0x7b, 0x30, 0xae, 0xc1, 0x78, 0x05, 0xc6, 0x65, 0x1c,
+ 0xbe, 0xaa, 0xb8, 0x68, 0x2e, 0x08, 0x95, 0x52, 0x19, 0x6a, 0x84, 0x92, 0xda, 0x2f, 0x87, 0x9d,
+ 0xdd, 0x97, 0x58, 0x05, 0x7d, 0x59, 0x41, 0x5d, 0x35, 0x5f, 0xfe, 0x21, 0x2c, 0xcb, 0xcd, 0x41,
+ 0x35, 0x7c, 0xb3, 0x3e, 0x34, 0x22, 0x63, 0xda, 0xd0, 0x2c, 0xf7, 0x80, 0x88, 0xc3, 0x46, 0x9f,
+ 0x9a, 0xe4, 0xef, 0xaf, 0x42, 0x18, 0x36, 0xc9, 0xa9, 0xd4, 0x63, 0xf6, 0x7f, 0xc9, 0xb4, 0x41,
+ 0x08, 0x3e, 0x94, 0x34, 0x63, 0x4d, 0xd0, 0x02, 0xed, 0x27, 0x63, 0xf7, 0x46, 0x5f, 0xe1, 0x23,
+ 0x6d, 0x31, 0xcd, 0xa0, 0xf5, 0xa0, 0xfd, 0x34, 0x7e, 0x87, 0xb7, 0x7a, 0xc4, 0x96, 0x6f, 0xec,
+ 0x37, 0xe2, 0xcb, 0x00, 0x3e, 0xfb, 0x69, 0x07, 0x13, 0x56, 0x94, 0x22, 0x61, 0xe8, 0x0c, 0xc0,
+ 0xe7, 0x6b, 0xa7, 0xd1, 0x97, 0x1d, 0x84, 0x9b, 0xa5, 0x86, 0x8d, 0x7a, 0xad, 0xb6, 0x89, 0x7f,
+ 0xd8, 0x0c, 0xa2, 0xf8, 0xf8, 0xfa, 0xe6, 0x34, 0xf8, 0x14, 0x7d, 0xb0, 0x99, 0x1d, 0x5a, 0x07,
+ 0xdf, 0xf3, 0x42, 0xfd, 0x63, 0x89, 0xd1, 0xa4, 0x7b, 0xe4, 0x53, 0xd4, 0xbd, 0xf9, 0x1d, 0x69,
+ 0x0f, 0x74, 0xd1, 0x09, 0x80, 0x70, 0x50, 0x30, 0xea, 0x4f, 0xa0, 0x7d, 0x2c, 0x86, 0xfb, 0x80,
+ 0x22, 0xe2, 0xc4, 0x74, 0xa2, 0xf7, 0x9b, 0xc4, 0x54, 0x5a, 0xac, 0x2a, 0x17, 0x57, 0x0f, 0x74,
+ 0xfb, 0x17, 0x00, 0xbe, 0x4d, 0x54, 0xb6, 0x9d, 0xbb, 0xef, 0x42, 0x15, 0x92, 0x8f, 0xac, 0xf5,
+ 0x11, 0xf8, 0x3d, 0xac, 0xe0, 0x5c, 0xa5, 0x54, 0x72, 0xac, 0x0a, 0x4e, 0x38, 0x93, 0x2e, 0x18,
+ 0xe2, 0x47, 0x34, 0x17, 0xfa, 0x9e, 0x1f, 0xeb, 0xdb, 0xaa, 0x3a, 0x0f, 0x5e, 0x0c, 0x3d, 0xd3,
+ 0xc0, 0xf6, 0xb0, 0xfb, 0x76, 0x78, 0x1a, 0x5f, 0xd5, 0xfd, 0x99, 0xeb, 0xcf, 0x5c, 0x7f, 0x36,
+ 0x8d, 0xe7, 0x8f, 0xdd, 0x8d, 0xcf, 0xb7, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x94, 0x51, 0x1d,
+ 0x25, 0x03, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go
new file mode 100644
index 00000000..61570e2a
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go
@@ -0,0 +1,904 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/alert.proto
+
+/*
+Package monitoring is a generated protocol buffer package.
+
+It is generated from these files:
+ google/monitoring/v3/alert.proto
+ google/monitoring/v3/alert_service.proto
+ google/monitoring/v3/common.proto
+ google/monitoring/v3/group.proto
+ google/monitoring/v3/group_service.proto
+ google/monitoring/v3/metric.proto
+ google/monitoring/v3/metric_service.proto
+ google/monitoring/v3/mutation_record.proto
+ google/monitoring/v3/notification.proto
+ google/monitoring/v3/notification_service.proto
+ google/monitoring/v3/uptime.proto
+ google/monitoring/v3/uptime_service.proto
+
+It has these top-level messages:
+ AlertPolicy
+ CreateAlertPolicyRequest
+ GetAlertPolicyRequest
+ ListAlertPoliciesRequest
+ ListAlertPoliciesResponse
+ UpdateAlertPolicyRequest
+ DeleteAlertPolicyRequest
+ TypedValue
+ TimeInterval
+ Aggregation
+ Group
+ ListGroupsRequest
+ ListGroupsResponse
+ GetGroupRequest
+ CreateGroupRequest
+ UpdateGroupRequest
+ DeleteGroupRequest
+ ListGroupMembersRequest
+ ListGroupMembersResponse
+ Point
+ TimeSeries
+ ListMonitoredResourceDescriptorsRequest
+ ListMonitoredResourceDescriptorsResponse
+ GetMonitoredResourceDescriptorRequest
+ ListMetricDescriptorsRequest
+ ListMetricDescriptorsResponse
+ GetMetricDescriptorRequest
+ CreateMetricDescriptorRequest
+ DeleteMetricDescriptorRequest
+ ListTimeSeriesRequest
+ ListTimeSeriesResponse
+ CreateTimeSeriesRequest
+ CreateTimeSeriesError
+ MutationRecord
+ NotificationChannelDescriptor
+ NotificationChannel
+ ListNotificationChannelDescriptorsRequest
+ ListNotificationChannelDescriptorsResponse
+ GetNotificationChannelDescriptorRequest
+ CreateNotificationChannelRequest
+ ListNotificationChannelsRequest
+ ListNotificationChannelsResponse
+ GetNotificationChannelRequest
+ UpdateNotificationChannelRequest
+ DeleteNotificationChannelRequest
+ SendNotificationChannelVerificationCodeRequest
+ GetNotificationChannelVerificationCodeRequest
+ GetNotificationChannelVerificationCodeResponse
+ VerifyNotificationChannelRequest
+ UptimeCheckConfig
+ UptimeCheckIp
+ ListUptimeCheckConfigsRequest
+ ListUptimeCheckConfigsResponse
+ GetUptimeCheckConfigRequest
+ CreateUptimeCheckConfigRequest
+ UpdateUptimeCheckConfigRequest
+ DeleteUptimeCheckConfigRequest
+ ListUptimeCheckIpsRequest
+ ListUptimeCheckIpsResponse
+*/
+package monitoring
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "google.golang.org/genproto/googleapis/api/annotations"
+import google_protobuf3 "github.com/golang/protobuf/ptypes/duration"
+import google_protobuf4 "github.com/golang/protobuf/ptypes/wrappers"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Operators for combining conditions.
+type AlertPolicy_ConditionCombinerType int32
+
+const (
+ // An unspecified combiner.
+ AlertPolicy_COMBINE_UNSPECIFIED AlertPolicy_ConditionCombinerType = 0
+ // Combine conditions using the logical `AND` operator. An
+ // incident is created only if all conditions are met
+ // simultaneously. This combiner is satisfied if all conditions are
+ // met, even if they are met on completely different resources.
+ AlertPolicy_AND AlertPolicy_ConditionCombinerType = 1
+ // Combine conditions using the logical `OR` operator. An incident
+ // is created if any of the listed conditions is met.
+ AlertPolicy_OR AlertPolicy_ConditionCombinerType = 2
+ // Combine conditions using logical `AND` operator, but unlike the regular
+ // `AND` option, an incident is created only if all conditions are met
+ // simultaneously on at least one resource.
+ AlertPolicy_AND_WITH_MATCHING_RESOURCE AlertPolicy_ConditionCombinerType = 3
+)
+
+var AlertPolicy_ConditionCombinerType_name = map[int32]string{
+ 0: "COMBINE_UNSPECIFIED",
+ 1: "AND",
+ 2: "OR",
+ 3: "AND_WITH_MATCHING_RESOURCE",
+}
+var AlertPolicy_ConditionCombinerType_value = map[string]int32{
+ "COMBINE_UNSPECIFIED": 0,
+ "AND": 1,
+ "OR": 2,
+ "AND_WITH_MATCHING_RESOURCE": 3,
+}
+
+func (x AlertPolicy_ConditionCombinerType) String() string {
+ return proto.EnumName(AlertPolicy_ConditionCombinerType_name, int32(x))
+}
+func (AlertPolicy_ConditionCombinerType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{0, 0}
+}
+
+// A description of the conditions under which some aspect of your system is
+// considered to be "unhealthy" and the ways to notify people or services about
+// this state. For an overview of alert policies, see
+// [Introduction to Alerting](/monitoring/alerts/).
+type AlertPolicy struct {
+ // Required if the policy exists. The resource name for this policy. The
+ // syntax is:
+ //
+ // projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]
+ //
+ // `[ALERT_POLICY_ID]` is assigned by Stackdriver Monitoring when the policy
+ // is created. When calling the
+ // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy]
+ // method, do not include the `name` field in the alerting policy passed as
+ // part of the request.
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ // A short name or phrase used to identify the policy in dashboards,
+ // notifications, and incidents. To avoid confusion, don't use the same
+ // display name for multiple policies in the same project. The name is
+ // limited to 512 Unicode characters.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
+ // Documentation that is included with notifications and incidents related to
+ // this policy. Best practice is for the documentation to include information
+ // to help responders understand, mitigate, escalate, and correct the
+ // underlying problems detected by the alerting policy. Notification channels
+ // that have limited capacity might not show this documentation.
+ Documentation *AlertPolicy_Documentation `protobuf:"bytes,13,opt,name=documentation" json:"documentation,omitempty"`
+ // User-supplied key/value data to be used for organizing and
+ // identifying the `AlertPolicy` objects.
+ //
+ // The field can contain up to 64 entries. Each key and value is limited to
+ // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and
+ // values can contain only lowercase letters, numerals, underscores, and
+ // dashes. Keys must begin with a letter.
+ UserLabels map[string]string `protobuf:"bytes,16,rep,name=user_labels,json=userLabels" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ // A list of conditions for the policy. The conditions are combined by AND or
+ // OR according to the `combiner` field. If the combined conditions evaluate
+ // to true, then an incident is created. A policy can have from one to six
+ // conditions.
+ Conditions []*AlertPolicy_Condition `protobuf:"bytes,12,rep,name=conditions" json:"conditions,omitempty"`
+ // How to combine the results of multiple conditions
+ // to determine if an incident should be opened.
+ Combiner AlertPolicy_ConditionCombinerType `protobuf:"varint,6,opt,name=combiner,enum=google.monitoring.v3.AlertPolicy_ConditionCombinerType" json:"combiner,omitempty"`
+ // Whether or not the policy is enabled. On write, the default interpretation
+ // if unset is that the policy is enabled. On read, clients should not make
+ // any assumption about the state if it has not been populated. The
+ // field should always be populated on List and Get operations, unless
+ // a field projection has been specified that strips it out.
+ Enabled *google_protobuf4.BoolValue `protobuf:"bytes,17,opt,name=enabled" json:"enabled,omitempty"`
+ // Identifies the notification channels to which notifications should be sent
+ // when incidents are opened or closed or when new violations occur on
+ // an already opened incident. Each element of this array corresponds to
+ // the `name` field in each of the
+ // [`NotificationChannel`][google.monitoring.v3.NotificationChannel]
+ // objects that are returned from the [`ListNotificationChannels`]
+ // [google.monitoring.v3.NotificationChannelService.ListNotificationChannels]
+ // method. The syntax of the entries in this field is:
+ //
+ // projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]
+ NotificationChannels []string `protobuf:"bytes,14,rep,name=notification_channels,json=notificationChannels" json:"notification_channels,omitempty"`
+ // A read-only record of the creation of the alerting policy. If provided
+ // in a call to create or update, this field will be ignored.
+ CreationRecord *MutationRecord `protobuf:"bytes,10,opt,name=creation_record,json=creationRecord" json:"creation_record,omitempty"`
+ // A read-only record of the most recent change to the alerting policy. If
+ // provided in a call to create or update, this field will be ignored.
+ MutationRecord *MutationRecord `protobuf:"bytes,11,opt,name=mutation_record,json=mutationRecord" json:"mutation_record,omitempty"`
+}
+
+func (m *AlertPolicy) Reset() { *m = AlertPolicy{} }
+func (m *AlertPolicy) String() string { return proto.CompactTextString(m) }
+func (*AlertPolicy) ProtoMessage() {}
+func (*AlertPolicy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *AlertPolicy) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AlertPolicy) GetDisplayName() string {
+ if m != nil {
+ return m.DisplayName
+ }
+ return ""
+}
+
+func (m *AlertPolicy) GetDocumentation() *AlertPolicy_Documentation {
+ if m != nil {
+ return m.Documentation
+ }
+ return nil
+}
+
+func (m *AlertPolicy) GetUserLabels() map[string]string {
+ if m != nil {
+ return m.UserLabels
+ }
+ return nil
+}
+
+func (m *AlertPolicy) GetConditions() []*AlertPolicy_Condition {
+ if m != nil {
+ return m.Conditions
+ }
+ return nil
+}
+
+func (m *AlertPolicy) GetCombiner() AlertPolicy_ConditionCombinerType {
+ if m != nil {
+ return m.Combiner
+ }
+ return AlertPolicy_COMBINE_UNSPECIFIED
+}
+
+func (m *AlertPolicy) GetEnabled() *google_protobuf4.BoolValue {
+ if m != nil {
+ return m.Enabled
+ }
+ return nil
+}
+
+func (m *AlertPolicy) GetNotificationChannels() []string {
+ if m != nil {
+ return m.NotificationChannels
+ }
+ return nil
+}
+
+func (m *AlertPolicy) GetCreationRecord() *MutationRecord {
+ if m != nil {
+ return m.CreationRecord
+ }
+ return nil
+}
+
+func (m *AlertPolicy) GetMutationRecord() *MutationRecord {
+ if m != nil {
+ return m.MutationRecord
+ }
+ return nil
+}
+
+// A content string and a MIME type that describes the content string's
+// format.
+type AlertPolicy_Documentation struct {
+ // The text of the documentation, interpreted according to `mime_type`.
+ // The content may not exceed 8,192 Unicode characters and may not exceed
+ // more than 10,240 bytes when encoded in UTF-8 format, whichever is
+ // smaller.
+ Content string `protobuf:"bytes,1,opt,name=content" json:"content,omitempty"`
+ // The format of the `content` field. Presently, only the value
+ // `"text/markdown"` is supported. See
+ // [Markdown](https://en.wikipedia.org/wiki/Markdown) for more information.
+ MimeType string `protobuf:"bytes,2,opt,name=mime_type,json=mimeType" json:"mime_type,omitempty"`
+}
+
+func (m *AlertPolicy_Documentation) Reset() { *m = AlertPolicy_Documentation{} }
+func (m *AlertPolicy_Documentation) String() string { return proto.CompactTextString(m) }
+func (*AlertPolicy_Documentation) ProtoMessage() {}
+func (*AlertPolicy_Documentation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
+
+func (m *AlertPolicy_Documentation) GetContent() string {
+ if m != nil {
+ return m.Content
+ }
+ return ""
+}
+
+func (m *AlertPolicy_Documentation) GetMimeType() string {
+ if m != nil {
+ return m.MimeType
+ }
+ return ""
+}
+
+// A condition is a true/false test that determines when an alerting policy
+// should open an incident. If a condition evaluates to true, it signifies
+// that something is wrong.
+type AlertPolicy_Condition struct {
+ // Required if the condition exists. The unique resource name for this
+ // condition. Its syntax is:
+ //
+ // projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]
+ //
+ // `[CONDITION_ID]` is assigned by Stackdriver Monitoring when the
+ // condition is created as part of a new or updated alerting policy.
+ //
+ // When calling the
+ // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy]
+ // method, do not include the `name` field in the conditions of the
+ // requested alerting policy. Stackdriver Monitoring creates the
+ // condition identifiers and includes them in the new policy.
+ //
+ // When calling the
+ // [alertPolicies.update][google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy]
+ // method to update a policy, including a condition `name` causes the
+ // existing condition to be updated. Conditions without names are added to
+ // the updated policy. Existing conditions are deleted if they are not
+ // updated.
+ //
+ // Best practice is to preserve `[CONDITION_ID]` if you make only small
+ // changes, such as those to condition thresholds, durations, or trigger
+ // values. Otherwise, treat the change as a new condition and let the
+ // existing condition be deleted.
+ Name string `protobuf:"bytes,12,opt,name=name" json:"name,omitempty"`
+ // A short name or phrase used to identify the condition in dashboards,
+ // notifications, and incidents. To avoid confusion, don't use the same
+ // display name for multiple conditions in the same policy.
+ DisplayName string `protobuf:"bytes,6,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
+ // Only one of the following condition types will be specified.
+ //
+ // Types that are valid to be assigned to Condition:
+ // *AlertPolicy_Condition_ConditionThreshold
+ // *AlertPolicy_Condition_ConditionAbsent
+ Condition isAlertPolicy_Condition_Condition `protobuf_oneof:"condition"`
+}
+
+func (m *AlertPolicy_Condition) Reset() { *m = AlertPolicy_Condition{} }
+func (m *AlertPolicy_Condition) String() string { return proto.CompactTextString(m) }
+func (*AlertPolicy_Condition) ProtoMessage() {}
+func (*AlertPolicy_Condition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 1} }
+
+type isAlertPolicy_Condition_Condition interface {
+ isAlertPolicy_Condition_Condition()
+}
+
+type AlertPolicy_Condition_ConditionThreshold struct {
+ ConditionThreshold *AlertPolicy_Condition_MetricThreshold `protobuf:"bytes,1,opt,name=condition_threshold,json=conditionThreshold,oneof"`
+}
+type AlertPolicy_Condition_ConditionAbsent struct {
+ ConditionAbsent *AlertPolicy_Condition_MetricAbsence `protobuf:"bytes,2,opt,name=condition_absent,json=conditionAbsent,oneof"`
+}
+
+func (*AlertPolicy_Condition_ConditionThreshold) isAlertPolicy_Condition_Condition() {}
+func (*AlertPolicy_Condition_ConditionAbsent) isAlertPolicy_Condition_Condition() {}
+
+func (m *AlertPolicy_Condition) GetCondition() isAlertPolicy_Condition_Condition {
+ if m != nil {
+ return m.Condition
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AlertPolicy_Condition) GetDisplayName() string {
+ if m != nil {
+ return m.DisplayName
+ }
+ return ""
+}
+
+func (m *AlertPolicy_Condition) GetConditionThreshold() *AlertPolicy_Condition_MetricThreshold {
+ if x, ok := m.GetCondition().(*AlertPolicy_Condition_ConditionThreshold); ok {
+ return x.ConditionThreshold
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition) GetConditionAbsent() *AlertPolicy_Condition_MetricAbsence {
+ if x, ok := m.GetCondition().(*AlertPolicy_Condition_ConditionAbsent); ok {
+ return x.ConditionAbsent
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*AlertPolicy_Condition) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _AlertPolicy_Condition_OneofMarshaler, _AlertPolicy_Condition_OneofUnmarshaler, _AlertPolicy_Condition_OneofSizer, []interface{}{
+ (*AlertPolicy_Condition_ConditionThreshold)(nil),
+ (*AlertPolicy_Condition_ConditionAbsent)(nil),
+ }
+}
+
+func _AlertPolicy_Condition_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*AlertPolicy_Condition)
+ // condition
+ switch x := m.Condition.(type) {
+ case *AlertPolicy_Condition_ConditionThreshold:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ConditionThreshold); err != nil {
+ return err
+ }
+ case *AlertPolicy_Condition_ConditionAbsent:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ConditionAbsent); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("AlertPolicy_Condition.Condition has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _AlertPolicy_Condition_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*AlertPolicy_Condition)
+ switch tag {
+ case 1: // condition.condition_threshold
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(AlertPolicy_Condition_MetricThreshold)
+ err := b.DecodeMessage(msg)
+ m.Condition = &AlertPolicy_Condition_ConditionThreshold{msg}
+ return true, err
+ case 2: // condition.condition_absent
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(AlertPolicy_Condition_MetricAbsence)
+ err := b.DecodeMessage(msg)
+ m.Condition = &AlertPolicy_Condition_ConditionAbsent{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _AlertPolicy_Condition_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*AlertPolicy_Condition)
+ // condition
+ switch x := m.Condition.(type) {
+ case *AlertPolicy_Condition_ConditionThreshold:
+ s := proto.Size(x.ConditionThreshold)
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *AlertPolicy_Condition_ConditionAbsent:
+ s := proto.Size(x.ConditionAbsent)
+ n += proto.SizeVarint(2<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// Specifies how many time series must fail a predicate to trigger a
+// condition. If not specified, then a `{count: 1}` trigger is used.
+type AlertPolicy_Condition_Trigger struct {
+ // A type of trigger.
+ //
+ // Types that are valid to be assigned to Type:
+ // *AlertPolicy_Condition_Trigger_Count
+ // *AlertPolicy_Condition_Trigger_Percent
+ Type isAlertPolicy_Condition_Trigger_Type `protobuf_oneof:"type"`
+}
+
+func (m *AlertPolicy_Condition_Trigger) Reset() { *m = AlertPolicy_Condition_Trigger{} }
+func (m *AlertPolicy_Condition_Trigger) String() string { return proto.CompactTextString(m) }
+func (*AlertPolicy_Condition_Trigger) ProtoMessage() {}
+func (*AlertPolicy_Condition_Trigger) Descriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{0, 1, 0}
+}
+
+type isAlertPolicy_Condition_Trigger_Type interface {
+ isAlertPolicy_Condition_Trigger_Type()
+}
+
+type AlertPolicy_Condition_Trigger_Count struct {
+ Count int32 `protobuf:"varint,1,opt,name=count,oneof"`
+}
+type AlertPolicy_Condition_Trigger_Percent struct {
+ Percent float64 `protobuf:"fixed64,2,opt,name=percent,oneof"`
+}
+
+func (*AlertPolicy_Condition_Trigger_Count) isAlertPolicy_Condition_Trigger_Type() {}
+func (*AlertPolicy_Condition_Trigger_Percent) isAlertPolicy_Condition_Trigger_Type() {}
+
+func (m *AlertPolicy_Condition_Trigger) GetType() isAlertPolicy_Condition_Trigger_Type {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition_Trigger) GetCount() int32 {
+ if x, ok := m.GetType().(*AlertPolicy_Condition_Trigger_Count); ok {
+ return x.Count
+ }
+ return 0
+}
+
+func (m *AlertPolicy_Condition_Trigger) GetPercent() float64 {
+ if x, ok := m.GetType().(*AlertPolicy_Condition_Trigger_Percent); ok {
+ return x.Percent
+ }
+ return 0
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*AlertPolicy_Condition_Trigger) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _AlertPolicy_Condition_Trigger_OneofMarshaler, _AlertPolicy_Condition_Trigger_OneofUnmarshaler, _AlertPolicy_Condition_Trigger_OneofSizer, []interface{}{
+ (*AlertPolicy_Condition_Trigger_Count)(nil),
+ (*AlertPolicy_Condition_Trigger_Percent)(nil),
+ }
+}
+
+func _AlertPolicy_Condition_Trigger_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*AlertPolicy_Condition_Trigger)
+ // type
+ switch x := m.Type.(type) {
+ case *AlertPolicy_Condition_Trigger_Count:
+ b.EncodeVarint(1<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Count))
+ case *AlertPolicy_Condition_Trigger_Percent:
+ b.EncodeVarint(2<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.Percent))
+ case nil:
+ default:
+ return fmt.Errorf("AlertPolicy_Condition_Trigger.Type has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _AlertPolicy_Condition_Trigger_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*AlertPolicy_Condition_Trigger)
+ switch tag {
+ case 1: // type.count
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Type = &AlertPolicy_Condition_Trigger_Count{int32(x)}
+ return true, err
+ case 2: // type.percent
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Type = &AlertPolicy_Condition_Trigger_Percent{math.Float64frombits(x)}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _AlertPolicy_Condition_Trigger_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*AlertPolicy_Condition_Trigger)
+ // type
+ switch x := m.Type.(type) {
+ case *AlertPolicy_Condition_Trigger_Count:
+ n += proto.SizeVarint(1<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Count))
+ case *AlertPolicy_Condition_Trigger_Percent:
+ n += proto.SizeVarint(2<<3 | proto.WireFixed64)
+ n += 8
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// A condition type that compares a collection of time series
+// against a threshold.
+type AlertPolicy_Condition_MetricThreshold struct {
+ // A [filter](/monitoring/api/v3/filters) that
+ // identifies which time series should be compared with the threshold.
+ //
+ // The filter is similar to the one that is specified in the
+ // [`MetricService.ListTimeSeries`
+ // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that
+ // call is useful to verify the time series that will be retrieved /
+ // processed) and must specify the metric type and optionally may contain
+ // restrictions on resource type, resource labels, and metric labels.
+ // This field may not exceed 2048 Unicode characters in length.
+ Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"`
+ // Specifies the alignment of data points in individual time series as
+ // well as how to combine the retrieved time series together (such as
+ // when aggregating multiple streams on each resource to a single
+ // stream for each resource or when aggregating streams across all
+ // members of a group of resources). Multiple aggregations
+ // are applied in the order specified.
+ //
+ // This field is similar to the one in the
+ // [`MetricService.ListTimeSeries` request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list).
+ // It is advisable to use the `ListTimeSeries` method when debugging this field.
+ Aggregations []*Aggregation `protobuf:"bytes,8,rep,name=aggregations" json:"aggregations,omitempty"`
+ // A [filter](/monitoring/api/v3/filters) that identifies a time
+ // series that should be used as the denominator of a ratio that will be
+ // compared with the threshold. If a `denominator_filter` is specified,
+ // the time series specified by the `filter` field will be used as the
+ // numerator.
+ //
+ // The filter is similar to the one that is specified in the
+ // [`MetricService.ListTimeSeries`
+ // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that
+ // call is useful to verify the time series that will be retrieved /
+ // processed) and must specify the metric type and optionally may contain
+ // restrictions on resource type, resource labels, and metric labels.
+ // This field may not exceed 2048 Unicode characters in length.
+ DenominatorFilter string `protobuf:"bytes,9,opt,name=denominator_filter,json=denominatorFilter" json:"denominator_filter,omitempty"`
+ // Specifies the alignment of data points in individual time series
+ // selected by `denominatorFilter` as
+ // well as how to combine the retrieved time series together (such as
+ // when aggregating multiple streams on each resource to a single
+ // stream for each resource or when aggregating streams across all
+ // members of a group of resources).
+ //
+ // When computing ratios, the `aggregations` and
+ // `denominator_aggregations` fields must use the same alignment period
+ // and produce time series that have the same periodicity and labels.
+ //
+ // This field is similar to the one in the
+ // [`MetricService.ListTimeSeries`
+ // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). It
+ // is advisable to use the `ListTimeSeries` method when debugging this
+ // field.
+ DenominatorAggregations []*Aggregation `protobuf:"bytes,10,rep,name=denominator_aggregations,json=denominatorAggregations" json:"denominator_aggregations,omitempty"`
+ // The comparison to apply between the time series (indicated by `filter`
+ // and `aggregation`) and the threshold (indicated by `threshold_value`).
+ // The comparison is applied on each time series, with the time series
+ // on the left-hand side and the threshold on the right-hand side.
+ //
+ // Only `COMPARISON_LT` and `COMPARISON_GT` are supported currently.
+ Comparison ComparisonType `protobuf:"varint,4,opt,name=comparison,enum=google.monitoring.v3.ComparisonType" json:"comparison,omitempty"`
+ // A value against which to compare the time series.
+ ThresholdValue float64 `protobuf:"fixed64,5,opt,name=threshold_value,json=thresholdValue" json:"threshold_value,omitempty"`
+ // The amount of time that a time series must violate the
+ // threshold to be considered failing. Currently, only values
+ // that are a multiple of a minute--e.g. 60, 120, or 300
+ // seconds--are supported. If an invalid value is given, an
+ // error will be returned. The `Duration.nanos` field is
+ // ignored. When choosing a duration, it is useful to keep in mind the
+ // frequency of the underlying time series data (which may also be
+ // affected by any alignments specified in the `aggregation` field);
+ // a good duration is long enough so that a single outlier does not
+ // generate spurious alerts, but short enough that unhealthy states
+ // are detected and alerted on quickly.
+ Duration *google_protobuf3.Duration `protobuf:"bytes,6,opt,name=duration" json:"duration,omitempty"`
+ // The number/percent of time series for which the comparison must hold
+ // in order for the condition to trigger. If unspecified, then the
+ // condition will trigger if the comparison is true for any of the
+ // time series that have been identified by `filter` and `aggregations`,
+ // or by the ratio, if `denominator_filter` and `denominator_aggregations`
+ // are specified.
+ Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,7,opt,name=trigger" json:"trigger,omitempty"`
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) Reset() { *m = AlertPolicy_Condition_MetricThreshold{} }
+func (m *AlertPolicy_Condition_MetricThreshold) String() string { return proto.CompactTextString(m) }
+func (*AlertPolicy_Condition_MetricThreshold) ProtoMessage() {}
+func (*AlertPolicy_Condition_MetricThreshold) Descriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{0, 1, 1}
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) GetFilter() string {
+ if m != nil {
+ return m.Filter
+ }
+ return ""
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) GetAggregations() []*Aggregation {
+ if m != nil {
+ return m.Aggregations
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) GetDenominatorFilter() string {
+ if m != nil {
+ return m.DenominatorFilter
+ }
+ return ""
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) GetDenominatorAggregations() []*Aggregation {
+ if m != nil {
+ return m.DenominatorAggregations
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) GetComparison() ComparisonType {
+ if m != nil {
+ return m.Comparison
+ }
+ return ComparisonType_COMPARISON_UNSPECIFIED
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) GetThresholdValue() float64 {
+ if m != nil {
+ return m.ThresholdValue
+ }
+ return 0
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) GetDuration() *google_protobuf3.Duration {
+ if m != nil {
+ return m.Duration
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) GetTrigger() *AlertPolicy_Condition_Trigger {
+ if m != nil {
+ return m.Trigger
+ }
+ return nil
+}
+
+// A condition type that checks that monitored resources
+// are reporting data. The configuration defines a metric and
+// a set of monitored resources. The predicate is considered in violation
+// when a time series for the specified metric of a monitored
+// resource does not include any data in the specified `duration`.
+type AlertPolicy_Condition_MetricAbsence struct {
+ // A [filter](/monitoring/api/v3/filters) that
+ // identifies which time series should be compared with the threshold.
+ //
+ // The filter is similar to the one that is specified in the
+ // [`MetricService.ListTimeSeries`
+ // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that
+ // call is useful to verify the time series that will be retrieved /
+ // processed) and must specify the metric type and optionally may contain
+ // restrictions on resource type, resource labels, and metric labels.
+ // This field may not exceed 2048 Unicode characters in length.
+ Filter string `protobuf:"bytes,1,opt,name=filter" json:"filter,omitempty"`
+ // Specifies the alignment of data points in individual time series as
+ // well as how to combine the retrieved time series together (such as
+ // when aggregating multiple streams on each resource to a single
+ // stream for each resource or when aggregating streams across all
+ // members of a group of resources). Multiple aggregations
+ // are applied in the order specified.
+ //
+ // This field is similar to the
+ // one in the [`MetricService.ListTimeSeries` request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list).
+ // It is advisable to use the `ListTimeSeries` method when debugging this field.
+ Aggregations []*Aggregation `protobuf:"bytes,5,rep,name=aggregations" json:"aggregations,omitempty"`
+ // The amount of time that a time series must fail to report new
+ // data to be considered failing. Currently, only values that
+ // are a multiple of a minute--e.g. 60, 120, or 300
+ // seconds--are supported. If an invalid value is given, an
+ // error will be returned. The `Duration.nanos` field is
+ // ignored.
+ Duration *google_protobuf3.Duration `protobuf:"bytes,2,opt,name=duration" json:"duration,omitempty"`
+ // The number/percent of time series for which the comparison must hold
+ // in order for the condition to trigger. If unspecified, then the
+ // condition will trigger if the comparison is true for any of the
+ // time series that have been identified by `filter` and `aggregations`.
+ Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger" json:"trigger,omitempty"`
+}
+
+func (m *AlertPolicy_Condition_MetricAbsence) Reset() { *m = AlertPolicy_Condition_MetricAbsence{} }
+func (m *AlertPolicy_Condition_MetricAbsence) String() string { return proto.CompactTextString(m) }
+func (*AlertPolicy_Condition_MetricAbsence) ProtoMessage() {}
+func (*AlertPolicy_Condition_MetricAbsence) Descriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{0, 1, 2}
+}
+
+func (m *AlertPolicy_Condition_MetricAbsence) GetFilter() string {
+ if m != nil {
+ return m.Filter
+ }
+ return ""
+}
+
+func (m *AlertPolicy_Condition_MetricAbsence) GetAggregations() []*Aggregation {
+ if m != nil {
+ return m.Aggregations
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition_MetricAbsence) GetDuration() *google_protobuf3.Duration {
+ if m != nil {
+ return m.Duration
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition_MetricAbsence) GetTrigger() *AlertPolicy_Condition_Trigger {
+ if m != nil {
+ return m.Trigger
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*AlertPolicy)(nil), "google.monitoring.v3.AlertPolicy")
+ proto.RegisterType((*AlertPolicy_Documentation)(nil), "google.monitoring.v3.AlertPolicy.Documentation")
+ proto.RegisterType((*AlertPolicy_Condition)(nil), "google.monitoring.v3.AlertPolicy.Condition")
+ proto.RegisterType((*AlertPolicy_Condition_Trigger)(nil), "google.monitoring.v3.AlertPolicy.Condition.Trigger")
+ proto.RegisterType((*AlertPolicy_Condition_MetricThreshold)(nil), "google.monitoring.v3.AlertPolicy.Condition.MetricThreshold")
+ proto.RegisterType((*AlertPolicy_Condition_MetricAbsence)(nil), "google.monitoring.v3.AlertPolicy.Condition.MetricAbsence")
+ proto.RegisterEnum("google.monitoring.v3.AlertPolicy_ConditionCombinerType", AlertPolicy_ConditionCombinerType_name, AlertPolicy_ConditionCombinerType_value)
+}
+
+func init() { proto.RegisterFile("google/monitoring/v3/alert.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 941 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xeb, 0x6e, 0xe3, 0x44,
+ 0x14, 0xae, 0x93, 0xe6, 0x76, 0xd2, 0x36, 0xd9, 0xd9, 0xee, 0xae, 0x31, 0x68, 0x95, 0xae, 0x90,
+ 0x88, 0x40, 0x38, 0x22, 0x01, 0x71, 0x59, 0x81, 0x94, 0x5b, 0x37, 0x11, 0x24, 0xad, 0xa6, 0x69,
+ 0x91, 0x50, 0x25, 0xcb, 0x71, 0xa6, 0xae, 0x85, 0x3d, 0x63, 0x4d, 0xec, 0xa2, 0xbc, 0x0e, 0x3f,
+ 0x79, 0x14, 0x1e, 0x81, 0x7f, 0xbc, 0x02, 0xe2, 0x01, 0x90, 0xc7, 0x63, 0xc7, 0xe9, 0xa6, 0xbb,
+ 0x64, 0xf7, 0x5f, 0xce, 0x9c, 0xef, 0x7c, 0xe7, 0xf6, 0xcd, 0x38, 0xd0, 0xb0, 0x19, 0xb3, 0x5d,
+ 0xd2, 0xf2, 0x18, 0x75, 0x02, 0xc6, 0x1d, 0x6a, 0xb7, 0xee, 0x3a, 0x2d, 0xd3, 0x25, 0x3c, 0xd0,
+ 0x7d, 0xce, 0x02, 0x86, 0x8e, 0x63, 0x84, 0xbe, 0x46, 0xe8, 0x77, 0x1d, 0xed, 0x23, 0x19, 0x67,
+ 0xfa, 0x4e, 0xcb, 0xa4, 0x94, 0x05, 0x66, 0xe0, 0x30, 0xba, 0x8c, 0x63, 0xb4, 0x93, 0xad, 0xac,
+ 0x16, 0xf3, 0x3c, 0x46, 0x25, 0xe4, 0xd3, 0xad, 0x10, 0x2f, 0x8c, 0x89, 0x0c, 0x4e, 0x2c, 0xc6,
+ 0x17, 0x12, 0xfb, 0x5c, 0x62, 0x85, 0x35, 0x0f, 0x6f, 0x5a, 0x8b, 0x90, 0x0b, 0xd8, 0x43, 0xfe,
+ 0xdf, 0xb8, 0xe9, 0xfb, 0x84, 0xcb, 0x72, 0x5e, 0xfc, 0x5d, 0x83, 0x6a, 0x37, 0x6a, 0xe9, 0x9c,
+ 0xb9, 0x8e, 0xb5, 0x42, 0x08, 0xf6, 0xa9, 0xe9, 0x11, 0x55, 0x69, 0x28, 0xcd, 0x0a, 0x16, 0xbf,
+ 0xd1, 0x09, 0x1c, 0x2c, 0x9c, 0xa5, 0xef, 0x9a, 0x2b, 0x43, 0xf8, 0x72, 0xc2, 0x57, 0x95, 0x67,
+ 0xd3, 0x08, 0x72, 0x09, 0x87, 0x0b, 0x66, 0x85, 0x1e, 0xa1, 0x71, 0x91, 0xea, 0x61, 0x43, 0x69,
+ 0x56, 0xdb, 0x2d, 0x7d, 0xdb, 0x84, 0xf4, 0x4c, 0x42, 0x7d, 0x90, 0x0d, 0xc3, 0x9b, 0x2c, 0x08,
+ 0x43, 0x35, 0x5c, 0x12, 0x6e, 0xb8, 0xe6, 0x9c, 0xb8, 0x4b, 0xb5, 0xde, 0xc8, 0x37, 0xab, 0xed,
+ 0x2f, 0xde, 0x4e, 0x7a, 0xb9, 0x24, 0xfc, 0x27, 0x11, 0x33, 0xa4, 0x01, 0x5f, 0x61, 0x08, 0xd3,
+ 0x03, 0xf4, 0x23, 0x80, 0xc5, 0xe8, 0xc2, 0x11, 0x4b, 0x51, 0x0f, 0x04, 0xe5, 0x67, 0x6f, 0xa7,
+ 0xec, 0x27, 0x31, 0x38, 0x13, 0x8e, 0x2e, 0xa0, 0x6c, 0x31, 0x6f, 0xee, 0x50, 0xc2, 0xd5, 0x62,
+ 0x43, 0x69, 0x1e, 0xb5, 0xbf, 0xde, 0x81, 0xaa, 0x2f, 0x43, 0x67, 0x2b, 0x9f, 0xe0, 0x94, 0x08,
+ 0x7d, 0x09, 0x25, 0x42, 0xcd, 0xb9, 0x4b, 0x16, 0xea, 0x23, 0x31, 0x46, 0x2d, 0xe1, 0x4c, 0xb6,
+ 0xa8, 0xf7, 0x18, 0x73, 0xaf, 0x4c, 0x37, 0x24, 0x38, 0x81, 0xa2, 0x0e, 0x3c, 0xa1, 0x2c, 0x70,
+ 0x6e, 0x1c, 0x2b, 0x96, 0x89, 0x75, 0x6b, 0x52, 0x1a, 0x4d, 0xed, 0xa8, 0x91, 0x6f, 0x56, 0xf0,
+ 0x71, 0xd6, 0xd9, 0x97, 0x3e, 0x34, 0x81, 0x9a, 0xc5, 0x49, 0x56, 0x57, 0x2a, 0x88, 0x94, 0x1f,
+ 0x6f, 0x6f, 0x63, 0x22, 0x45, 0x88, 0x05, 0x16, 0x1f, 0x25, 0xc1, 0xb1, 0x1d, 0xd1, 0xdd, 0x93,
+ 0xa9, 0x5a, 0xdd, 0x85, 0xce, 0xdb, 0xb0, 0xb5, 0x53, 0x38, 0xdc, 0x90, 0x07, 0x52, 0xa1, 0x64,
+ 0x31, 0x1a, 0x10, 0x1a, 0x48, 0x81, 0x26, 0x26, 0xfa, 0x10, 0x2a, 0x9e, 0xe3, 0x11, 0x23, 0x58,
+ 0xf9, 0x89, 0x40, 0xcb, 0xd1, 0x41, 0x34, 0x5a, 0xed, 0xaf, 0x32, 0x54, 0xd2, 0xa1, 0xa7, 0x12,
+ 0x3f, 0x78, 0x83, 0xc4, 0x8b, 0xaf, 0x4b, 0x9c, 0xc2, 0xe3, 0x74, 0xf1, 0x46, 0x70, 0xcb, 0xc9,
+ 0xf2, 0x96, 0xb9, 0x0b, 0x51, 0x47, 0xb5, 0xfd, 0x72, 0x87, 0xad, 0xeb, 0x13, 0x12, 0x70, 0xc7,
+ 0x9a, 0x25, 0x14, 0xa3, 0x3d, 0x8c, 0x52, 0xe6, 0xf4, 0x14, 0xdd, 0x40, 0x7d, 0x9d, 0xcf, 0x9c,
+ 0x2f, 0xa3, 0xa6, 0x73, 0x22, 0xd9, 0xb7, 0xbb, 0x27, 0xeb, 0x46, 0xf1, 0x16, 0x19, 0xed, 0xe1,
+ 0x5a, 0x4a, 0x2a, 0xce, 0x02, 0x6d, 0x08, 0xa5, 0x19, 0x77, 0x6c, 0x9b, 0x70, 0xf4, 0x14, 0x0a,
+ 0x16, 0x0b, 0xe5, 0x70, 0x0b, 0xa3, 0x3d, 0x1c, 0x9b, 0x48, 0x83, 0x92, 0x4f, 0xb8, 0x95, 0x54,
+ 0xa0, 0x8c, 0xf6, 0x70, 0x72, 0xd0, 0x2b, 0xc2, 0x7e, 0x34, 0x73, 0xed, 0x9f, 0x3c, 0xd4, 0xee,
+ 0x35, 0x86, 0x9e, 0x42, 0xf1, 0xc6, 0x71, 0x03, 0xc2, 0xe5, 0x46, 0xa4, 0x85, 0x86, 0x70, 0x60,
+ 0xda, 0x36, 0x27, 0x76, 0xfc, 0x32, 0xaa, 0x65, 0x71, 0x09, 0x4f, 0x1e, 0x68, 0x6b, 0x8d, 0xc4,
+ 0x1b, 0x61, 0xe8, 0x73, 0x40, 0x0b, 0x42, 0x99, 0xe7, 0x50, 0x33, 0x60, 0xdc, 0x90, 0xa9, 0x2a,
+ 0x22, 0xd5, 0xa3, 0x8c, 0xe7, 0x34, 0xce, 0x7a, 0x0d, 0x6a, 0x16, 0xbe, 0x51, 0x01, 0xfc, 0xdf,
+ 0x0a, 0x9e, 0x65, 0x28, 0xba, 0xd9, 0x62, 0x06, 0xd1, 0xb3, 0xe2, 0xf9, 0x26, 0x77, 0x96, 0x8c,
+ 0xaa, 0xfb, 0xe2, 0x2d, 0x78, 0x40, 0xf5, 0xfd, 0x14, 0x27, 0x2e, 0x7e, 0x26, 0x0e, 0x7d, 0x02,
+ 0xb5, 0x54, 0x5a, 0xc6, 0x5d, 0x74, 0xc1, 0xd5, 0x42, 0x34, 0x71, 0x7c, 0x94, 0x1e, 0x8b, 0x6b,
+ 0x8f, 0xbe, 0x82, 0x72, 0xf2, 0xd2, 0x0b, 0xb1, 0x56, 0xdb, 0x1f, 0xbc, 0xf6, 0x48, 0x0c, 0x24,
+ 0x00, 0xa7, 0x50, 0x34, 0x81, 0x52, 0x10, 0x2f, 0x5b, 0x2d, 0x89, 0xa8, 0xce, 0x2e, 0x5a, 0x92,
+ 0x3a, 0xc1, 0x09, 0x87, 0xf6, 0xaf, 0x02, 0x87, 0x1b, 0x02, 0xcb, 0xac, 0x5c, 0x79, 0xe3, 0xca,
+ 0x0b, 0xef, 0xb6, 0xf2, 0x6c, 0xdb, 0xb9, 0x77, 0x6a, 0x3b, 0xff, 0xfe, 0x6d, 0xf7, 0xaa, 0x50,
+ 0x49, 0x6f, 0x91, 0xf6, 0x3d, 0xd4, 0xee, 0x7d, 0x6e, 0x50, 0x1d, 0xf2, 0xbf, 0x92, 0x95, 0x9c,
+ 0x40, 0xf4, 0x13, 0x1d, 0x43, 0x21, 0xde, 0x66, 0x7c, 0x11, 0x62, 0xe3, 0xbb, 0xdc, 0x37, 0xca,
+ 0x0b, 0x13, 0x9e, 0x6c, 0xfd, 0x1e, 0xa0, 0x67, 0xf0, 0xb8, 0x7f, 0x36, 0xe9, 0x8d, 0xa7, 0x43,
+ 0xe3, 0x72, 0x7a, 0x71, 0x3e, 0xec, 0x8f, 0x4f, 0xc7, 0xc3, 0x41, 0x7d, 0x0f, 0x95, 0x20, 0xdf,
+ 0x9d, 0x0e, 0xea, 0x0a, 0x2a, 0x42, 0xee, 0x0c, 0xd7, 0x73, 0xe8, 0x39, 0x68, 0xdd, 0xe9, 0xc0,
+ 0xf8, 0x79, 0x3c, 0x1b, 0x19, 0x93, 0xee, 0xac, 0x3f, 0x1a, 0x4f, 0x5f, 0x19, 0x78, 0x78, 0x71,
+ 0x76, 0x89, 0xfb, 0xc3, 0x7a, 0xbe, 0xf7, 0xbb, 0x02, 0xaa, 0xc5, 0xbc, 0xad, 0x2d, 0xf7, 0x20,
+ 0xee, 0x39, 0x1a, 0xde, 0xb9, 0xf2, 0xcb, 0x0f, 0x12, 0x63, 0x33, 0xd7, 0xa4, 0xb6, 0xce, 0xb8,
+ 0xdd, 0xb2, 0x09, 0x15, 0xa3, 0x6d, 0xc5, 0x2e, 0xd3, 0x77, 0x96, 0x9b, 0xff, 0x4c, 0x5e, 0xae,
+ 0xad, 0x3f, 0x72, 0xda, 0xab, 0x98, 0xa0, 0xef, 0xb2, 0x70, 0xa1, 0x4f, 0xd6, 0xa9, 0xae, 0x3a,
+ 0x7f, 0x26, 0xce, 0x6b, 0xe1, 0xbc, 0x5e, 0x3b, 0xaf, 0xaf, 0x3a, 0xf3, 0xa2, 0x48, 0xd2, 0xf9,
+ 0x2f, 0x00, 0x00, 0xff, 0xff, 0x66, 0xb5, 0x16, 0x64, 0x76, 0x09, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go
new file mode 100644
index 00000000..0b51478a
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go
@@ -0,0 +1,527 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/alert_service.proto
+
+package monitoring
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "google.golang.org/genproto/googleapis/api/annotations"
+import google_protobuf5 "github.com/golang/protobuf/ptypes/empty"
+import google_protobuf6 "google.golang.org/genproto/protobuf/field_mask"
+
+import (
+ context "golang.org/x/net/context"
+ grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// The protocol for the `CreateAlertPolicy` request.
+type CreateAlertPolicyRequest struct {
+ // The project in which to create the alerting policy. The format is
+ // `projects/[PROJECT_ID]`.
+ //
+ // Note that this field names the parent container in which the alerting
+ // policy will be written, not the name of the created policy. The alerting
+ // policy that is returned will have a name that contains a normalized
+ // representation of this name as a prefix but adds a suffix of the form
+ // `/alertPolicies/[POLICY_ID]`, identifying the policy in the container.
+ Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
+ // The requested alerting policy. You should omit the `name` field in this
+ // policy. The name will be returned in the new policy, including
+ // a new [ALERT_POLICY_ID] value.
+ AlertPolicy *AlertPolicy `protobuf:"bytes,2,opt,name=alert_policy,json=alertPolicy" json:"alert_policy,omitempty"`
+}
+
+func (m *CreateAlertPolicyRequest) Reset() { *m = CreateAlertPolicyRequest{} }
+func (m *CreateAlertPolicyRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateAlertPolicyRequest) ProtoMessage() {}
+func (*CreateAlertPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
+
+func (m *CreateAlertPolicyRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *CreateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy {
+ if m != nil {
+ return m.AlertPolicy
+ }
+ return nil
+}
+
+// The protocol for the `GetAlertPolicy` request.
+type GetAlertPolicyRequest struct {
+ // The alerting policy to retrieve. The format is
+ //
+ // projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]
+ Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
+}
+
+func (m *GetAlertPolicyRequest) Reset() { *m = GetAlertPolicyRequest{} }
+func (m *GetAlertPolicyRequest) String() string { return proto.CompactTextString(m) }
+func (*GetAlertPolicyRequest) ProtoMessage() {}
+func (*GetAlertPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
+
+func (m *GetAlertPolicyRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// The protocol for the `ListAlertPolicies` request.
+type ListAlertPoliciesRequest struct {
+ // The project whose alert policies are to be listed. The format is
+ //
+ // projects/[PROJECT_ID]
+ //
+ // Note that this field names the parent container in which the alerting
+ // policies to be listed are stored. To retrieve a single alerting policy
+ // by name, use the
+ // [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy]
+ // operation, instead.
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+ // If provided, this field specifies the criteria that must be met by
+ // alert policies to be included in the response.
+ //
+ // For more details, see [sorting and
+ // filtering](/monitoring/api/v3/sorting-and-filtering).
+ Filter string `protobuf:"bytes,5,opt,name=filter" json:"filter,omitempty"`
+ // A comma-separated list of fields by which to sort the result. Supports
+ // the same set of field references as the `filter` field. Entries can be
+ // prefixed with a minus sign to sort by the field in descending order.
+ //
+ // For more details, see [sorting and
+ // filtering](/monitoring/api/v3/sorting-and-filtering).
+ OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy" json:"order_by,omitempty"`
+ // The maximum number of results to return in a single response.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return more results from the previous method call.
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
+}
+
+func (m *ListAlertPoliciesRequest) Reset() { *m = ListAlertPoliciesRequest{} }
+func (m *ListAlertPoliciesRequest) String() string { return proto.CompactTextString(m) }
+func (*ListAlertPoliciesRequest) ProtoMessage() {}
+func (*ListAlertPoliciesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
+
+func (m *ListAlertPoliciesRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *ListAlertPoliciesRequest) GetFilter() string {
+ if m != nil {
+ return m.Filter
+ }
+ return ""
+}
+
+func (m *ListAlertPoliciesRequest) GetOrderBy() string {
+ if m != nil {
+ return m.OrderBy
+ }
+ return ""
+}
+
+func (m *ListAlertPoliciesRequest) GetPageSize() int32 {
+ if m != nil {
+ return m.PageSize
+ }
+ return 0
+}
+
+func (m *ListAlertPoliciesRequest) GetPageToken() string {
+ if m != nil {
+ return m.PageToken
+ }
+ return ""
+}
+
+// The protocol for the `ListAlertPolicies` response.
+type ListAlertPoliciesResponse struct {
+ // The returned alert policies.
+ AlertPolicies []*AlertPolicy `protobuf:"bytes,3,rep,name=alert_policies,json=alertPolicies" json:"alert_policies,omitempty"`
+ // If there might be more results than were returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `pageToken` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
+}
+
+func (m *ListAlertPoliciesResponse) Reset() { *m = ListAlertPoliciesResponse{} }
+func (m *ListAlertPoliciesResponse) String() string { return proto.CompactTextString(m) }
+func (*ListAlertPoliciesResponse) ProtoMessage() {}
+func (*ListAlertPoliciesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} }
+
+func (m *ListAlertPoliciesResponse) GetAlertPolicies() []*AlertPolicy {
+ if m != nil {
+ return m.AlertPolicies
+ }
+ return nil
+}
+
+func (m *ListAlertPoliciesResponse) GetNextPageToken() string {
+ if m != nil {
+ return m.NextPageToken
+ }
+ return ""
+}
+
+// The protocol for the `UpdateAlertPolicy` request.
+type UpdateAlertPolicyRequest struct {
+ // Optional. A list of alerting policy field names. If this field is not
+ // empty, each listed field in the existing alerting policy is set to the
+ // value of the corresponding field in the supplied policy (`alert_policy`),
+ // or to the field's default value if the field is not in the supplied
+ // alerting policy. Fields not listed retain their previous value.
+ //
+ // Examples of valid field masks include `display_name`, `documentation`,
+ // `documentation.content`, `documentation.mime_type`, `user_labels`,
+ // `user_label.nameofkey`, `enabled`, `conditions`, `combiner`, etc.
+ //
+ // If this field is empty, then the supplied alerting policy replaces the
+ // existing policy. It is the same as deleting the existing policy and
+ // adding the supplied policy, except for the following:
+ //
+ // + The new policy will have the same `[ALERT_POLICY_ID]` as the former
+ // policy. This gives you continuity with the former policy in your
+ // notifications and incidents.
+ // + Conditions in the new policy will keep their former `[CONDITION_ID]` if
+ // the supplied condition includes the `name` field with that
+ // `[CONDITION_ID]`. If the supplied condition omits the `name` field,
+ // then a new `[CONDITION_ID]` is created.
+ UpdateMask *google_protobuf6.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
+ // Required. The updated alerting policy or the updated values for the
+ // fields listed in `update_mask`.
+ // If `update_mask` is not empty, any fields in this policy that are
+ // not in `update_mask` are ignored.
+ AlertPolicy *AlertPolicy `protobuf:"bytes,3,opt,name=alert_policy,json=alertPolicy" json:"alert_policy,omitempty"`
+}
+
+func (m *UpdateAlertPolicyRequest) Reset() { *m = UpdateAlertPolicyRequest{} }
+func (m *UpdateAlertPolicyRequest) String() string { return proto.CompactTextString(m) }
+func (*UpdateAlertPolicyRequest) ProtoMessage() {}
+func (*UpdateAlertPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} }
+
+func (m *UpdateAlertPolicyRequest) GetUpdateMask() *google_protobuf6.FieldMask {
+ if m != nil {
+ return m.UpdateMask
+ }
+ return nil
+}
+
+func (m *UpdateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy {
+ if m != nil {
+ return m.AlertPolicy
+ }
+ return nil
+}
+
+// The protocol for the `DeleteAlertPolicy` request.
+type DeleteAlertPolicyRequest struct {
+ // The alerting policy to delete. The format is:
+ //
+ // projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]
+ //
+ // For more information, see [AlertPolicy][google.monitoring.v3.AlertPolicy].
+ Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
+}
+
+func (m *DeleteAlertPolicyRequest) Reset() { *m = DeleteAlertPolicyRequest{} }
+func (m *DeleteAlertPolicyRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteAlertPolicyRequest) ProtoMessage() {}
+func (*DeleteAlertPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} }
+
+func (m *DeleteAlertPolicyRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*CreateAlertPolicyRequest)(nil), "google.monitoring.v3.CreateAlertPolicyRequest")
+ proto.RegisterType((*GetAlertPolicyRequest)(nil), "google.monitoring.v3.GetAlertPolicyRequest")
+ proto.RegisterType((*ListAlertPoliciesRequest)(nil), "google.monitoring.v3.ListAlertPoliciesRequest")
+ proto.RegisterType((*ListAlertPoliciesResponse)(nil), "google.monitoring.v3.ListAlertPoliciesResponse")
+ proto.RegisterType((*UpdateAlertPolicyRequest)(nil), "google.monitoring.v3.UpdateAlertPolicyRequest")
+ proto.RegisterType((*DeleteAlertPolicyRequest)(nil), "google.monitoring.v3.DeleteAlertPolicyRequest")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for AlertPolicyService service
+
+type AlertPolicyServiceClient interface {
+ // Lists the existing alerting policies for the project.
+ ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error)
+ // Gets a single alerting policy.
+ GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
+ // Creates a new alerting policy.
+ CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
+ // Deletes an alerting policy.
+ DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error)
+ // Updates an alerting policy. You can either replace the entire policy with
+ // a new one or replace only certain fields in the current alerting policy by
+ // specifying the fields to be updated via `updateMask`. Returns the
+ // updated alerting policy.
+ UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
+}
+
+type alertPolicyServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewAlertPolicyServiceClient(cc *grpc.ClientConn) AlertPolicyServiceClient {
+ return &alertPolicyServiceClient{cc}
+}
+
+func (c *alertPolicyServiceClient) ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) {
+ out := new(ListAlertPoliciesResponse)
+ err := grpc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *alertPolicyServiceClient) GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
+ out := new(AlertPolicy)
+ err := grpc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *alertPolicyServiceClient) CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
+ out := new(AlertPolicy)
+ err := grpc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *alertPolicyServiceClient) DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) {
+ out := new(google_protobuf5.Empty)
+ err := grpc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *alertPolicyServiceClient) UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
+ out := new(AlertPolicy)
+ err := grpc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for AlertPolicyService service
+
+type AlertPolicyServiceServer interface {
+ // Lists the existing alerting policies for the project.
+ ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error)
+ // Gets a single alerting policy.
+ GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error)
+ // Creates a new alerting policy.
+ CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error)
+ // Deletes an alerting policy.
+ DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*google_protobuf5.Empty, error)
+ // Updates an alerting policy. You can either replace the entire policy with
+ // a new one or replace only certain fields in the current alerting policy by
+ // specifying the fields to be updated via `updateMask`. Returns the
+ // updated alerting policy.
+ UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error)
+}
+
+func RegisterAlertPolicyServiceServer(s *grpc.Server, srv AlertPolicyServiceServer) {
+ s.RegisterService(&_AlertPolicyService_serviceDesc, srv)
+}
+
+func _AlertPolicyService_ListAlertPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListAlertPoliciesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, req.(*ListAlertPoliciesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AlertPolicyService_GetAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetAlertPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, req.(*GetAlertPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AlertPolicyService_CreateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateAlertPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, req.(*CreateAlertPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AlertPolicyService_DeleteAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteAlertPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, req.(*DeleteAlertPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AlertPolicyService_UpdateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateAlertPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, req.(*UpdateAlertPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _AlertPolicyService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.AlertPolicyService",
+ HandlerType: (*AlertPolicyServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListAlertPolicies",
+ Handler: _AlertPolicyService_ListAlertPolicies_Handler,
+ },
+ {
+ MethodName: "GetAlertPolicy",
+ Handler: _AlertPolicyService_GetAlertPolicy_Handler,
+ },
+ {
+ MethodName: "CreateAlertPolicy",
+ Handler: _AlertPolicyService_CreateAlertPolicy_Handler,
+ },
+ {
+ MethodName: "DeleteAlertPolicy",
+ Handler: _AlertPolicyService_DeleteAlertPolicy_Handler,
+ },
+ {
+ MethodName: "UpdateAlertPolicy",
+ Handler: _AlertPolicyService_UpdateAlertPolicy_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/alert_service.proto",
+}
+
+func init() { proto.RegisterFile("google/monitoring/v3/alert_service.proto", fileDescriptor1) }
+
+var fileDescriptor1 = []byte{
+ // 656 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x41, 0x6f, 0xd3, 0x4c,
+ 0x10, 0x95, 0x93, 0x36, 0x5f, 0xbb, 0xfd, 0x5a, 0x94, 0x15, 0x54, 0xae, 0x0b, 0x52, 0x30, 0x2a,
+ 0x54, 0xad, 0xb0, 0xa5, 0xf8, 0x04, 0x15, 0x48, 0xa4, 0x85, 0xf6, 0x40, 0xa5, 0x28, 0x85, 0x1e,
+ 0x50, 0xa4, 0x68, 0x93, 0x4c, 0xac, 0x25, 0x8e, 0xd7, 0x78, 0x37, 0x11, 0x29, 0xea, 0x85, 0x23,
+ 0x12, 0xe2, 0xc0, 0x99, 0x03, 0x47, 0x38, 0x20, 0x7e, 0x07, 0x57, 0xfe, 0x02, 0x3f, 0x04, 0x79,
+ 0xed, 0x34, 0x76, 0x6d, 0xab, 0x16, 0xb7, 0xcc, 0xce, 0xdb, 0x99, 0xb7, 0x6f, 0xde, 0x38, 0x68,
+ 0xdb, 0x66, 0xcc, 0x76, 0xc0, 0x1c, 0x31, 0x97, 0x0a, 0xe6, 0x53, 0xd7, 0x36, 0x27, 0x96, 0x49,
+ 0x1c, 0xf0, 0x45, 0x87, 0x83, 0x3f, 0xa1, 0x3d, 0x30, 0x3c, 0x9f, 0x09, 0x86, 0xaf, 0x87, 0x48,
+ 0x63, 0x8e, 0x34, 0x26, 0x96, 0x76, 0x33, 0xba, 0x4f, 0x3c, 0x6a, 0x12, 0xd7, 0x65, 0x82, 0x08,
+ 0xca, 0x5c, 0x1e, 0xde, 0xd1, 0x6a, 0xf9, 0xd5, 0x23, 0xc4, 0x66, 0x84, 0x90, 0x51, 0x77, 0x3c,
+ 0x30, 0x61, 0xe4, 0x89, 0xe9, 0xa5, 0xeb, 0x17, 0xc9, 0x01, 0x05, 0xa7, 0xdf, 0x19, 0x11, 0x3e,
+ 0x0c, 0x11, 0xba, 0x40, 0xea, 0xbe, 0x0f, 0x44, 0xc0, 0x93, 0xa0, 0x66, 0x93, 0x39, 0xb4, 0x37,
+ 0x6d, 0xc1, 0x9b, 0x31, 0x70, 0x81, 0x31, 0x5a, 0x70, 0xc9, 0x08, 0xd4, 0x72, 0x4d, 0xd9, 0x5e,
+ 0x6e, 0xc9, 0xdf, 0xf8, 0x00, 0xfd, 0x1f, 0xbe, 0xcd, 0x93, 0x50, 0xb5, 0x54, 0x53, 0xb6, 0x57,
+ 0xea, 0xb7, 0x8d, 0xac, 0xb7, 0x19, 0xf1, 0x9a, 0x2b, 0x64, 0x1e, 0xe8, 0xbb, 0xe8, 0xc6, 0x21,
+ 0x88, 0x62, 0x2d, 0xf5, 0x2f, 0x0a, 0x52, 0x9f, 0x53, 0x1e, 0x83, 0x53, 0xe0, 0x97, 0x2f, 0x2c,
+ 0xc4, 0x38, 0xae, 0xa3, 0xca, 0x80, 0x3a, 0x02, 0x7c, 0x75, 0x51, 0x9e, 0x46, 0x11, 0xde, 0x40,
+ 0x4b, 0xcc, 0xef, 0x83, 0xdf, 0xe9, 0x4e, 0xd5, 0x8a, 0xcc, 0xfc, 0x27, 0xe3, 0xc6, 0x14, 0x6f,
+ 0xa2, 0x65, 0x8f, 0xd8, 0xd0, 0xe1, 0xf4, 0x0c, 0xe4, 0x9b, 0x16, 0x5b, 0x4b, 0xc1, 0xc1, 0x09,
+ 0x3d, 0x03, 0x7c, 0x0b, 0x21, 0x99, 0x14, 0x6c, 0x08, 0x6e, 0x44, 0x4d, 0xc2, 0x5f, 0x04, 0x07,
+ 0xfa, 0x47, 0x05, 0x6d, 0x64, 0xf0, 0xe3, 0x1e, 0x73, 0x39, 0xe0, 0x23, 0xb4, 0x16, 0x13, 0x8c,
+ 0x02, 0x57, 0xcb, 0xb5, 0x72, 0x31, 0xc9, 0x56, 0x49, 0xbc, 0x22, 0xbe, 0x8b, 0xae, 0xb9, 0xf0,
+ 0x56, 0x74, 0x62, 0x5c, 0x4a, 0x92, 0xcb, 0x6a, 0x70, 0xdc, 0xbc, 0xe0, 0x13, 0xe8, 0xf5, 0xd2,
+ 0xeb, 0x67, 0xcf, 0x74, 0x0f, 0xad, 0x8c, 0x65, 0x4e, 0x9a, 0x20, 0x1a, 0x9f, 0x36, 0xe3, 0x32,
+ 0xf3, 0x89, 0xf1, 0x2c, 0xf0, 0xc9, 0x31, 0xe1, 0xc3, 0x16, 0x0a, 0xe1, 0xc1, 0xef, 0xd4, 0xf0,
+ 0xcb, 0xff, 0x34, 0x7c, 0x03, 0xa9, 0x07, 0xe0, 0x40, 0x51, 0xcb, 0xd5, 0x7f, 0x54, 0x10, 0x8e,
+ 0x41, 0x4f, 0xc2, 0xa5, 0xc2, 0x5f, 0x15, 0x54, 0x4d, 0xc9, 0x8e, 0x8d, 0x6c, 0x32, 0x79, 0xfe,
+ 0xd1, 0xcc, 0xc2, 0xf8, 0x70, 0x9e, 0xfa, 0xee, 0xfb, 0xdf, 0x7f, 0x3e, 0x97, 0xb6, 0xf0, 0x9d,
+ 0x60, 0x11, 0xdf, 0x05, 0x04, 0x1f, 0x79, 0x3e, 0x7b, 0x0d, 0x3d, 0xc1, 0xcd, 0x9d, 0x73, 0x33,
+ 0x39, 0xb2, 0x4f, 0x0a, 0x5a, 0x4b, 0x1a, 0x1d, 0xef, 0x66, 0x37, 0xcc, 0x5c, 0x07, 0xed, 0x6a,
+ 0x69, 0xf5, 0xfb, 0x92, 0xcf, 0x3d, 0xbc, 0x95, 0xc5, 0x27, 0x49, 0xc7, 0xdc, 0x39, 0x97, 0xaa,
+ 0xa5, 0x16, 0x3e, 0x4f, 0xb5, 0xbc, 0x2f, 0x43, 0x11, 0x5e, 0x0f, 0x24, 0x2f, 0x4b, 0x2f, 0xa2,
+ 0xd3, 0xc3, 0x84, 0xad, 0xf0, 0x07, 0x05, 0x55, 0x53, 0x0e, 0xc9, 0xe3, 0x98, 0x67, 0x25, 0x6d,
+ 0x3d, 0x65, 0xea, 0xa7, 0xc1, 0x97, 0x71, 0x26, 0xd8, 0x4e, 0x41, 0xc1, 0x7e, 0x2a, 0xa8, 0x9a,
+ 0xda, 0xa6, 0x3c, 0x32, 0x79, 0x6b, 0x57, 0x44, 0xb0, 0x23, 0xc9, 0xab, 0x51, 0xaf, 0x4b, 0x5e,
+ 0x71, 0x41, 0x8c, 0xab, 0x48, 0x26, 0xf5, 0x6b, 0x7c, 0x53, 0x90, 0xda, 0x63, 0xa3, 0xcc, 0x96,
+ 0x8d, 0xaa, 0xec, 0x19, 0x2d, 0x51, 0x33, 0x90, 0xa6, 0xa9, 0xbc, 0x7a, 0x1c, 0x41, 0x6d, 0xe6,
+ 0x10, 0xd7, 0x36, 0x98, 0x6f, 0x9b, 0x36, 0xb8, 0x52, 0x38, 0x33, 0x4c, 0x11, 0x8f, 0xf2, 0xe4,
+ 0xbf, 0xd0, 0xde, 0x3c, 0xfa, 0x5e, 0xd2, 0x0e, 0xc3, 0x02, 0xfb, 0x0e, 0x1b, 0xf7, 0x8d, 0xe3,
+ 0x79, 0xc7, 0x53, 0xeb, 0xd7, 0x2c, 0xd9, 0x96, 0xc9, 0xf6, 0x3c, 0xd9, 0x3e, 0xb5, 0xba, 0x15,
+ 0xd9, 0xc4, 0xfa, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x6f, 0x1f, 0xe6, 0xf0, 0x47, 0x07, 0x00, 0x00,
+}
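// Editor's note (not part of the vendored file above): a minimal, hypothetical sketch of how
// the newly vendored AlertPolicyServiceClient could be exercised. The dial target, project
// resource name, and the omission of TLS/authentication are assumptions for illustration
// only; the sidecar itself does not call this API, and a real client would need proper
// transport credentials.
package main

import (
	"context"
	"fmt"
	"log"

	monitoring "google.golang.org/genproto/googleapis/monitoring/v3"
	"google.golang.org/grpc"
)

func main() {
	// Sketch only: a production client would use TLS and OAuth credentials here.
	conn, err := grpc.Dial("monitoring.googleapis.com:443", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := monitoring.NewAlertPolicyServiceClient(conn)
	resp, err := client.ListAlertPolicies(context.Background(), &monitoring.ListAlertPoliciesRequest{
		Name: "projects/my-project", // hypothetical project resource name
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range resp.GetAlertPolicies() {
		fmt.Println(p.GetName())
	}
}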
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go
index 4d967f52..60239967 100644
--- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go
@@ -1,61 +1,12 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/monitoring/v3/common.proto
-/*
-Package monitoring is a generated protocol buffer package.
-
-It is generated from these files:
- google/monitoring/v3/common.proto
- google/monitoring/v3/group.proto
- google/monitoring/v3/group_service.proto
- google/monitoring/v3/metric.proto
- google/monitoring/v3/metric_service.proto
- google/monitoring/v3/uptime.proto
- google/monitoring/v3/uptime_service.proto
-
-It has these top-level messages:
- TypedValue
- TimeInterval
- Aggregation
- Group
- ListGroupsRequest
- ListGroupsResponse
- GetGroupRequest
- CreateGroupRequest
- UpdateGroupRequest
- DeleteGroupRequest
- ListGroupMembersRequest
- ListGroupMembersResponse
- Point
- TimeSeries
- ListMonitoredResourceDescriptorsRequest
- ListMonitoredResourceDescriptorsResponse
- GetMonitoredResourceDescriptorRequest
- ListMetricDescriptorsRequest
- ListMetricDescriptorsResponse
- GetMetricDescriptorRequest
- CreateMetricDescriptorRequest
- DeleteMetricDescriptorRequest
- ListTimeSeriesRequest
- ListTimeSeriesResponse
- CreateTimeSeriesRequest
- CreateTimeSeriesError
- UptimeCheckConfig
- UptimeCheckIp
- ListUptimeCheckConfigsRequest
- ListUptimeCheckConfigsResponse
- GetUptimeCheckConfigRequest
- CreateUptimeCheckConfigRequest
- UpdateUptimeCheckConfigRequest
- DeleteUptimeCheckConfigRequest
- ListUptimeCheckIpsRequest
- ListUptimeCheckIpsResponse
-*/
package monitoring
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
+import _ "google.golang.org/genproto/googleapis/api/annotations"
import google_api2 "google.golang.org/genproto/googleapis/api/distribution"
import google_protobuf3 "github.com/golang/protobuf/ptypes/duration"
import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp"
@@ -65,11 +16,87 @@ var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+// Specifies an ordering relationship on two arguments, here called left and
+// right.
+type ComparisonType int32
+
+const (
+ // No ordering relationship is specified.
+ ComparisonType_COMPARISON_UNSPECIFIED ComparisonType = 0
+ // The left argument is greater than the right argument.
+ ComparisonType_COMPARISON_GT ComparisonType = 1
+ // The left argument is greater than or equal to the right argument.
+ ComparisonType_COMPARISON_GE ComparisonType = 2
+ // The left argument is less than the right argument.
+ ComparisonType_COMPARISON_LT ComparisonType = 3
+ // The left argument is less than or equal to the right argument.
+ ComparisonType_COMPARISON_LE ComparisonType = 4
+ // The left argument is equal to the right argument.
+ ComparisonType_COMPARISON_EQ ComparisonType = 5
+ // The left argument is not equal to the right argument.
+ ComparisonType_COMPARISON_NE ComparisonType = 6
+)
+
+var ComparisonType_name = map[int32]string{
+ 0: "COMPARISON_UNSPECIFIED",
+ 1: "COMPARISON_GT",
+ 2: "COMPARISON_GE",
+ 3: "COMPARISON_LT",
+ 4: "COMPARISON_LE",
+ 5: "COMPARISON_EQ",
+ 6: "COMPARISON_NE",
+}
+var ComparisonType_value = map[string]int32{
+ "COMPARISON_UNSPECIFIED": 0,
+ "COMPARISON_GT": 1,
+ "COMPARISON_GE": 2,
+ "COMPARISON_LT": 3,
+ "COMPARISON_LE": 4,
+ "COMPARISON_EQ": 5,
+ "COMPARISON_NE": 6,
+}
+
+func (x ComparisonType) String() string {
+ return proto.EnumName(ComparisonType_name, int32(x))
+}
+func (ComparisonType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{0} }
+
+// The tier of service for a Stackdriver account. Please see the
+// [service tiers documentation](https://cloud.google.com/monitoring/accounts/tiers)
+// for more details.
+type ServiceTier int32
+
+const (
+ // An invalid sentinel value, used to indicate that a tier has not
+ // been provided explicitly.
+ ServiceTier_SERVICE_TIER_UNSPECIFIED ServiceTier = 0
+ // The Stackdriver Basic tier, a free tier of service that provides basic
+ // features, a moderate allotment of logs, and access to built-in metrics.
+ // A number of features are not available in this tier. For more details,
+ // see [the service tiers documentation](https://cloud.google.com/monitoring/accounts/tiers).
+ ServiceTier_SERVICE_TIER_BASIC ServiceTier = 1
+ // The Stackdriver Premium tier, a higher, more expensive tier of service
+ // that provides access to all Stackdriver features, lets you use Stackdriver
+ // with AWS accounts, and has larger allotments for logs and metrics. For
+ // more details, see [the service tiers documentation](https://cloud.google.com/monitoring/accounts/tiers).
+ ServiceTier_SERVICE_TIER_PREMIUM ServiceTier = 2
+)
+
+var ServiceTier_name = map[int32]string{
+ 0: "SERVICE_TIER_UNSPECIFIED",
+ 1: "SERVICE_TIER_BASIC",
+ 2: "SERVICE_TIER_PREMIUM",
+}
+var ServiceTier_value = map[string]int32{
+ "SERVICE_TIER_UNSPECIFIED": 0,
+ "SERVICE_TIER_BASIC": 1,
+ "SERVICE_TIER_PREMIUM": 2,
+}
+
+func (x ServiceTier) String() string {
+ return proto.EnumName(ServiceTier_name, int32(x))
+}
+func (ServiceTier) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{1} }
// The Aligner describes how to bring the data points in a single
// time series into temporal alignment.
@@ -85,11 +112,22 @@ const (
// delta metric to a delta metric requires that the alignment
// period be increased. The value type of the result is the same
// as the value type of the input.
+ //
+ // One can think of this aligner as a rate but without time units; that
+ // is, the output is conceptually (second_point - first_point).
Aggregation_ALIGN_DELTA Aggregation_Aligner = 1
// Align and convert to a rate. This alignment is valid for
// cumulative metrics and delta metrics with numeric values. The output is a
// gauge metric with value type
// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ //
+ // One can think of this aligner as conceptually providing the slope of
+ // the line that passes through the value at the start and end of the
+ // window. In other words, this is conceptually ((y1 - y0)/(t1 - t0)),
+ // and the output unit is one that has a "/time" dimension.
+ //
+ // If, by rate, you are looking for percentage change, see the
+ // `ALIGN_PERCENT_CHANGE` aligner option.
Aggregation_ALIGN_RATE Aggregation_Aligner = 2
// Align by interpolating between adjacent points around the
// period boundary. This alignment is valid for gauge
@@ -144,6 +182,12 @@ const (
// [INT64][google.api.MetricDescriptor.ValueType.INT64].
Aggregation_ALIGN_COUNT_TRUE Aggregation_Aligner = 16
// Align time series via aggregation. The resulting data point in
+ // the alignment period is the count of False-valued data points in the
+ // period. This alignment is valid for gauge metrics with
+ // Boolean values. The value type of the output is
+ // [INT64][google.api.MetricDescriptor.ValueType.INT64].
+ Aggregation_ALIGN_COUNT_FALSE Aggregation_Aligner = 24
+ // Align time series via aggregation. The resulting data point in
// the alignment period is the fraction of True-valued data points in the
// period. This alignment is valid for gauge metrics with Boolean values.
// The output value is in the range [0, 1] and has value type
@@ -173,6 +217,22 @@ const (
// with distribution values. The output is a gauge metric with value type
// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
Aggregation_ALIGN_PERCENTILE_05 Aggregation_Aligner = 21
+ // Align and convert to a percentage change. This alignment is valid for
+ // gauge and delta metrics with numeric values. This alignment conceptually
+ // computes the equivalent of "((current - previous)/previous)*100"
+ // where the previous value is determined based on the alignmentPeriod.
+ // In the event that previous is 0, the calculated value is infinity, with the
+ // exception that if both (current - previous) and previous are 0, the
+ // calculated value is 0.
+ // A 10-minute moving mean is computed at each point of the time window
+ // prior to the above calculation to smooth the metric and prevent false
+ // positives from very short-lived spikes.
+ // Only applicable for data that is >= 0. Any values < 0 are treated as
+ // no data. While delta metrics are accepted by this alignment, special care
+ // should be taken that the values for the metric will always be positive.
+ // The output is a gauge metric with value type
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ Aggregation_ALIGN_PERCENT_CHANGE Aggregation_Aligner = 23
)
var Aggregation_Aligner_name = map[int32]string{
@@ -188,36 +248,40 @@ var Aggregation_Aligner_name = map[int32]string{
14: "ALIGN_SUM",
15: "ALIGN_STDDEV",
16: "ALIGN_COUNT_TRUE",
+ 24: "ALIGN_COUNT_FALSE",
17: "ALIGN_FRACTION_TRUE",
18: "ALIGN_PERCENTILE_99",
19: "ALIGN_PERCENTILE_95",
20: "ALIGN_PERCENTILE_50",
21: "ALIGN_PERCENTILE_05",
+ 23: "ALIGN_PERCENT_CHANGE",
}
var Aggregation_Aligner_value = map[string]int32{
- "ALIGN_NONE": 0,
- "ALIGN_DELTA": 1,
- "ALIGN_RATE": 2,
- "ALIGN_INTERPOLATE": 3,
- "ALIGN_NEXT_OLDER": 4,
- "ALIGN_MIN": 10,
- "ALIGN_MAX": 11,
- "ALIGN_MEAN": 12,
- "ALIGN_COUNT": 13,
- "ALIGN_SUM": 14,
- "ALIGN_STDDEV": 15,
- "ALIGN_COUNT_TRUE": 16,
- "ALIGN_FRACTION_TRUE": 17,
- "ALIGN_PERCENTILE_99": 18,
- "ALIGN_PERCENTILE_95": 19,
- "ALIGN_PERCENTILE_50": 20,
- "ALIGN_PERCENTILE_05": 21,
+ "ALIGN_NONE": 0,
+ "ALIGN_DELTA": 1,
+ "ALIGN_RATE": 2,
+ "ALIGN_INTERPOLATE": 3,
+ "ALIGN_NEXT_OLDER": 4,
+ "ALIGN_MIN": 10,
+ "ALIGN_MAX": 11,
+ "ALIGN_MEAN": 12,
+ "ALIGN_COUNT": 13,
+ "ALIGN_SUM": 14,
+ "ALIGN_STDDEV": 15,
+ "ALIGN_COUNT_TRUE": 16,
+ "ALIGN_COUNT_FALSE": 24,
+ "ALIGN_FRACTION_TRUE": 17,
+ "ALIGN_PERCENTILE_99": 18,
+ "ALIGN_PERCENTILE_95": 19,
+ "ALIGN_PERCENTILE_50": 20,
+ "ALIGN_PERCENTILE_05": 21,
+ "ALIGN_PERCENT_CHANGE": 23,
}
func (x Aggregation_Aligner) String() string {
return proto.EnumName(Aggregation_Aligner_name, int32(x))
}
-func (Aggregation_Aligner) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
+func (Aggregation_Aligner) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{2, 0} }
// A Reducer describes how to aggregate data points from multiple
// time series into a single time series.
@@ -263,6 +327,11 @@ const (
// and gauge metrics of Boolean value type. The value type of
// the output is [INT64][google.api.MetricDescriptor.ValueType.INT64].
Aggregation_REDUCE_COUNT_TRUE Aggregation_Reducer = 7
+ // Reduce by computing the count of False-valued data points across time
+ // series for each alignment period. This reducer is valid for delta
+ // and gauge metrics of Boolean value type. The value type of
+ // the output is [INT64][google.api.MetricDescriptor.ValueType.INT64].
+ Aggregation_REDUCE_COUNT_FALSE Aggregation_Reducer = 15
// Reduce by computing the fraction of True-valued data points across time
// series for each alignment period. This reducer is valid for delta
// and gauge metrics of Boolean value type. The output value is in the
@@ -300,6 +369,7 @@ var Aggregation_Reducer_name = map[int32]string{
5: "REDUCE_STDDEV",
6: "REDUCE_COUNT",
7: "REDUCE_COUNT_TRUE",
+ 15: "REDUCE_COUNT_FALSE",
8: "REDUCE_FRACTION_TRUE",
9: "REDUCE_PERCENTILE_99",
10: "REDUCE_PERCENTILE_95",
@@ -315,6 +385,7 @@ var Aggregation_Reducer_value = map[string]int32{
"REDUCE_STDDEV": 5,
"REDUCE_COUNT": 6,
"REDUCE_COUNT_TRUE": 7,
+ "REDUCE_COUNT_FALSE": 15,
"REDUCE_FRACTION_TRUE": 8,
"REDUCE_PERCENTILE_99": 9,
"REDUCE_PERCENTILE_95": 10,
@@ -325,7 +396,7 @@ var Aggregation_Reducer_value = map[string]int32{
func (x Aggregation_Reducer) String() string {
return proto.EnumName(Aggregation_Reducer_name, int32(x))
}
-func (Aggregation_Reducer) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} }
+func (Aggregation_Reducer) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{2, 1} }
// A single strongly-typed value.
type TypedValue struct {
@@ -343,7 +414,7 @@ type TypedValue struct {
func (m *TypedValue) Reset() { *m = TypedValue{} }
func (m *TypedValue) String() string { return proto.CompactTextString(m) }
func (*TypedValue) ProtoMessage() {}
-func (*TypedValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*TypedValue) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} }
type isTypedValue_Value interface {
isTypedValue_Value()
@@ -544,7 +615,7 @@ type TimeInterval struct {
func (m *TimeInterval) Reset() { *m = TimeInterval{} }
func (m *TimeInterval) String() string { return proto.CompactTextString(m) }
func (*TimeInterval) ProtoMessage() {}
-func (*TimeInterval) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (*TimeInterval) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} }
func (m *TimeInterval) GetEndTime() *google_protobuf2.Timestamp {
if m != nil {
@@ -562,8 +633,9 @@ func (m *TimeInterval) GetStartTime() *google_protobuf2.Timestamp {
// Describes how to combine multiple time series to provide different views of
// the data. Aggregation consists of an alignment step on individual time
-// series (`per_series_aligner`) followed by an optional reduction of the data
-// across different time series (`cross_series_reducer`). For more details, see
+// series (`alignment_period` and `per_series_aligner`) followed by an optional
+// reduction step of the data across the aligned time series
+// (`cross_series_reducer` and `group_by_fields`). For more details, see
// [Aggregation](/monitoring/api/learn_more#aggregation).
type Aggregation struct {
// The alignment period for per-[time series][google.monitoring.v3.TimeSeries]
@@ -619,7 +691,7 @@ type Aggregation struct {
func (m *Aggregation) Reset() { *m = Aggregation{} }
func (m *Aggregation) String() string { return proto.CompactTextString(m) }
func (*Aggregation) ProtoMessage() {}
-func (*Aggregation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (*Aggregation) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} }
func (m *Aggregation) GetAlignmentPeriod() *google_protobuf3.Duration {
if m != nil {
@@ -653,62 +725,74 @@ func init() {
proto.RegisterType((*TypedValue)(nil), "google.monitoring.v3.TypedValue")
proto.RegisterType((*TimeInterval)(nil), "google.monitoring.v3.TimeInterval")
proto.RegisterType((*Aggregation)(nil), "google.monitoring.v3.Aggregation")
+ proto.RegisterEnum("google.monitoring.v3.ComparisonType", ComparisonType_name, ComparisonType_value)
+ proto.RegisterEnum("google.monitoring.v3.ServiceTier", ServiceTier_name, ServiceTier_value)
proto.RegisterEnum("google.monitoring.v3.Aggregation_Aligner", Aggregation_Aligner_name, Aggregation_Aligner_value)
proto.RegisterEnum("google.monitoring.v3.Aggregation_Reducer", Aggregation_Reducer_name, Aggregation_Reducer_value)
}
-func init() { proto.RegisterFile("google/monitoring/v3/common.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
- // 792 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0xdd, 0x6a, 0xe3, 0x46,
- 0x14, 0xb6, 0xec, 0x64, 0x1d, 0x1f, 0x39, 0xf1, 0x64, 0xd6, 0x4b, 0xdd, 0x40, 0xbb, 0x5e, 0x17,
- 0x8a, 0x7b, 0x23, 0x87, 0xb8, 0x2e, 0x84, 0x42, 0x41, 0xb1, 0xb5, 0x1b, 0x83, 0x23, 0x9b, 0x59,
- 0x25, 0x0d, 0x6d, 0x40, 0xc8, 0xd1, 0xac, 0x10, 0xc8, 0x1a, 0x31, 0x92, 0x0d, 0xb9, 0xeb, 0x6b,
- 0xf4, 0xba, 0x77, 0xfb, 0x28, 0x7d, 0x84, 0x3e, 0x44, 0x9f, 0xa1, 0x68, 0x66, 0xb4, 0x52, 0x5a,
- 0x97, 0xf6, 0xf2, 0xfb, 0x39, 0xdf, 0xe8, 0x7c, 0x23, 0xd9, 0xf0, 0x26, 0x60, 0x2c, 0x88, 0xe8,
- 0x68, 0xc3, 0xe2, 0x30, 0x63, 0x3c, 0x8c, 0x83, 0xd1, 0x6e, 0x3c, 0x7a, 0x64, 0x9b, 0x0d, 0x8b,
- 0x8d, 0x84, 0xb3, 0x8c, 0xe1, 0xae, 0xb4, 0x18, 0xa5, 0xc5, 0xd8, 0x8d, 0xcf, 0xbe, 0x50, 0x83,
- 0x5e, 0x12, 0x8e, 0xfc, 0x30, 0xcd, 0x78, 0xb8, 0xde, 0x66, 0x61, 0x31, 0x74, 0xf6, 0xa5, 0x92,
- 0x05, 0x5a, 0x6f, 0x3f, 0x8c, 0xfc, 0x2d, 0xf7, 0x2a, 0xfa, 0xeb, 0xbf, 0xeb, 0x59, 0xb8, 0xa1,
- 0x69, 0xe6, 0x6d, 0x12, 0x69, 0x18, 0xfc, 0xa9, 0x01, 0x38, 0x4f, 0x09, 0xf5, 0xef, 0xbc, 0x68,
- 0x4b, 0xf1, 0x6b, 0x80, 0x35, 0x63, 0x91, 0xbb, 0xcb, 0x51, 0x4f, 0xeb, 0x6b, 0xc3, 0xa3, 0xeb,
- 0x1a, 0x69, 0xe5, 0x9c, 0x34, 0xbc, 0x01, 0x3d, 0x8c, 0xb3, 0xef, 0xbe, 0x55, 0x8e, 0x7a, 0x5f,
- 0x1b, 0x36, 0xae, 0x6b, 0x04, 0x04, 0x29, 0x2d, 0x5f, 0x41, 0xdb, 0x67, 0xdb, 0x75, 0x44, 0x95,
- 0xa7, 0xd1, 0xd7, 0x86, 0xda, 0x75, 0x8d, 0xe8, 0x92, 0xfd, 0x64, 0xca, 0x97, 0x89, 0x03, 0x65,
- 0x3a, 0xe8, 0x6b, 0xc3, 0x56, 0x6e, 0x92, 0xac, 0x34, 0xcd, 0x01, 0x57, 0x77, 0x56, 0xd6, 0xc3,
- 0xbe, 0x36, 0xd4, 0x2f, 0x7a, 0x86, 0xea, 0xcb, 0x4b, 0x42, 0x63, 0x56, 0x71, 0x5d, 0xd7, 0xc8,
- 0x69, 0x75, 0x4a, 0x44, 0x5d, 0x35, 0xe1, 0x50, 0x4c, 0x0f, 0x7e, 0xd1, 0xa0, 0xed, 0x84, 0x1b,
- 0x3a, 0x8f, 0x33, 0xca, 0x77, 0x5e, 0x84, 0x27, 0x70, 0x44, 0x63, 0xdf, 0xcd, 0x8b, 0x11, 0xeb,
- 0xe8, 0x17, 0x67, 0x45, 0x74, 0xd1, 0x9a, 0xe1, 0x14, 0xad, 0x91, 0x26, 0x8d, 0xfd, 0x1c, 0xe1,
- 0x4b, 0x80, 0x34, 0xf3, 0x78, 0x26, 0x07, 0xb5, 0xff, 0x1c, 0x6c, 0x09, 0x77, 0x8e, 0x07, 0x1f,
- 0x9b, 0xa0, 0x9b, 0x41, 0xc0, 0x69, 0x20, 0xae, 0x0a, 0xcf, 0x00, 0x79, 0x51, 0x18, 0xc4, 0x1b,
- 0x1a, 0x67, 0x6e, 0x42, 0x79, 0xc8, 0x7c, 0x15, 0xf8, 0xf9, 0x3f, 0x02, 0x67, 0xea, 0x7e, 0x49,
- 0xe7, 0xd3, 0xc8, 0x4a, 0x4c, 0xe0, 0x1f, 0x01, 0x27, 0x94, 0xbb, 0x29, 0xe5, 0x21, 0x4d, 0x5d,
- 0xa1, 0x52, 0x2e, 0x36, 0x3a, 0xb9, 0xf8, 0xc6, 0xd8, 0xf7, 0x72, 0x19, 0x95, 0x87, 0x30, 0x4c,
- 0x39, 0x40, 0x50, 0x42, 0xf9, 0x7b, 0x91, 0xa1, 0x18, 0xfc, 0x33, 0x74, 0x1f, 0x39, 0x4b, 0xd3,
- 0x22, 0x9a, 0x53, 0x7f, 0xfb, 0x48, 0xb9, 0xb8, 0xb2, 0xff, 0x15, 0x4d, 0xe4, 0x00, 0xc1, 0x22,
- 0x46, 0x86, 0x2b, 0x0e, 0x7f, 0x0d, 0x9d, 0x80, 0xb3, 0x6d, 0xe2, 0xae, 0x9f, 0xdc, 0x0f, 0x21,
- 0x8d, 0xfc, 0xb4, 0x77, 0xd8, 0x6f, 0x0c, 0x5b, 0xe4, 0x58, 0xd0, 0x57, 0x4f, 0x6f, 0x05, 0x39,
- 0xf8, 0xa3, 0x0e, 0xcd, 0xe2, 0x81, 0x4e, 0x00, 0xcc, 0xc5, 0xfc, 0x9d, 0xed, 0xda, 0x4b, 0xdb,
- 0x42, 0x35, 0xdc, 0x01, 0x5d, 0xe2, 0x99, 0xb5, 0x70, 0x4c, 0xa4, 0x95, 0x06, 0x62, 0x3a, 0x16,
- 0xaa, 0xe3, 0x57, 0x70, 0x2a, 0xf1, 0xdc, 0x76, 0x2c, 0xb2, 0x5a, 0x2e, 0x72, 0xba, 0x81, 0xbb,
- 0x80, 0x54, 0x8e, 0x75, 0xef, 0xb8, 0xcb, 0xc5, 0xcc, 0x22, 0xe8, 0x00, 0x1f, 0x43, 0x4b, 0xb2,
- 0x37, 0x73, 0x1b, 0x41, 0x05, 0x9a, 0xf7, 0x48, 0x2f, 0xa3, 0x6f, 0x2c, 0xd3, 0x46, 0xed, 0xf2,
- 0xec, 0xe9, 0xf2, 0xd6, 0x76, 0xd0, 0x71, 0xe9, 0x7f, 0x7f, 0x7b, 0x83, 0x4e, 0x30, 0x82, 0xb6,
- 0x82, 0xce, 0x6c, 0x66, 0xdd, 0xa1, 0x4e, 0x79, 0xaa, 0x98, 0x70, 0x1d, 0x72, 0x6b, 0x21, 0x84,
- 0x3f, 0x83, 0x97, 0x92, 0x7d, 0x4b, 0xcc, 0xa9, 0x33, 0x5f, 0xda, 0x52, 0x38, 0x2d, 0x85, 0x95,
- 0x45, 0xa6, 0x96, 0xed, 0xcc, 0x17, 0x96, 0x7b, 0x79, 0x89, 0xf0, 0x7e, 0x61, 0x82, 0x5e, 0xee,
- 0x15, 0x26, 0xe7, 0xa8, 0xbb, 0x57, 0x38, 0x9f, 0xa0, 0x57, 0x83, 0x5f, 0xeb, 0xd0, 0x2c, 0x2e,
- 0xa4, 0x03, 0x3a, 0xb1, 0x66, 0xb7, 0x53, 0xab, 0xd2, 0xae, 0x22, 0xc4, 0xca, 0xa2, 0xdd, 0x82,
- 0x98, 0xdb, 0xa8, 0x5e, 0xc5, 0xe6, 0x3d, 0x6a, 0x54, 0x70, 0x5e, 0xc1, 0x01, 0x3e, 0x85, 0xe3,
- 0x02, 0xcb, 0x0e, 0x0e, 0xf3, 0x56, 0x14, 0x25, 0x6b, 0x7b, 0x91, 0x5f, 0x51, 0x95, 0x91, 0xdb,
- 0x37, 0x71, 0x0f, 0xba, 0x8a, 0x7e, 0xde, 0xcb, 0x51, 0x45, 0x79, 0x5e, 0x4c, 0xeb, 0x5f, 0x94,
- 0x09, 0x82, 0xfd, 0xca, 0xe4, 0x1c, 0xe9, 0xfb, 0x95, 0xf3, 0x09, 0x6a, 0x5f, 0xfd, 0xa6, 0x41,
- 0xef, 0x91, 0x6d, 0xf6, 0xbe, 0xe5, 0x57, 0xfa, 0x54, 0xfc, 0x82, 0xaf, 0xf2, 0xaf, 0x73, 0xa5,
- 0xfd, 0xf4, 0x83, 0x32, 0x05, 0x2c, 0xf2, 0xe2, 0xc0, 0x60, 0x3c, 0x18, 0x05, 0x34, 0x16, 0xdf,
- 0xee, 0x48, 0x4a, 0x5e, 0x12, 0xa6, 0xcf, 0xff, 0x04, 0xbe, 0x2f, 0xd1, 0xc7, 0xfa, 0xd9, 0x3b,
- 0x19, 0x30, 0x8d, 0xd8, 0xd6, 0x37, 0x6e, 0xca, 0xb3, 0xee, 0xc6, 0xbf, 0x17, 0xe2, 0x83, 0x10,
- 0x1f, 0x4a, 0xf1, 0xe1, 0x6e, 0xbc, 0x7e, 0x21, 0x0e, 0x19, 0xff, 0x15, 0x00, 0x00, 0xff, 0xff,
- 0x57, 0xa4, 0xb9, 0xce, 0x68, 0x06, 0x00, 0x00,
+func init() { proto.RegisterFile("google/monitoring/v3/common.proto", fileDescriptor2) }
+
+var fileDescriptor2 = []byte{
+ // 954 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0xc1, 0x6e, 0xe3, 0x44,
+ 0x18, 0xc7, 0xe3, 0xa4, 0x6d, 0x9a, 0xcf, 0x6d, 0x33, 0x9d, 0xed, 0x76, 0x43, 0xb5, 0xb0, 0xd9,
+ 0x22, 0xa1, 0xb0, 0x07, 0xa7, 0x6a, 0x09, 0x52, 0x85, 0x84, 0xe4, 0x3a, 0xd3, 0xd6, 0x52, 0xe2,
+ 0x84, 0x89, 0x53, 0x2a, 0x28, 0xb2, 0x9c, 0x66, 0xd6, 0xb2, 0x94, 0x78, 0x2c, 0xdb, 0xa9, 0xd4,
+ 0x1b, 0x77, 0xde, 0x81, 0x0b, 0x37, 0x6e, 0xbc, 0x06, 0x0f, 0xc3, 0x85, 0x17, 0x40, 0x9e, 0x71,
+ 0xd6, 0x4e, 0x08, 0x62, 0x8f, 0xdf, 0xef, 0xff, 0xff, 0xbe, 0x99, 0xf9, 0x8f, 0x35, 0x86, 0xb7,
+ 0x1e, 0xe7, 0xde, 0x8c, 0xb5, 0xe7, 0x3c, 0xf0, 0x13, 0x1e, 0xf9, 0x81, 0xd7, 0x7e, 0xba, 0x68,
+ 0x3f, 0xf2, 0xf9, 0x9c, 0x07, 0x5a, 0x18, 0xf1, 0x84, 0xe3, 0x23, 0x69, 0xd1, 0x72, 0x8b, 0xf6,
+ 0x74, 0x71, 0xf2, 0x3a, 0x6b, 0x74, 0x43, 0xbf, 0xed, 0x06, 0x01, 0x4f, 0xdc, 0xc4, 0xe7, 0x41,
+ 0x2c, 0x7b, 0x4e, 0x3e, 0x2d, 0xa8, 0x53, 0x3f, 0x4e, 0x22, 0x7f, 0xb2, 0x48, 0xf5, 0x4c, 0xfe,
+ 0x2c, 0x93, 0x45, 0x35, 0x59, 0xbc, 0x6f, 0x4f, 0x17, 0x91, 0x5b, 0xd0, 0xdf, 0xac, 0xeb, 0x89,
+ 0x3f, 0x67, 0x71, 0xe2, 0xce, 0x43, 0x69, 0x38, 0xfd, 0x4b, 0x01, 0xb0, 0x9f, 0x43, 0x36, 0xbd,
+ 0x73, 0x67, 0x0b, 0x86, 0xdf, 0x00, 0x4c, 0x38, 0x9f, 0x39, 0x4f, 0x69, 0xd5, 0x50, 0x9a, 0x4a,
+ 0x6b, 0xf7, 0xb6, 0x44, 0x6b, 0x29, 0x93, 0x86, 0xb7, 0xa0, 0xfa, 0x41, 0xf2, 0xf5, 0x57, 0x99,
+ 0xa3, 0xdc, 0x54, 0x5a, 0x95, 0xdb, 0x12, 0x05, 0x01, 0xa5, 0xe5, 0x73, 0xd8, 0x9b, 0xf2, 0xc5,
+ 0x64, 0xc6, 0x32, 0x4f, 0xa5, 0xa9, 0xb4, 0x94, 0xdb, 0x12, 0x55, 0x25, 0xfd, 0x60, 0x4a, 0x0f,
+ 0x13, 0x78, 0x99, 0x69, 0xab, 0xa9, 0xb4, 0x6a, 0xa9, 0x49, 0x52, 0x69, 0x32, 0x01, 0x17, 0xcf,
+ 0x9c, 0x59, 0xb7, 0x9b, 0x4a, 0x4b, 0x3d, 0x6f, 0x68, 0x59, 0x9a, 0x6e, 0xe8, 0x6b, 0xdd, 0x82,
+ 0xeb, 0xb6, 0x44, 0x0f, 0x8b, 0x5d, 0x62, 0xd4, 0x55, 0x15, 0xb6, 0x45, 0xf7, 0xe9, 0xcf, 0x0a,
+ 0xec, 0xd9, 0xfe, 0x9c, 0x99, 0x41, 0xc2, 0xa2, 0x27, 0x77, 0x86, 0x3b, 0xb0, 0xcb, 0x82, 0xa9,
+ 0x93, 0x06, 0x23, 0x8e, 0xa3, 0x9e, 0x9f, 0x2c, 0x47, 0x2f, 0x53, 0xd3, 0xec, 0x65, 0x6a, 0xb4,
+ 0xca, 0x82, 0x69, 0x5a, 0xe1, 0x4b, 0x80, 0x38, 0x71, 0xa3, 0x44, 0x36, 0x2a, 0xff, 0xdb, 0x58,
+ 0x13, 0xee, 0xb4, 0x3e, 0xfd, 0xbb, 0x0a, 0xaa, 0xee, 0x79, 0x11, 0xf3, 0xc4, 0x55, 0xe1, 0x2e,
+ 0x20, 0x77, 0xe6, 0x7b, 0xc1, 0x9c, 0x05, 0x89, 0x13, 0xb2, 0xc8, 0xe7, 0xd3, 0x6c, 0xe0, 0x27,
+ 0xff, 0x1a, 0xd8, 0xcd, 0xee, 0x97, 0xd6, 0x3f, 0xb4, 0x0c, 0x45, 0x07, 0xfe, 0x1e, 0x70, 0xc8,
+ 0x22, 0x27, 0x66, 0x91, 0xcf, 0x62, 0x47, 0xa8, 0x2c, 0x12, 0x27, 0x3a, 0x38, 0xff, 0x52, 0xdb,
+ 0xf4, 0xe9, 0x69, 0x85, 0x4d, 0x68, 0xba, 0x6c, 0xa0, 0x28, 0x64, 0xd1, 0x48, 0xcc, 0xc8, 0x08,
+ 0xfe, 0x11, 0x8e, 0x1e, 0x23, 0x1e, 0xc7, 0xcb, 0xd1, 0x11, 0x9b, 0x2e, 0x1e, 0x59, 0x24, 0xae,
+ 0xec, 0xa3, 0x46, 0x53, 0xd9, 0x40, 0xb1, 0x18, 0x23, 0x87, 0x67, 0x0c, 0x7f, 0x01, 0x75, 0x2f,
+ 0xe2, 0x8b, 0xd0, 0x99, 0x3c, 0x3b, 0xef, 0x7d, 0x36, 0x9b, 0xc6, 0x8d, 0xed, 0x66, 0xa5, 0x55,
+ 0xa3, 0xfb, 0x02, 0x5f, 0x3d, 0x5f, 0x0b, 0x78, 0xfa, 0x4b, 0x05, 0xaa, 0xcb, 0x0d, 0x1d, 0x00,
+ 0xe8, 0x3d, 0xf3, 0xc6, 0x72, 0xac, 0x81, 0x45, 0x50, 0x09, 0xd7, 0x41, 0x95, 0x75, 0x97, 0xf4,
+ 0x6c, 0x1d, 0x29, 0xb9, 0x81, 0xea, 0x36, 0x41, 0x65, 0xfc, 0x12, 0x0e, 0x65, 0x6d, 0x5a, 0x36,
+ 0xa1, 0xc3, 0x41, 0x2f, 0xc5, 0x15, 0x7c, 0x04, 0x28, 0x9b, 0x43, 0xee, 0x6d, 0x67, 0xd0, 0xeb,
+ 0x12, 0x8a, 0xb6, 0xf0, 0x3e, 0xd4, 0x24, 0xed, 0x9b, 0x16, 0x82, 0x42, 0xa9, 0xdf, 0x23, 0x35,
+ 0x1f, 0xdd, 0x27, 0xba, 0x85, 0xf6, 0xf2, 0xb5, 0x8d, 0xc1, 0xd8, 0xb2, 0xd1, 0x7e, 0xee, 0x1f,
+ 0x8d, 0xfb, 0xe8, 0x00, 0x23, 0xd8, 0xcb, 0x4a, 0xbb, 0xdb, 0x25, 0x77, 0xa8, 0x9e, 0xaf, 0x2a,
+ 0x3a, 0x1c, 0x9b, 0x8e, 0x09, 0x42, 0xf9, 0x16, 0x25, 0xbd, 0xd6, 0x7b, 0x23, 0x82, 0x1a, 0xf8,
+ 0x15, 0xbc, 0x90, 0xf8, 0x9a, 0xea, 0x86, 0x6d, 0x0e, 0x2c, 0xe9, 0x3f, 0xcc, 0x85, 0x21, 0xa1,
+ 0x06, 0xb1, 0x6c, 0xb3, 0x47, 0x9c, 0xcb, 0x4b, 0x84, 0x37, 0x0b, 0x1d, 0xf4, 0x62, 0xa3, 0xd0,
+ 0x39, 0x43, 0x47, 0x1b, 0x85, 0xb3, 0x0e, 0x7a, 0x89, 0x1b, 0x70, 0xb4, 0x22, 0x38, 0xc6, 0xad,
+ 0x6e, 0xdd, 0x10, 0xf4, 0xea, 0xf4, 0x8f, 0x32, 0x54, 0x97, 0x37, 0x58, 0x07, 0x95, 0x92, 0xee,
+ 0xd8, 0x20, 0x85, 0xeb, 0xc8, 0x80, 0xc8, 0x48, 0x5c, 0xc7, 0x12, 0x98, 0x16, 0x2a, 0x17, 0x6b,
+ 0xfd, 0x1e, 0x55, 0x0a, 0x75, 0x9a, 0xd9, 0x16, 0x3e, 0x84, 0xfd, 0x65, 0x2d, 0x43, 0xdb, 0x4e,
+ 0x63, 0xcc, 0x90, 0xcc, 0x79, 0x27, 0x0d, 0xac, 0x48, 0x64, 0x2e, 0x55, 0x7c, 0x0c, 0x78, 0x05,
+ 0xcb, 0x20, 0xeb, 0xe9, 0x59, 0x32, 0xbe, 0x9a, 0xe4, 0x6e, 0x41, 0x59, 0x8d, 0xb2, 0xf6, 0x1f,
+ 0x4a, 0x07, 0xc1, 0x66, 0xa5, 0x73, 0x86, 0xd4, 0xcd, 0xca, 0x59, 0x07, 0xed, 0xbd, 0xfb, 0x55,
+ 0x81, 0x03, 0x83, 0xcf, 0x43, 0x37, 0xf2, 0x63, 0x1e, 0xa4, 0x6f, 0x2e, 0x3e, 0x81, 0x63, 0x63,
+ 0xd0, 0x1f, 0xea, 0xd4, 0x1c, 0x0d, 0x2c, 0x67, 0x6c, 0x8d, 0x86, 0xc4, 0x30, 0xaf, 0x4d, 0xd2,
+ 0x45, 0xa5, 0x34, 0x84, 0x82, 0x76, 0x63, 0x23, 0x65, 0x1d, 0xa5, 0x5f, 0xf6, 0x2a, 0xea, 0xd9,
+ 0xa8, 0xb2, 0x8e, 0x88, 0x0c, 0xb4, 0x80, 0xc8, 0x77, 0x68, 0x7b, 0x0d, 0x59, 0x04, 0xed, 0xbc,
+ 0xfb, 0x09, 0xd4, 0x11, 0x8b, 0x9e, 0xfc, 0x47, 0x66, 0xfb, 0x2c, 0xc2, 0xaf, 0xa1, 0x31, 0x22,
+ 0xf4, 0xce, 0x34, 0x88, 0x63, 0x9b, 0x84, 0xae, 0x6d, 0xef, 0x18, 0xf0, 0x8a, 0x7a, 0xa5, 0x8f,
+ 0x4c, 0x03, 0x29, 0xe9, 0xf9, 0x57, 0xf8, 0x90, 0x92, 0xbe, 0x39, 0xee, 0xa3, 0xf2, 0xd5, 0x6f,
+ 0x0a, 0x34, 0x1e, 0xf9, 0x7c, 0xe3, 0x73, 0x71, 0xa5, 0x1a, 0xe2, 0x47, 0x39, 0x4c, 0x9f, 0xb9,
+ 0xa1, 0xf2, 0xc3, 0xb7, 0x99, 0xc9, 0xe3, 0x33, 0x37, 0xf0, 0x34, 0x1e, 0x79, 0x6d, 0x8f, 0x05,
+ 0xe2, 0x11, 0x6c, 0x4b, 0xc9, 0x0d, 0xfd, 0x78, 0xf5, 0x5f, 0xfb, 0x4d, 0x5e, 0xfd, 0x5e, 0x3e,
+ 0xb9, 0x91, 0x03, 0x8c, 0x19, 0x5f, 0x4c, 0xb5, 0x7e, 0xbe, 0xd6, 0xdd, 0xc5, 0x9f, 0x4b, 0xf1,
+ 0x41, 0x88, 0x0f, 0xb9, 0xf8, 0x70, 0x77, 0x31, 0xd9, 0x11, 0x8b, 0x5c, 0xfc, 0x13, 0x00, 0x00,
+ 0xff, 0xff, 0xe2, 0x9f, 0x67, 0xb2, 0xcf, 0x07, 0x00, 0x00,
}
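// Editor's note (not part of the vendored file above): a minimal sketch, in plain Go, of the
// percent-change arithmetic described by the new ALIGN_PERCENT_CHANGE comment:
// ((current - previous) / previous) * 100, with the documented handling of previous == 0.
// The helper name percentChange and the use of math.Inf are illustrative assumptions; the
// actual alignment is computed server-side by the Monitoring API, not by this package.
package main

import (
	"fmt"
	"math"
)

// percentChange mirrors the documented formula, including the zero-denominator cases.
func percentChange(current, previous float64) float64 {
	if previous == 0 {
		if current == 0 {
			return 0 // both (current - previous) and previous are 0 -> 0
		}
		return math.Inf(1) // previous is 0 -> infinity
	}
	return (current - previous) / previous * 100
}

func main() {
	fmt.Println(percentChange(150, 100)) // 50
	fmt.Println(percentChange(0, 0))     // 0
}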
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go
index a9c0dd81..0afad7a6 100644
--- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go
@@ -61,7 +61,7 @@ type Group struct {
func (m *Group) Reset() { *m = Group{} }
func (m *Group) String() string { return proto.CompactTextString(m) }
func (*Group) ProtoMessage() {}
-func (*Group) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
+func (*Group) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} }
func (m *Group) GetName() string {
if m != nil {
@@ -102,9 +102,9 @@ func init() {
proto.RegisterType((*Group)(nil), "google.monitoring.v3.Group")
}
-func init() { proto.RegisterFile("google/monitoring/v3/group.proto", fileDescriptor1) }
+func init() { proto.RegisterFile("google/monitoring/v3/group.proto", fileDescriptor3) }
-var fileDescriptor1 = []byte{
+var fileDescriptor3 = []byte{
// 261 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xcf, 0x4a, 0x2b, 0x31,
0x14, 0x87, 0x49, 0xef, 0xed, 0x60, 0x4f, 0x5d, 0x0d, 0x22, 0x83, 0x20, 0x8e, 0xae, 0xba, 0xca,
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go
index 14750fa9..de212a50 100644
--- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go
@@ -8,7 +8,7 @@ import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import google_api4 "google.golang.org/genproto/googleapis/api/monitoredres"
-import google_protobuf4 "github.com/golang/protobuf/ptypes/empty"
+import google_protobuf5 "github.com/golang/protobuf/ptypes/empty"
import (
context "golang.org/x/net/context"
@@ -45,7 +45,7 @@ type ListGroupsRequest struct {
func (m *ListGroupsRequest) Reset() { *m = ListGroupsRequest{} }
func (m *ListGroupsRequest) String() string { return proto.CompactTextString(m) }
func (*ListGroupsRequest) ProtoMessage() {}
-func (*ListGroupsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} }
+func (*ListGroupsRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} }
type isListGroupsRequest_Filter interface {
isListGroupsRequest_Filter()
@@ -208,7 +208,7 @@ type ListGroupsResponse struct {
func (m *ListGroupsResponse) Reset() { *m = ListGroupsResponse{} }
func (m *ListGroupsResponse) String() string { return proto.CompactTextString(m) }
func (*ListGroupsResponse) ProtoMessage() {}
-func (*ListGroupsResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} }
+func (*ListGroupsResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} }
func (m *ListGroupsResponse) GetGroup() []*Group {
if m != nil {
@@ -234,7 +234,7 @@ type GetGroupRequest struct {
func (m *GetGroupRequest) Reset() { *m = GetGroupRequest{} }
func (m *GetGroupRequest) String() string { return proto.CompactTextString(m) }
func (*GetGroupRequest) ProtoMessage() {}
-func (*GetGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} }
+func (*GetGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} }
func (m *GetGroupRequest) GetName() string {
if m != nil {
@@ -258,7 +258,7 @@ type CreateGroupRequest struct {
func (m *CreateGroupRequest) Reset() { *m = CreateGroupRequest{} }
func (m *CreateGroupRequest) String() string { return proto.CompactTextString(m) }
func (*CreateGroupRequest) ProtoMessage() {}
-func (*CreateGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} }
+func (*CreateGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} }
func (m *CreateGroupRequest) GetName() string {
if m != nil {
@@ -293,7 +293,7 @@ type UpdateGroupRequest struct {
func (m *UpdateGroupRequest) Reset() { *m = UpdateGroupRequest{} }
func (m *UpdateGroupRequest) String() string { return proto.CompactTextString(m) }
func (*UpdateGroupRequest) ProtoMessage() {}
-func (*UpdateGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} }
+func (*UpdateGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{4} }
func (m *UpdateGroupRequest) GetGroup() *Group {
if m != nil {
@@ -319,7 +319,7 @@ type DeleteGroupRequest struct {
func (m *DeleteGroupRequest) Reset() { *m = DeleteGroupRequest{} }
func (m *DeleteGroupRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteGroupRequest) ProtoMessage() {}
-func (*DeleteGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} }
+func (*DeleteGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{5} }
func (m *DeleteGroupRequest) GetName() string {
if m != nil {
@@ -357,7 +357,7 @@ type ListGroupMembersRequest struct {
func (m *ListGroupMembersRequest) Reset() { *m = ListGroupMembersRequest{} }
func (m *ListGroupMembersRequest) String() string { return proto.CompactTextString(m) }
func (*ListGroupMembersRequest) ProtoMessage() {}
-func (*ListGroupMembersRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} }
+func (*ListGroupMembersRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{6} }
func (m *ListGroupMembersRequest) GetName() string {
if m != nil {
@@ -409,7 +409,7 @@ type ListGroupMembersResponse struct {
func (m *ListGroupMembersResponse) Reset() { *m = ListGroupMembersResponse{} }
func (m *ListGroupMembersResponse) String() string { return proto.CompactTextString(m) }
func (*ListGroupMembersResponse) ProtoMessage() {}
-func (*ListGroupMembersResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} }
+func (*ListGroupMembersResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{7} }
func (m *ListGroupMembersResponse) GetMembers() []*google_api4.MonitoredResource {
if m != nil {
@@ -464,7 +464,7 @@ type GroupServiceClient interface {
// You can change any group attributes except `name`.
UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error)
// Deletes an existing group.
- DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error)
+ DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error)
// Lists the monitored resources that are members of a group.
ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error)
}
@@ -513,8 +513,8 @@ func (c *groupServiceClient) UpdateGroup(ctx context.Context, in *UpdateGroupReq
return out, nil
}
-func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) {
- out := new(google_protobuf4.Empty)
+func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) {
+ out := new(google_protobuf5.Empty)
err := grpc.Invoke(ctx, "/google.monitoring.v3.GroupService/DeleteGroup", in, out, c.cc, opts...)
if err != nil {
return nil, err
@@ -544,7 +544,7 @@ type GroupServiceServer interface {
// You can change any group attributes except `name`.
UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error)
// Deletes an existing group.
- DeleteGroup(context.Context, *DeleteGroupRequest) (*google_protobuf4.Empty, error)
+ DeleteGroup(context.Context, *DeleteGroupRequest) (*google_protobuf5.Empty, error)
// Lists the monitored resources that are members of a group.
ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error)
}
@@ -694,9 +694,9 @@ var _GroupService_serviceDesc = grpc.ServiceDesc{
Metadata: "google/monitoring/v3/group_service.proto",
}
-func init() { proto.RegisterFile("google/monitoring/v3/group_service.proto", fileDescriptor2) }
+func init() { proto.RegisterFile("google/monitoring/v3/group_service.proto", fileDescriptor4) }
-var fileDescriptor2 = []byte{
+var fileDescriptor4 = []byte{
// 826 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x4d, 0x6f, 0xd3, 0x4c,
0x10, 0x7e, 0xdd, 0xa4, 0x69, 0xb2, 0x69, 0xd5, 0x76, 0x55, 0xf5, 0x8d, 0xdc, 0x0f, 0x05, 0xf7,
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go
index 533b5373..99e74513 100644
--- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go
@@ -32,7 +32,7 @@ type Point struct {
func (m *Point) Reset() { *m = Point{} }
func (m *Point) String() string { return proto.CompactTextString(m) }
func (*Point) ProtoMessage() {}
-func (*Point) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} }
+func (*Point) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} }
func (m *Point) GetInterval() *TimeInterval {
if m != nil {
@@ -90,7 +90,7 @@ type TimeSeries struct {
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
func (*TimeSeries) ProtoMessage() {}
-func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} }
+func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{1} }
func (m *TimeSeries) GetMetric() *google_api5.Metric {
if m != nil {
@@ -132,9 +132,9 @@ func init() {
proto.RegisterType((*TimeSeries)(nil), "google.monitoring.v3.TimeSeries")
}
-func init() { proto.RegisterFile("google/monitoring/v3/metric.proto", fileDescriptor3) }
+func init() { proto.RegisterFile("google/monitoring/v3/metric.proto", fileDescriptor5) }
-var fileDescriptor3 = []byte{
+var fileDescriptor5 = []byte{
// 396 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x4a, 0xeb, 0x40,
0x14, 0x86, 0x49, 0x7b, 0x5b, 0x7a, 0x27, 0x70, 0x17, 0xc3, 0x05, 0x43, 0x45, 0x88, 0x15, 0xb4,
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go
index 77840f1c..1673fecb 100644
--- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go
@@ -9,7 +9,7 @@ import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import google_api5 "google.golang.org/genproto/googleapis/api/metric"
import google_api4 "google.golang.org/genproto/googleapis/api/monitoredres"
-import google_protobuf4 "github.com/golang/protobuf/ptypes/empty"
+import google_protobuf5 "github.com/golang/protobuf/ptypes/empty"
import google_rpc "google.golang.org/genproto/googleapis/rpc/status"
import (
@@ -47,7 +47,7 @@ func (x ListTimeSeriesRequest_TimeSeriesView) String() string {
return proto.EnumName(ListTimeSeriesRequest_TimeSeriesView_name, int32(x))
}
func (ListTimeSeriesRequest_TimeSeriesView) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor4, []int{8, 0}
+ return fileDescriptor6, []int{8, 0}
}
// The `ListMonitoredResourceDescriptors` request.
@@ -77,7 +77,7 @@ func (m *ListMonitoredResourceDescriptorsRequest) Reset() {
func (m *ListMonitoredResourceDescriptorsRequest) String() string { return proto.CompactTextString(m) }
func (*ListMonitoredResourceDescriptorsRequest) ProtoMessage() {}
func (*ListMonitoredResourceDescriptorsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor4, []int{0}
+ return fileDescriptor6, []int{0}
}
func (m *ListMonitoredResourceDescriptorsRequest) GetName() string {
@@ -125,7 +125,7 @@ func (m *ListMonitoredResourceDescriptorsResponse) Reset() {
func (m *ListMonitoredResourceDescriptorsResponse) String() string { return proto.CompactTextString(m) }
func (*ListMonitoredResourceDescriptorsResponse) ProtoMessage() {}
func (*ListMonitoredResourceDescriptorsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor4, []int{1}
+ return fileDescriptor6, []int{1}
}
func (m *ListMonitoredResourceDescriptorsResponse) GetResourceDescriptors() []*google_api4.MonitoredResourceDescriptor {
@@ -155,7 +155,7 @@ func (m *GetMonitoredResourceDescriptorRequest) Reset() { *m = GetMonito
func (m *GetMonitoredResourceDescriptorRequest) String() string { return proto.CompactTextString(m) }
func (*GetMonitoredResourceDescriptorRequest) ProtoMessage() {}
func (*GetMonitoredResourceDescriptorRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor4, []int{2}
+ return fileDescriptor6, []int{2}
}
func (m *GetMonitoredResourceDescriptorRequest) GetName() string {
@@ -190,7 +190,7 @@ type ListMetricDescriptorsRequest struct {
func (m *ListMetricDescriptorsRequest) Reset() { *m = ListMetricDescriptorsRequest{} }
func (m *ListMetricDescriptorsRequest) String() string { return proto.CompactTextString(m) }
func (*ListMetricDescriptorsRequest) ProtoMessage() {}
-func (*ListMetricDescriptorsRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} }
+func (*ListMetricDescriptorsRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{3} }
func (m *ListMetricDescriptorsRequest) GetName() string {
if m != nil {
@@ -234,7 +234,7 @@ type ListMetricDescriptorsResponse struct {
func (m *ListMetricDescriptorsResponse) Reset() { *m = ListMetricDescriptorsResponse{} }
func (m *ListMetricDescriptorsResponse) String() string { return proto.CompactTextString(m) }
func (*ListMetricDescriptorsResponse) ProtoMessage() {}
-func (*ListMetricDescriptorsResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{4} }
+func (*ListMetricDescriptorsResponse) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{4} }
func (m *ListMetricDescriptorsResponse) GetMetricDescriptors() []*google_api5.MetricDescriptor {
if m != nil {
@@ -262,7 +262,7 @@ type GetMetricDescriptorRequest struct {
func (m *GetMetricDescriptorRequest) Reset() { *m = GetMetricDescriptorRequest{} }
func (m *GetMetricDescriptorRequest) String() string { return proto.CompactTextString(m) }
func (*GetMetricDescriptorRequest) ProtoMessage() {}
-func (*GetMetricDescriptorRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{5} }
+func (*GetMetricDescriptorRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{5} }
func (m *GetMetricDescriptorRequest) GetName() string {
if m != nil {
@@ -284,7 +284,7 @@ type CreateMetricDescriptorRequest struct {
func (m *CreateMetricDescriptorRequest) Reset() { *m = CreateMetricDescriptorRequest{} }
func (m *CreateMetricDescriptorRequest) String() string { return proto.CompactTextString(m) }
func (*CreateMetricDescriptorRequest) ProtoMessage() {}
-func (*CreateMetricDescriptorRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{6} }
+func (*CreateMetricDescriptorRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{6} }
func (m *CreateMetricDescriptorRequest) GetName() string {
if m != nil {
@@ -312,7 +312,7 @@ type DeleteMetricDescriptorRequest struct {
func (m *DeleteMetricDescriptorRequest) Reset() { *m = DeleteMetricDescriptorRequest{} }
func (m *DeleteMetricDescriptorRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteMetricDescriptorRequest) ProtoMessage() {}
-func (*DeleteMetricDescriptorRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{7} }
+func (*DeleteMetricDescriptorRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{7} }
func (m *DeleteMetricDescriptorRequest) GetName() string {
if m != nil {
@@ -362,7 +362,7 @@ type ListTimeSeriesRequest struct {
func (m *ListTimeSeriesRequest) Reset() { *m = ListTimeSeriesRequest{} }
func (m *ListTimeSeriesRequest) String() string { return proto.CompactTextString(m) }
func (*ListTimeSeriesRequest) ProtoMessage() {}
-func (*ListTimeSeriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{8} }
+func (*ListTimeSeriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{8} }
func (m *ListTimeSeriesRequest) GetName() string {
if m != nil {
@@ -433,7 +433,7 @@ type ListTimeSeriesResponse struct {
func (m *ListTimeSeriesResponse) Reset() { *m = ListTimeSeriesResponse{} }
func (m *ListTimeSeriesResponse) String() string { return proto.CompactTextString(m) }
func (*ListTimeSeriesResponse) ProtoMessage() {}
-func (*ListTimeSeriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{9} }
+func (*ListTimeSeriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{9} }
func (m *ListTimeSeriesResponse) GetTimeSeries() []*TimeSeries {
if m != nil {
@@ -465,7 +465,7 @@ type CreateTimeSeriesRequest struct {
func (m *CreateTimeSeriesRequest) Reset() { *m = CreateTimeSeriesRequest{} }
func (m *CreateTimeSeriesRequest) String() string { return proto.CompactTextString(m) }
func (*CreateTimeSeriesRequest) ProtoMessage() {}
-func (*CreateTimeSeriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{10} }
+func (*CreateTimeSeriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{10} }
func (m *CreateTimeSeriesRequest) GetName() string {
if m != nil {
@@ -495,7 +495,7 @@ type CreateTimeSeriesError struct {
func (m *CreateTimeSeriesError) Reset() { *m = CreateTimeSeriesError{} }
func (m *CreateTimeSeriesError) String() string { return proto.CompactTextString(m) }
func (*CreateTimeSeriesError) ProtoMessage() {}
-func (*CreateTimeSeriesError) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{11} }
+func (*CreateTimeSeriesError) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{11} }
func (m *CreateTimeSeriesError) GetTimeSeries() *TimeSeries {
if m != nil {
@@ -552,14 +552,14 @@ type MetricServiceClient interface {
CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*google_api5.MetricDescriptor, error)
// Deletes a metric descriptor. Only user-created
// [custom metrics](/monitoring/custom-metrics) can be deleted.
- DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error)
+ DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error)
// Lists time series that match a filter. This method does not require a Stackdriver account.
ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error)
// Creates or adds data to one or more time series.
// The response is empty if all time series in the request were written.
// If any time series could not be written, a corresponding failure message is
// included in the error response.
- CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error)
+ CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error)
}
type metricServiceClient struct {
@@ -615,8 +615,8 @@ func (c *metricServiceClient) CreateMetricDescriptor(ctx context.Context, in *Cr
return out, nil
}
-func (c *metricServiceClient) DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) {
- out := new(google_protobuf4.Empty)
+func (c *metricServiceClient) DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) {
+ out := new(google_protobuf5.Empty)
err := grpc.Invoke(ctx, "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", in, out, c.cc, opts...)
if err != nil {
return nil, err
@@ -633,8 +633,8 @@ func (c *metricServiceClient) ListTimeSeries(ctx context.Context, in *ListTimeSe
return out, nil
}
-func (c *metricServiceClient) CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) {
- out := new(google_protobuf4.Empty)
+func (c *metricServiceClient) CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) {
+ out := new(google_protobuf5.Empty)
err := grpc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateTimeSeries", in, out, c.cc, opts...)
if err != nil {
return nil, err
@@ -659,14 +659,14 @@ type MetricServiceServer interface {
CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*google_api5.MetricDescriptor, error)
// Deletes a metric descriptor. Only user-created
// [custom metrics](/monitoring/custom-metrics) can be deleted.
- DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*google_protobuf4.Empty, error)
+ DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*google_protobuf5.Empty, error)
// Lists time series that match a filter. This method does not require a Stackdriver account.
ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error)
// Creates or adds data to one or more time series.
// The response is empty if all time series in the request were written.
// If any time series could not be written, a corresponding failure message is
// included in the error response.
- CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*google_protobuf4.Empty, error)
+ CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*google_protobuf5.Empty, error)
}
func RegisterMetricServiceServer(s *grpc.Server, srv MetricServiceServer) {
@@ -858,9 +858,9 @@ var _MetricService_serviceDesc = grpc.ServiceDesc{
Metadata: "google/monitoring/v3/metric_service.proto",
}
-func init() { proto.RegisterFile("google/monitoring/v3/metric_service.proto", fileDescriptor4) }
+func init() { proto.RegisterFile("google/monitoring/v3/metric_service.proto", fileDescriptor6) }
-var fileDescriptor4 = []byte{
+var fileDescriptor6 = []byte{
// 1011 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x4d, 0x6f, 0x1b, 0x45,
0x18, 0x66, 0x92, 0x34, 0x1f, 0xaf, 0xd5, 0x90, 0x4e, 0x5b, 0xd7, 0x6c, 0x13, 0xe4, 0x2e, 0x2a,
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go
new file mode 100644
index 00000000..32f983d0
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go
@@ -0,0 +1,67 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/mutation_record.proto
+
+package monitoring
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// Describes a change made to a configuration.
+type MutationRecord struct {
+ // When the change occurred.
+ MutateTime *google_protobuf2.Timestamp `protobuf:"bytes,1,opt,name=mutate_time,json=mutateTime" json:"mutate_time,omitempty"`
+ // The email address of the user making the change.
+ MutatedBy string `protobuf:"bytes,2,opt,name=mutated_by,json=mutatedBy" json:"mutated_by,omitempty"`
+}
+
+func (m *MutationRecord) Reset() { *m = MutationRecord{} }
+func (m *MutationRecord) String() string { return proto.CompactTextString(m) }
+func (*MutationRecord) ProtoMessage() {}
+func (*MutationRecord) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{0} }
+
+func (m *MutationRecord) GetMutateTime() *google_protobuf2.Timestamp {
+ if m != nil {
+ return m.MutateTime
+ }
+ return nil
+}
+
+func (m *MutationRecord) GetMutatedBy() string {
+ if m != nil {
+ return m.MutatedBy
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*MutationRecord)(nil), "google.monitoring.v3.MutationRecord")
+}
+
+func init() { proto.RegisterFile("google/monitoring/v3/mutation_record.proto", fileDescriptor7) }
+
+var fileDescriptor7 = []byte{
+ // 251 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4a, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xca, 0xcc, 0x4b, 0xd7, 0x2f, 0x33,
+ 0xd6, 0xcf, 0x2d, 0x2d, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x8b, 0x2f, 0x4a, 0x4d, 0xce, 0x2f, 0x4a,
+ 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x81, 0xa8, 0xd5, 0x43, 0xa8, 0xd5, 0x2b, 0x33,
+ 0x96, 0x92, 0x87, 0x9a, 0x00, 0x56, 0x93, 0x54, 0x9a, 0xa6, 0x5f, 0x92, 0x99, 0x9b, 0x5a, 0x5c,
+ 0x92, 0x98, 0x5b, 0x00, 0xd1, 0xa6, 0x94, 0xc3, 0xc5, 0xe7, 0x0b, 0x35, 0x2f, 0x08, 0x6c, 0x9c,
+ 0x90, 0x35, 0x17, 0x37, 0xd8, 0x86, 0xd4, 0x78, 0x90, 0x5a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x6e,
+ 0x23, 0x29, 0x3d, 0xa8, 0xf1, 0x30, 0x83, 0xf4, 0x42, 0x60, 0x06, 0x05, 0x71, 0x41, 0x94, 0x83,
+ 0x04, 0x84, 0x64, 0xb9, 0xa0, 0xbc, 0x94, 0xf8, 0xa4, 0x4a, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xce,
+ 0x20, 0x4e, 0xa8, 0x88, 0x53, 0xa5, 0xd3, 0x6a, 0x46, 0x2e, 0x89, 0xe4, 0xfc, 0x5c, 0x3d, 0x6c,
+ 0x6e, 0x75, 0x12, 0x46, 0x75, 0x48, 0x00, 0xc8, 0xa6, 0x00, 0xc6, 0x28, 0x3b, 0xa8, 0xe2, 0xf4,
+ 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, 0xfd, 0xf4, 0xd4, 0x3c, 0xb0, 0x3b, 0xf4,
+ 0x21, 0x52, 0x89, 0x05, 0x99, 0xc5, 0xa8, 0x61, 0x64, 0x8d, 0xe0, 0xad, 0x62, 0x92, 0x72, 0x87,
+ 0x18, 0xe0, 0x9c, 0x93, 0x5f, 0x9a, 0xa2, 0xe7, 0x8b, 0xb0, 0x33, 0xcc, 0xf8, 0x14, 0x4c, 0x32,
+ 0x06, 0x2c, 0x19, 0x83, 0x90, 0x8c, 0x09, 0x33, 0x4e, 0x62, 0x03, 0x5b, 0x62, 0x0c, 0x08, 0x00,
+ 0x00, 0xff, 0xff, 0x95, 0xa7, 0xf3, 0xbd, 0x87, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go
new file mode 100644
index 00000000..6d0534ef
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go
@@ -0,0 +1,313 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/notification.proto
+
+package monitoring
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "google.golang.org/genproto/googleapis/api/annotations"
+import google_api3 "google.golang.org/genproto/googleapis/api/label"
+import google_protobuf4 "github.com/golang/protobuf/ptypes/wrappers"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// Indicates whether the channel has been verified or not. It is illegal
+// to specify this field in a
+// [`CreateNotificationChannel`][google.monitoring.v3.NotificationChannelService.CreateNotificationChannel]
+// or an
+// [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel]
+// operation.
+type NotificationChannel_VerificationStatus int32
+
+const (
+ // Sentinel value used to indicate that the state is unknown, omitted, or
+ // is not applicable (as in the case of channels that neither support
+ // nor require verification in order to function).
+ NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED NotificationChannel_VerificationStatus = 0
+ // The channel has yet to be verified and requires verification to function.
+ // Note that this state also applies to the case where the verification
+ // process has been initiated by sending a verification code but where
+ // the verification code has not been submitted to complete the process.
+ NotificationChannel_UNVERIFIED NotificationChannel_VerificationStatus = 1
+ // It has been proven that notifications can be received on this
+ // notification channel and that someone on the project has access
+ // to messages that are delivered to that channel.
+ NotificationChannel_VERIFIED NotificationChannel_VerificationStatus = 2
+)
+
+var NotificationChannel_VerificationStatus_name = map[int32]string{
+ 0: "VERIFICATION_STATUS_UNSPECIFIED",
+ 1: "UNVERIFIED",
+ 2: "VERIFIED",
+}
+var NotificationChannel_VerificationStatus_value = map[string]int32{
+ "VERIFICATION_STATUS_UNSPECIFIED": 0,
+ "UNVERIFIED": 1,
+ "VERIFIED": 2,
+}
+
+func (x NotificationChannel_VerificationStatus) String() string {
+ return proto.EnumName(NotificationChannel_VerificationStatus_name, int32(x))
+}
+func (NotificationChannel_VerificationStatus) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor8, []int{1, 0}
+}
+
+// A description of a notification channel. The descriptor includes
+// the properties of the channel and the set of labels or fields that
+// must be specified to configure channels of a given type.
+type NotificationChannelDescriptor struct {
+ // The full REST resource name for this descriptor. The syntax is:
+ //
+ // projects/[PROJECT_ID]/notificationChannelDescriptors/[TYPE]
+ //
+ // In the above, `[TYPE]` is the value of the `type` field.
+ Name string `protobuf:"bytes,6,opt,name=name" json:"name,omitempty"`
+ // The type of notification channel, such as "email", "sms", etc.
+ // Notification channel types are globally unique.
+ Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
+ // A human-readable name for the notification channel type. This
+ // form of the name is suitable for a user interface.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
+ // A human-readable description of the notification channel
+ // type. The description may include a description of the properties
+ // of the channel and pointers to external documentation.
+ Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
+ // The set of labels that must be defined to identify a particular
+ // channel of the corresponding type. Each label includes a
+ // description for how that field should be populated.
+ Labels []*google_api3.LabelDescriptor `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty"`
+ // The tiers that support this notification channel; the project service tier
+ // must be one of the supported_tiers.
+ SupportedTiers []ServiceTier `protobuf:"varint,5,rep,packed,name=supported_tiers,json=supportedTiers,enum=google.monitoring.v3.ServiceTier" json:"supported_tiers,omitempty"`
+}
+
+func (m *NotificationChannelDescriptor) Reset() { *m = NotificationChannelDescriptor{} }
+func (m *NotificationChannelDescriptor) String() string { return proto.CompactTextString(m) }
+func (*NotificationChannelDescriptor) ProtoMessage() {}
+func (*NotificationChannelDescriptor) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{0} }
+
+func (m *NotificationChannelDescriptor) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *NotificationChannelDescriptor) GetType() string {
+ if m != nil {
+ return m.Type
+ }
+ return ""
+}
+
+func (m *NotificationChannelDescriptor) GetDisplayName() string {
+ if m != nil {
+ return m.DisplayName
+ }
+ return ""
+}
+
+func (m *NotificationChannelDescriptor) GetDescription() string {
+ if m != nil {
+ return m.Description
+ }
+ return ""
+}
+
+func (m *NotificationChannelDescriptor) GetLabels() []*google_api3.LabelDescriptor {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+func (m *NotificationChannelDescriptor) GetSupportedTiers() []ServiceTier {
+ if m != nil {
+ return m.SupportedTiers
+ }
+ return nil
+}
+
+// A `NotificationChannel` is a medium through which an alert is
+// delivered when a policy violation is detected. Examples of channels
+// include email, SMS, and third-party messaging applications. Fields
+// containing sensitive information like authentication tokens or
+// contact info are only partially populated on retrieval.
+type NotificationChannel struct {
+ // The type of the notification channel. This field matches the
+ // value of the [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type] field.
+ Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
+ // The full REST resource name for this channel. The syntax is:
+ //
+ // projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]
+ //
+ // The `[CHANNEL_ID]` is automatically assigned by the server on creation.
+ Name string `protobuf:"bytes,6,opt,name=name" json:"name,omitempty"`
+ // An optional human-readable name for this notification channel. It is
+ // recommended that you specify a non-empty and unique name in order to
+ // make it easier to identify the channels in your project, though this is
+ // not enforced. The display name is limited to 512 Unicode characters.
+ DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
+ // An optional human-readable description of this notification channel. This
+ // description may provide additional details, beyond the display
+ // name, for the channel. This may not exceed 1024 Unicode characters.
+ Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"`
+ // Configuration fields that define the channel and its behavior. The
+ // permissible and required labels are specified in the
+ // [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels] of the
+ // `NotificationChannelDescriptor` corresponding to the `type` field.
+ Labels map[string]string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ // User-supplied key/value data that does not need to conform to
+ // the corresponding `NotificationChannelDescriptor`'s schema, unlike
+ // the `labels` field. This field is intended to be used for organizing
+ // and identifying the `NotificationChannel` objects.
+ //
+ // The field can contain up to 64 entries. Each key and value is limited to
+ // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and
+ // values can contain only lowercase letters, numerals, underscores, and
+ // dashes. Keys must begin with a letter.
+ UserLabels map[string]string `protobuf:"bytes,8,rep,name=user_labels,json=userLabels" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ // Indicates whether this channel has been verified or not. On a
+ // [`ListNotificationChannels`][google.monitoring.v3.NotificationChannelService.ListNotificationChannels]
+ // or
+ // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel]
+ // operation, this field is expected to be populated.
+ //
+ // If the value is `UNVERIFIED`, then it indicates that the channel is
+ // non-functioning (it both requires verification and lacks verification);
+ // otherwise, it is assumed that the channel works.
+ //
+ // If the channel is neither `VERIFIED` nor `UNVERIFIED`, it implies that
+ // the channel is of a type that does not require verification or that
+ // this specific channel has been exempted from verification because it was
+ // created prior to verification being required for channels of this type.
+ //
+ // This field cannot be modified using a standard
+ // [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel]
+ // operation. To change the value of this field, you must call
+ // [`VerifyNotificationChannel`][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel].
+ VerificationStatus NotificationChannel_VerificationStatus `protobuf:"varint,9,opt,name=verification_status,json=verificationStatus,enum=google.monitoring.v3.NotificationChannel_VerificationStatus" json:"verification_status,omitempty"`
+ // Whether notifications are forwarded to the described channel. This makes
+ // it possible to disable delivery of notifications to a particular channel
+ // without removing the channel from all alerting policies that reference
+ // the channel. This is a more convenient approach when the change is
+ // temporary and you want to receive notifications from the same set
+ // of alerting policies on the channel at some point in the future.
+ Enabled *google_protobuf4.BoolValue `protobuf:"bytes,11,opt,name=enabled" json:"enabled,omitempty"`
+}
+
+func (m *NotificationChannel) Reset() { *m = NotificationChannel{} }
+func (m *NotificationChannel) String() string { return proto.CompactTextString(m) }
+func (*NotificationChannel) ProtoMessage() {}
+func (*NotificationChannel) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{1} }
+
+func (m *NotificationChannel) GetType() string {
+ if m != nil {
+ return m.Type
+ }
+ return ""
+}
+
+func (m *NotificationChannel) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *NotificationChannel) GetDisplayName() string {
+ if m != nil {
+ return m.DisplayName
+ }
+ return ""
+}
+
+func (m *NotificationChannel) GetDescription() string {
+ if m != nil {
+ return m.Description
+ }
+ return ""
+}
+
+func (m *NotificationChannel) GetLabels() map[string]string {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+func (m *NotificationChannel) GetUserLabels() map[string]string {
+ if m != nil {
+ return m.UserLabels
+ }
+ return nil
+}
+
+func (m *NotificationChannel) GetVerificationStatus() NotificationChannel_VerificationStatus {
+ if m != nil {
+ return m.VerificationStatus
+ }
+ return NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED
+}
+
+func (m *NotificationChannel) GetEnabled() *google_protobuf4.BoolValue {
+ if m != nil {
+ return m.Enabled
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*NotificationChannelDescriptor)(nil), "google.monitoring.v3.NotificationChannelDescriptor")
+ proto.RegisterType((*NotificationChannel)(nil), "google.monitoring.v3.NotificationChannel")
+ proto.RegisterEnum("google.monitoring.v3.NotificationChannel_VerificationStatus", NotificationChannel_VerificationStatus_name, NotificationChannel_VerificationStatus_value)
+}
+
+func init() { proto.RegisterFile("google/monitoring/v3/notification.proto", fileDescriptor8) }
+
+var fileDescriptor8 = []byte{
+ // 599 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xdd, 0x6e, 0xd3, 0x30,
+ 0x14, 0xc7, 0x49, 0xbb, 0x8e, 0xcd, 0x99, 0xba, 0xe1, 0x4d, 0x28, 0x0a, 0x5f, 0xdd, 0xb8, 0xa0,
+ 0x57, 0x89, 0xd4, 0x82, 0xc4, 0xf8, 0x92, 0xb6, 0xae, 0x43, 0x45, 0xac, 0x4c, 0xfd, 0x42, 0x9a,
+ 0x26, 0x55, 0x6e, 0xeb, 0x05, 0x8b, 0xc4, 0x8e, 0x6c, 0x27, 0xa8, 0x0f, 0xc1, 0x63, 0x70, 0x01,
+ 0x8f, 0xc2, 0x53, 0xa1, 0x38, 0x6e, 0x12, 0xb6, 0x48, 0x8c, 0x3b, 0x9f, 0x73, 0xfe, 0xe7, 0x7f,
+ 0xce, 0xf9, 0x35, 0x2a, 0x78, 0xe6, 0x31, 0xe6, 0xf9, 0xd8, 0x0d, 0x18, 0x25, 0x92, 0x71, 0x42,
+ 0x3d, 0x37, 0x6e, 0xbb, 0x94, 0x49, 0x72, 0x45, 0xe6, 0x48, 0x12, 0x46, 0x9d, 0x90, 0x33, 0xc9,
+ 0xe0, 0x5e, 0x2a, 0x74, 0x72, 0xa1, 0x13, 0xb7, 0xed, 0x87, 0xba, 0x1d, 0x85, 0xc4, 0x45, 0x94,
+ 0x32, 0xa9, 0x5a, 0x44, 0xda, 0x63, 0xdf, 0x2f, 0x54, 0x7d, 0x34, 0xc3, 0xbe, 0xce, 0xef, 0x97,
+ 0x0e, 0x9d, 0xb3, 0x20, 0x58, 0x8d, 0xb3, 0x1f, 0x6b, 0x89, 0x8a, 0x66, 0xd1, 0x95, 0xfb, 0x8d,
+ 0xa3, 0x30, 0xc4, 0x5c, 0x5b, 0x1f, 0x7c, 0xaf, 0x80, 0x47, 0xfd, 0xc2, 0x96, 0x9d, 0x2f, 0x88,
+ 0x52, 0xec, 0x9f, 0x60, 0x31, 0xe7, 0x24, 0x94, 0x8c, 0x43, 0x08, 0xd6, 0x28, 0x0a, 0xb0, 0xb5,
+ 0xde, 0x30, 0x9a, 0x9b, 0x03, 0xf5, 0x4e, 0x72, 0x72, 0x19, 0x62, 0xcb, 0x48, 0x73, 0xc9, 0x1b,
+ 0xee, 0x83, 0xad, 0x05, 0x11, 0xa1, 0x8f, 0x96, 0x53, 0xa5, 0xaf, 0xa8, 0x9a, 0xa9, 0x73, 0xfd,
+ 0xa4, 0xad, 0x01, 0xcc, 0x85, 0x36, 0x26, 0x8c, 0x5a, 0x55, 0xad, 0xc8, 0x53, 0xb0, 0x0d, 0xd6,
+ 0xd5, 0x81, 0xc2, 0x5a, 0x6b, 0x54, 0x9b, 0x66, 0xeb, 0x81, 0xa3, 0x71, 0xa1, 0x90, 0x38, 0x1f,
+ 0x93, 0x4a, 0xbe, 0xd9, 0x40, 0x4b, 0xe1, 0x07, 0xb0, 0x2d, 0xa2, 0x30, 0x64, 0x5c, 0xe2, 0xc5,
+ 0x54, 0x12, 0xcc, 0x85, 0x55, 0x6b, 0x54, 0x9b, 0xf5, 0xd6, 0xbe, 0x53, 0x06, 0xdb, 0x19, 0x62,
+ 0x1e, 0x93, 0x39, 0x1e, 0x11, 0xcc, 0x07, 0xf5, 0xac, 0x33, 0x09, 0xc5, 0xc1, 0x8f, 0x1a, 0xd8,
+ 0x2d, 0xe1, 0x51, 0x7a, 0x71, 0x19, 0x99, 0xeb, 0x14, 0xaa, 0xff, 0xa4, 0xb0, 0x76, 0x93, 0xc2,
+ 0x59, 0x46, 0xa1, 0xa6, 0x28, 0xbc, 0x28, 0xbf, 0xa3, 0x64, 0xcf, 0x94, 0x91, 0xe8, 0x52, 0xc9,
+ 0x97, 0x19, 0x9f, 0x0b, 0x60, 0x46, 0x02, 0xf3, 0xa9, 0xf6, 0xdc, 0x50, 0x9e, 0x87, 0xb7, 0xf7,
+ 0x1c, 0x0b, 0xcc, 0x8b, 0xbe, 0x20, 0xca, 0x12, 0x30, 0x00, 0xbb, 0x31, 0xe6, 0x59, 0xcb, 0x54,
+ 0x48, 0x24, 0x23, 0x61, 0x6d, 0x36, 0x8c, 0x66, 0xbd, 0xf5, 0xe6, 0xf6, 0x33, 0x26, 0x05, 0x93,
+ 0xa1, 0xf2, 0x18, 0xc0, 0xf8, 0x46, 0x0e, 0x3e, 0x07, 0x77, 0x31, 0x45, 0x33, 0x1f, 0x2f, 0x2c,
+ 0xb3, 0x61, 0x34, 0xcd, 0x96, 0xbd, 0x1a, 0xb1, 0xfa, 0xc0, 0x9d, 0x63, 0xc6, 0xfc, 0x09, 0xf2,
+ 0x23, 0x3c, 0x58, 0x49, 0xed, 0x43, 0x60, 0x16, 0xf6, 0x87, 0x3b, 0xa0, 0xfa, 0x15, 0x2f, 0xf5,
+ 0x4f, 0x99, 0x3c, 0xe1, 0x1e, 0xa8, 0xc5, 0x49, 0x8b, 0xfe, 0x68, 0xd3, 0xe0, 0x55, 0xe5, 0xa5,
+ 0x61, 0xbf, 0x05, 0xdb, 0xd7, 0xce, 0xff, 0x9f, 0xf6, 0x83, 0xcf, 0x00, 0xde, 0xbc, 0x0c, 0x3e,
+ 0x05, 0x4f, 0x26, 0xdd, 0x41, 0xef, 0xb4, 0xd7, 0x39, 0x1a, 0xf5, 0x3e, 0xf5, 0xa7, 0xc3, 0xd1,
+ 0xd1, 0x68, 0x3c, 0x9c, 0x8e, 0xfb, 0xc3, 0xf3, 0x6e, 0xa7, 0x77, 0xda, 0xeb, 0x9e, 0xec, 0xdc,
+ 0x81, 0x75, 0x00, 0xc6, 0xfd, 0x54, 0xd6, 0x3d, 0xd9, 0x31, 0xe0, 0x16, 0xd8, 0xc8, 0xa2, 0xca,
+ 0xf1, 0x4f, 0x03, 0x58, 0x73, 0x16, 0x94, 0x02, 0x3e, 0xbe, 0x57, 0x24, 0x7c, 0x9e, 0x80, 0x39,
+ 0x37, 0x2e, 0xde, 0x69, 0xa9, 0xc7, 0x7c, 0x44, 0x3d, 0x87, 0x71, 0xcf, 0xf5, 0x30, 0x55, 0xd8,
+ 0xdc, 0xb4, 0x84, 0x42, 0x22, 0xfe, 0xfe, 0x2f, 0x79, 0x9d, 0x47, 0xbf, 0x2a, 0xf6, 0xfb, 0xd4,
+ 0xa0, 0xe3, 0xb3, 0x68, 0xe1, 0x9c, 0xe5, 0x13, 0x27, 0xed, 0xdf, 0xab, 0xe2, 0xa5, 0x2a, 0x5e,
+ 0xe6, 0xc5, 0xcb, 0x49, 0x7b, 0xb6, 0xae, 0x86, 0xb4, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0xdf,
+ 0xb9, 0x3f, 0x8b, 0x24, 0x05, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go
new file mode 100644
index 00000000..ea5cf925
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go
@@ -0,0 +1,1035 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/notification_service.proto
+
+package monitoring
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "google.golang.org/genproto/googleapis/api/annotations"
+import google_protobuf5 "github.com/golang/protobuf/ptypes/empty"
+import google_protobuf6 "google.golang.org/genproto/protobuf/field_mask"
+import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp"
+
+import (
+ context "golang.org/x/net/context"
+ grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// The `ListNotificationChannelDescriptors` request.
+type ListNotificationChannelDescriptorsRequest struct {
+ // The REST resource name of the parent from which to retrieve
+ // the notification channel descriptors. The expected syntax is:
+ //
+ // projects/[PROJECT_ID]
+ //
+ // Note that this names the parent container in which to look for the
+ // descriptors; to retrieve a single descriptor by name, use the
+ // [GetNotificationChannelDescriptor][google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor]
+ // operation, instead.
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+ // The maximum number of results to return in a single response. If
+ // not set to a positive number, a reasonable value will be chosen by the
+ // service.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
+ // If non-empty, `page_token` must contain a value returned as the
+ // `next_page_token` in a previous response to request the next set
+ // of results.
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
+}
+
+func (m *ListNotificationChannelDescriptorsRequest) Reset() {
+ *m = ListNotificationChannelDescriptorsRequest{}
+}
+func (m *ListNotificationChannelDescriptorsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListNotificationChannelDescriptorsRequest) ProtoMessage() {}
+func (*ListNotificationChannelDescriptorsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor9, []int{0}
+}
+
+func (m *ListNotificationChannelDescriptorsRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *ListNotificationChannelDescriptorsRequest) GetPageSize() int32 {
+ if m != nil {
+ return m.PageSize
+ }
+ return 0
+}
+
+func (m *ListNotificationChannelDescriptorsRequest) GetPageToken() string {
+ if m != nil {
+ return m.PageToken
+ }
+ return ""
+}
+
+// The `ListNotificationChannelDescriptors` response.
+type ListNotificationChannelDescriptorsResponse struct {
+ // The monitored resource descriptors supported for the specified
+ // project, optionally filtered.
+ ChannelDescriptors []*NotificationChannelDescriptor `protobuf:"bytes,1,rep,name=channel_descriptors,json=channelDescriptors" json:"channel_descriptors,omitempty"`
+ // If not empty, indicates that there may be more results that match
+ // the request. Use the value in the `page_token` field in a
+ // subsequent request to fetch the next set of results. If empty,
+ // all results have been returned.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
+}
+
+func (m *ListNotificationChannelDescriptorsResponse) Reset() {
+ *m = ListNotificationChannelDescriptorsResponse{}
+}
+func (m *ListNotificationChannelDescriptorsResponse) String() string {
+ return proto.CompactTextString(m)
+}
+func (*ListNotificationChannelDescriptorsResponse) ProtoMessage() {}
+func (*ListNotificationChannelDescriptorsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor9, []int{1}
+}
+
+func (m *ListNotificationChannelDescriptorsResponse) GetChannelDescriptors() []*NotificationChannelDescriptor {
+ if m != nil {
+ return m.ChannelDescriptors
+ }
+ return nil
+}
+
+func (m *ListNotificationChannelDescriptorsResponse) GetNextPageToken() string {
+ if m != nil {
+ return m.NextPageToken
+ }
+ return ""
+}
+
+// The `GetNotificationChannelDescriptor` request.
+type GetNotificationChannelDescriptorRequest struct {
+ // The channel type for which to execute the request. The format is
+ // `projects/[PROJECT_ID]/notificationChannelDescriptors/{channel_type}`.
+ Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
+}
+
+func (m *GetNotificationChannelDescriptorRequest) Reset() {
+ *m = GetNotificationChannelDescriptorRequest{}
+}
+func (m *GetNotificationChannelDescriptorRequest) String() string { return proto.CompactTextString(m) }
+func (*GetNotificationChannelDescriptorRequest) ProtoMessage() {}
+func (*GetNotificationChannelDescriptorRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor9, []int{2}
+}
+
+func (m *GetNotificationChannelDescriptorRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// The `CreateNotificationChannel` request.
+type CreateNotificationChannelRequest struct {
+ // The project on which to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID]
+ //
+ // Note that this names the container into which the channel will be
+ // written. This does not name the newly created channel. The resulting
+ // channel's name will have a normalized version of this field as a prefix,
+ // but will add `/notificationChannels/[CHANNEL_ID]` to identify the channel.
+ Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
+ // The definition of the `NotificationChannel` to create.
+ NotificationChannel *NotificationChannel `protobuf:"bytes,2,opt,name=notification_channel,json=notificationChannel" json:"notification_channel,omitempty"`
+}
+
+func (m *CreateNotificationChannelRequest) Reset() { *m = CreateNotificationChannelRequest{} }
+func (m *CreateNotificationChannelRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateNotificationChannelRequest) ProtoMessage() {}
+func (*CreateNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor9, []int{3}
+}
+
+func (m *CreateNotificationChannelRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *CreateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel {
+ if m != nil {
+ return m.NotificationChannel
+ }
+ return nil
+}
+
+// The `ListNotificationChannels` request.
+type ListNotificationChannelsRequest struct {
+ // The project on which to execute the request. The format is
+ // `projects/[PROJECT_ID]`. That is, this names the container
+ // in which to look for the notification channels; it does not name a
+ // specific channel. To query a specific channel by REST resource name, use
+ // the
+ // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] operation.
+ Name string `protobuf:"bytes,5,opt,name=name" json:"name,omitempty"`
+ // If provided, this field specifies the criteria that must be met by
+ // notification channels to be included in the response.
+ //
+ // For more details, see [sorting and
+ // filtering](/monitoring/api/v3/sorting-and-filtering).
+ Filter string `protobuf:"bytes,6,opt,name=filter" json:"filter,omitempty"`
+ // A comma-separated list of fields by which to sort the result. Supports
+ // the same set of fields as in `filter`. Entries can be prefixed with
+ // a minus sign to sort in descending rather than ascending order.
+ //
+ // For more details, see [sorting and
+ // filtering](/monitoring/api/v3/sorting-and-filtering).
+ OrderBy string `protobuf:"bytes,7,opt,name=order_by,json=orderBy" json:"order_by,omitempty"`
+ // The maximum number of results to return in a single response. If
+ // not set to a positive number, a reasonable value will be chosen by the
+ // service.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
+ // If non-empty, `page_token` must contain a value returned as the
+ // `next_page_token` in a previous response to request the next set
+ // of results.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
+}
+
+func (m *ListNotificationChannelsRequest) Reset() { *m = ListNotificationChannelsRequest{} }
+func (m *ListNotificationChannelsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListNotificationChannelsRequest) ProtoMessage() {}
+func (*ListNotificationChannelsRequest) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{4} }
+
+func (m *ListNotificationChannelsRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *ListNotificationChannelsRequest) GetFilter() string {
+ if m != nil {
+ return m.Filter
+ }
+ return ""
+}
+
+func (m *ListNotificationChannelsRequest) GetOrderBy() string {
+ if m != nil {
+ return m.OrderBy
+ }
+ return ""
+}
+
+func (m *ListNotificationChannelsRequest) GetPageSize() int32 {
+ if m != nil {
+ return m.PageSize
+ }
+ return 0
+}
+
+func (m *ListNotificationChannelsRequest) GetPageToken() string {
+ if m != nil {
+ return m.PageToken
+ }
+ return ""
+}
+
+// The `ListNotificationChannels` response.
+type ListNotificationChannelsResponse struct {
+ // The notification channels defined for the specified project.
+ NotificationChannels []*NotificationChannel `protobuf:"bytes,3,rep,name=notification_channels,json=notificationChannels" json:"notification_channels,omitempty"`
+ // If not empty, indicates that there may be more results that match
+ // the request. Use the value in the `page_token` field in a
+ // subsequent request to fetch the next set of results. If empty,
+ // all results have been returned.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
+}
+
+func (m *ListNotificationChannelsResponse) Reset() { *m = ListNotificationChannelsResponse{} }
+func (m *ListNotificationChannelsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListNotificationChannelsResponse) ProtoMessage() {}
+func (*ListNotificationChannelsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor9, []int{5}
+}
+
+func (m *ListNotificationChannelsResponse) GetNotificationChannels() []*NotificationChannel {
+ if m != nil {
+ return m.NotificationChannels
+ }
+ return nil
+}
+
+func (m *ListNotificationChannelsResponse) GetNextPageToken() string {
+ if m != nil {
+ return m.NextPageToken
+ }
+ return ""
+}
+
+// The `GetNotificationChannel` request.
+type GetNotificationChannelRequest struct {
+ // The channel for which to execute the request. The format is
+ // `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`.
+ Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
+}
+
+func (m *GetNotificationChannelRequest) Reset() { *m = GetNotificationChannelRequest{} }
+func (m *GetNotificationChannelRequest) String() string { return proto.CompactTextString(m) }
+func (*GetNotificationChannelRequest) ProtoMessage() {}
+func (*GetNotificationChannelRequest) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{6} }
+
+func (m *GetNotificationChannelRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// The `UpdateNotificationChannel` request.
+type UpdateNotificationChannelRequest struct {
+ // The fields to update.
+ UpdateMask *google_protobuf6.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
+ // A description of the changes to be applied to the specified
+ // notification channel. The description must provide a definition for
+ // fields to be updated; the names of these fields should also be
+ // included in the `update_mask`.
+ NotificationChannel *NotificationChannel `protobuf:"bytes,3,opt,name=notification_channel,json=notificationChannel" json:"notification_channel,omitempty"`
+}
+
+func (m *UpdateNotificationChannelRequest) Reset() { *m = UpdateNotificationChannelRequest{} }
+func (m *UpdateNotificationChannelRequest) String() string { return proto.CompactTextString(m) }
+func (*UpdateNotificationChannelRequest) ProtoMessage() {}
+func (*UpdateNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor9, []int{7}
+}
+
+func (m *UpdateNotificationChannelRequest) GetUpdateMask() *google_protobuf6.FieldMask {
+ if m != nil {
+ return m.UpdateMask
+ }
+ return nil
+}
+
+func (m *UpdateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel {
+ if m != nil {
+ return m.NotificationChannel
+ }
+ return nil
+}
+
+// The `DeleteNotificationChannel` request.
+type DeleteNotificationChannelRequest struct {
+ // The channel for which to execute the request. The format is
+ // `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`.
+ Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
+ // If true, the notification channel will be deleted regardless of its
+ // use in alert policies (the policies will be updated to remove the
+ // channel). If false, channels that are still referenced by an existing
+ // alerting policy will fail to be deleted in a delete operation.
+ Force bool `protobuf:"varint,5,opt,name=force" json:"force,omitempty"`
+}
+
+func (m *DeleteNotificationChannelRequest) Reset() { *m = DeleteNotificationChannelRequest{} }
+func (m *DeleteNotificationChannelRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteNotificationChannelRequest) ProtoMessage() {}
+func (*DeleteNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor9, []int{8}
+}
+
+func (m *DeleteNotificationChannelRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *DeleteNotificationChannelRequest) GetForce() bool {
+ if m != nil {
+ return m.Force
+ }
+ return false
+}
+
+// The `SendNotificationChannelVerificationCode` request.
+type SendNotificationChannelVerificationCodeRequest struct {
+ // The notification channel to which to send a verification code.
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+}
+
+func (m *SendNotificationChannelVerificationCodeRequest) Reset() {
+ *m = SendNotificationChannelVerificationCodeRequest{}
+}
+func (m *SendNotificationChannelVerificationCodeRequest) String() string {
+ return proto.CompactTextString(m)
+}
+func (*SendNotificationChannelVerificationCodeRequest) ProtoMessage() {}
+func (*SendNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor9, []int{9}
+}
+
+func (m *SendNotificationChannelVerificationCodeRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// The `GetNotificationChannelVerificationCode` request.
+type GetNotificationChannelVerificationCodeRequest struct {
+ // The notification channel for which a verification code is to be generated
+ // and retrieved. This must name a channel that is already verified; if
+ // the specified channel is not verified, the request will fail.
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ // The desired expiration time. If specified, the API will guarantee that
+ // the returned code will not be valid after the specified timestamp;
+ // however, the API cannot guarantee that the returned code will be
+ // valid for at least as long as the requested time (the API puts an upper
+ // bound on the amount of time for which a code may be valid). If omitted,
+ // a default expiration will be used, which may be less than the max
+ // permissible expiration (so specifying an expiration may extend the
+ // code's lifetime over omitting an expiration, even though the API does
+ // impose an upper limit on the maximum expiration that is permitted).
+ ExpireTime *google_protobuf2.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime" json:"expire_time,omitempty"`
+}
+
+func (m *GetNotificationChannelVerificationCodeRequest) Reset() {
+ *m = GetNotificationChannelVerificationCodeRequest{}
+}
+func (m *GetNotificationChannelVerificationCodeRequest) String() string {
+ return proto.CompactTextString(m)
+}
+func (*GetNotificationChannelVerificationCodeRequest) ProtoMessage() {}
+func (*GetNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor9, []int{10}
+}
+
+func (m *GetNotificationChannelVerificationCodeRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *GetNotificationChannelVerificationCodeRequest) GetExpireTime() *google_protobuf2.Timestamp {
+ if m != nil {
+ return m.ExpireTime
+ }
+ return nil
+}
+
+// The `GetNotificationChannelVerificationCode` response.
+type GetNotificationChannelVerificationCodeResponse struct {
+ // The verification code, which may be used to verify other channels
+ // that have an equivalent identity (i.e. other channels of the same
+ // type with the same fingerprint such as other email channels with
+ // the same email address or other sms channels with the same number).
+ Code string `protobuf:"bytes,1,opt,name=code" json:"code,omitempty"`
+ // The expiration time associated with the code that was returned. If
+ // an expiration was provided in the request, this is the minimum of the
+ // requested expiration in the request and the max permitted expiration.
+ ExpireTime *google_protobuf2.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime" json:"expire_time,omitempty"`
+}
+
+func (m *GetNotificationChannelVerificationCodeResponse) Reset() {
+ *m = GetNotificationChannelVerificationCodeResponse{}
+}
+func (m *GetNotificationChannelVerificationCodeResponse) String() string {
+ return proto.CompactTextString(m)
+}
+func (*GetNotificationChannelVerificationCodeResponse) ProtoMessage() {}
+func (*GetNotificationChannelVerificationCodeResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor9, []int{11}
+}
+
+func (m *GetNotificationChannelVerificationCodeResponse) GetCode() string {
+ if m != nil {
+ return m.Code
+ }
+ return ""
+}
+
+func (m *GetNotificationChannelVerificationCodeResponse) GetExpireTime() *google_protobuf2.Timestamp {
+ if m != nil {
+ return m.ExpireTime
+ }
+ return nil
+}
+
+// The `VerifyNotificationChannel` request.
+type VerifyNotificationChannelRequest struct {
+ // The notification channel to verify.
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ // The verification code that was delivered to the channel as
+ // a result of invoking the `SendNotificationChannelVerificationCode` API
+ // method or that was retrieved from a verified channel via
+ // `GetNotificationChannelVerificationCode`. For example, one might have
+ // "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in general, one is only
+ // guaranteed that the code is valid UTF-8; one should not
+ // make any assumptions regarding the structure or format of the code).
+ Code string `protobuf:"bytes,2,opt,name=code" json:"code,omitempty"`
+}
+
+func (m *VerifyNotificationChannelRequest) Reset() { *m = VerifyNotificationChannelRequest{} }
+func (m *VerifyNotificationChannelRequest) String() string { return proto.CompactTextString(m) }
+func (*VerifyNotificationChannelRequest) ProtoMessage() {}
+func (*VerifyNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor9, []int{12}
+}
+
+func (m *VerifyNotificationChannelRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *VerifyNotificationChannelRequest) GetCode() string {
+ if m != nil {
+ return m.Code
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*ListNotificationChannelDescriptorsRequest)(nil), "google.monitoring.v3.ListNotificationChannelDescriptorsRequest")
+ proto.RegisterType((*ListNotificationChannelDescriptorsResponse)(nil), "google.monitoring.v3.ListNotificationChannelDescriptorsResponse")
+ proto.RegisterType((*GetNotificationChannelDescriptorRequest)(nil), "google.monitoring.v3.GetNotificationChannelDescriptorRequest")
+ proto.RegisterType((*CreateNotificationChannelRequest)(nil), "google.monitoring.v3.CreateNotificationChannelRequest")
+ proto.RegisterType((*ListNotificationChannelsRequest)(nil), "google.monitoring.v3.ListNotificationChannelsRequest")
+ proto.RegisterType((*ListNotificationChannelsResponse)(nil), "google.monitoring.v3.ListNotificationChannelsResponse")
+ proto.RegisterType((*GetNotificationChannelRequest)(nil), "google.monitoring.v3.GetNotificationChannelRequest")
+ proto.RegisterType((*UpdateNotificationChannelRequest)(nil), "google.monitoring.v3.UpdateNotificationChannelRequest")
+ proto.RegisterType((*DeleteNotificationChannelRequest)(nil), "google.monitoring.v3.DeleteNotificationChannelRequest")
+ proto.RegisterType((*SendNotificationChannelVerificationCodeRequest)(nil), "google.monitoring.v3.SendNotificationChannelVerificationCodeRequest")
+ proto.RegisterType((*GetNotificationChannelVerificationCodeRequest)(nil), "google.monitoring.v3.GetNotificationChannelVerificationCodeRequest")
+ proto.RegisterType((*GetNotificationChannelVerificationCodeResponse)(nil), "google.monitoring.v3.GetNotificationChannelVerificationCodeResponse")
+ proto.RegisterType((*VerifyNotificationChannelRequest)(nil), "google.monitoring.v3.VerifyNotificationChannelRequest")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for NotificationChannelService service
+
+type NotificationChannelServiceClient interface {
+ // Lists the descriptors for supported channel types. The use of descriptors
+ // makes it possible for new channel types to be dynamically added.
+ ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error)
+ // Gets a single channel descriptor. The descriptor indicates which fields
+ // are expected / permitted for a notification channel of the given type.
+ GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error)
+ // Lists the notification channels that have been created for the project.
+ ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error)
+ // Gets a single notification channel. The channel includes the relevant
+ // configuration details with which the channel was created. However, the
+ // response may truncate or omit passwords, API keys, or other private key
+ // matter and thus the response may not be 100% identical to the information
+ // that was supplied in the call to the create method.
+ GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
+ // Creates a new notification channel, representing a single notification
+ // endpoint such as an email address, SMS number, or pagerduty service.
+ CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
+ // Updates a notification channel. Fields not specified in the field mask
+ // remain unchanged.
+ UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
+ // Deletes a notification channel.
+ DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error)
+ // Causes a verification code to be delivered to the channel. The code
+ // can then be supplied in `VerifyNotificationChannel` to verify the channel.
+ SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error)
+ // Requests a verification code for an already verified channel that can then
+ // be used in a call to VerifyNotificationChannel() on a different channel
+ // with an equivalent identity in the same or in a different project. This
+ // makes it possible to copy a channel between projects without requiring
+ // manual reverification of the channel. If the channel is not in the
+ // verified state, this method will fail (in other words, this may only be
+ // used if the SendNotificationChannelVerificationCode and
+ // VerifyNotificationChannel paths have already been used to put the given
+ // channel into the verified state).
+ //
+ // There is no guarantee that the verification codes returned by this method
+ // will be of a similar structure or form as the ones that are delivered
+ // to the channel via SendNotificationChannelVerificationCode; while
+ // VerifyNotificationChannel() will recognize both the codes delivered via
+ // SendNotificationChannelVerificationCode() and returned from
+ // GetNotificationChannelVerificationCode(), it is typically the case that
+ // the verification codes delivered via
+ // SendNotificationChannelVerificationCode() will be shorter and also
+ // have a shorter expiration (e.g. codes such as "G-123456") whereas
+ // GetVerificationCode() will typically return a much longer, websafe base
+ // 64 encoded string that has a longer expiration time.
+ GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error)
+ // Verifies a `NotificationChannel` by proving receipt of the code
+ // delivered to the channel as a result of calling
+ // `SendNotificationChannelVerificationCode`.
+ VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
+}
+
+type notificationChannelServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewNotificationChannelServiceClient(cc *grpc.ClientConn) NotificationChannelServiceClient {
+ return &notificationChannelServiceClient{cc}
+}
+
+func (c *notificationChannelServiceClient) ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) {
+ out := new(ListNotificationChannelDescriptorsResponse)
+ err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) {
+ out := new(NotificationChannelDescriptor)
+ err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) {
+ out := new(ListNotificationChannelsResponse)
+ err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
+ out := new(NotificationChannel)
+ err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
+ out := new(NotificationChannel)
+ err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
+ out := new(NotificationChannel)
+ err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) {
+ out := new(google_protobuf5.Empty)
+ err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) {
+ out := new(google_protobuf5.Empty)
+ err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) {
+ out := new(GetNotificationChannelVerificationCodeResponse)
+ err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
+ out := new(NotificationChannel)
+ err := grpc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for NotificationChannelService service
+
+type NotificationChannelServiceServer interface {
+ // Lists the descriptors for supported channel types. The use of descriptors
+ // makes it possible for new channel types to be dynamically added.
+ ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error)
+ // Gets a single channel descriptor. The descriptor indicates which fields
+ // are expected / permitted for a notification channel of the given type.
+ GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error)
+ // Lists the notification channels that have been created for the project.
+ ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error)
+ // Gets a single notification channel. The channel includes the relevant
+ // configuration details with which the channel was created. However, the
+ // response may truncate or omit passwords, API keys, or other private key
+ // matter and thus the response may not be 100% identical to the information
+ // that was supplied in the call to the create method.
+ GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error)
+ // Creates a new notification channel, representing a single notification
+ // endpoint such as an email address, SMS number, or pagerduty service.
+ CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error)
+ // Updates a notification channel. Fields not specified in the field mask
+ // remain unchanged.
+ UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error)
+ // Deletes a notification channel.
+ DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*google_protobuf5.Empty, error)
+ // Causes a verification code to be delivered to the channel. The code
+ // can then be supplied in `VerifyNotificationChannel` to verify the channel.
+ SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*google_protobuf5.Empty, error)
+ // Requests a verification code for an already verified channel that can then
+ // be used in a call to VerifyNotificationChannel() on a different channel
+ // with an equivalent identity in the same or in a different project. This
+ // makes it possible to copy a channel between projects without requiring
+ // manual reverification of the channel. If the channel is not in the
+ // verified state, this method will fail (in other words, this may only be
+ // used if the SendNotificationChannelVerificationCode and
+ // VerifyNotificationChannel paths have already been used to put the given
+ // channel into the verified state).
+ //
+ // There is no guarantee that the verification codes returned by this method
+ // will be of a similar structure or form as the ones that are delivered
+ // to the channel via SendNotificationChannelVerificationCode; while
+ // VerifyNotificationChannel() will recognize both the codes delivered via
+ // SendNotificationChannelVerificationCode() and returned from
+ // GetNotificationChannelVerificationCode(), it is typically the case that
+ // the verification codes delivered via
+ // SendNotificationChannelVerificationCode() will be shorter and also
+ // have a shorter expiration (e.g. codes such as "G-123456") whereas
+ // GetVerificationCode() will typically return a much longer, websafe base
+ // 64 encoded string that has a longer expiration time.
+ GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error)
+ // Verifies a `NotificationChannel` by proving receipt of the code
+ // delivered to the channel as a result of calling
+ // `SendNotificationChannelVerificationCode`.
+ VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error)
+}
+
+func RegisterNotificationChannelServiceServer(s *grpc.Server, srv NotificationChannelServiceServer) {
+ s.RegisterService(&_NotificationChannelService_serviceDesc, srv)
+}
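The doc comments above spell out a two-step verification flow for a `NotificationChannel`: `SendNotificationChannelVerificationCode` delivers a short-lived code to the channel, and `VerifyNotificationChannel` proves receipt of it. A minimal client-side sketch of that flow, not part of this patch, using the gRPC client that protoc generates alongside this server interface; the endpoint, project/channel names, code value, and the bare TLS credentials (real calls would also attach OAuth per-RPC credentials) are illustrative assumptions:

```go
package main

import (
	"context"
	"log"

	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	ctx := context.Background()
	// System TLS roots only; production calls would also need OAuth credentials.
	conn, err := grpc.Dial("monitoring.googleapis.com:443",
		grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := monitoringpb.NewNotificationChannelServiceClient(conn)
	channel := "projects/my-project/notificationChannels/12345" // hypothetical name

	// Step 1: deliver a short-lived code (e.g. "G-123456") to the channel itself.
	if _, err := client.SendNotificationChannelVerificationCode(ctx,
		&monitoringpb.SendNotificationChannelVerificationCodeRequest{Name: channel}); err != nil {
		log.Fatal(err)
	}

	// Step 2: echo the received code back to move the channel to the verified state.
	if _, err := client.VerifyNotificationChannel(ctx,
		&monitoringpb.VerifyNotificationChannelRequest{Name: channel, Code: "G-123456"}); err != nil {
		log.Fatal(err)
	}
}
```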
+
+func _NotificationChannelService_ListNotificationChannelDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListNotificationChannelDescriptorsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, req.(*ListNotificationChannelDescriptorsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_GetNotificationChannelDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNotificationChannelDescriptorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, req.(*GetNotificationChannelDescriptorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_ListNotificationChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListNotificationChannelsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, req.(*ListNotificationChannelsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_GetNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, req.(*GetNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_CreateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, req.(*CreateNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_UpdateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, req.(*UpdateNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_DeleteNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, req.(*DeleteNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_SendNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SendNotificationChannelVerificationCodeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, req.(*SendNotificationChannelVerificationCodeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_GetNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNotificationChannelVerificationCodeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, req.(*GetNotificationChannelVerificationCodeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_VerifyNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VerifyNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, req.(*VerifyNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _NotificationChannelService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.NotificationChannelService",
+ HandlerType: (*NotificationChannelServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListNotificationChannelDescriptors",
+ Handler: _NotificationChannelService_ListNotificationChannelDescriptors_Handler,
+ },
+ {
+ MethodName: "GetNotificationChannelDescriptor",
+ Handler: _NotificationChannelService_GetNotificationChannelDescriptor_Handler,
+ },
+ {
+ MethodName: "ListNotificationChannels",
+ Handler: _NotificationChannelService_ListNotificationChannels_Handler,
+ },
+ {
+ MethodName: "GetNotificationChannel",
+ Handler: _NotificationChannelService_GetNotificationChannel_Handler,
+ },
+ {
+ MethodName: "CreateNotificationChannel",
+ Handler: _NotificationChannelService_CreateNotificationChannel_Handler,
+ },
+ {
+ MethodName: "UpdateNotificationChannel",
+ Handler: _NotificationChannelService_UpdateNotificationChannel_Handler,
+ },
+ {
+ MethodName: "DeleteNotificationChannel",
+ Handler: _NotificationChannelService_DeleteNotificationChannel_Handler,
+ },
+ {
+ MethodName: "SendNotificationChannelVerificationCode",
+ Handler: _NotificationChannelService_SendNotificationChannelVerificationCode_Handler,
+ },
+ {
+ MethodName: "GetNotificationChannelVerificationCode",
+ Handler: _NotificationChannelService_GetNotificationChannelVerificationCode_Handler,
+ },
+ {
+ MethodName: "VerifyNotificationChannel",
+ Handler: _NotificationChannelService_VerifyNotificationChannel_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/notification_service.proto",
+}
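Every handler registered in the service descriptor above follows the same pattern: decode the request, then either invoke the `NotificationChannelServiceServer` implementation directly or hand the call to a `grpc.UnaryServerInterceptor` together with a `UnaryServerInfo` carrying the method's `FullMethod` string. A minimal sketch, not part of this patch, of an interceptor that exercises that hook; the logging behavior is only an illustration:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// loggingInterceptor sees every unary call before the generated handler
// dispatches it to the service implementation.
func loggingInterceptor(ctx context.Context, req interface{},
	info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	// e.g. "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel"
	log.Printf("unary call: %s", info.FullMethod)
	return handler(ctx, req)
}

func main() {
	// Passing the interceptor here is what makes the generated handlers take
	// their interceptor != nil branch instead of calling the server directly.
	srv := grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
	_ = srv // RegisterNotificationChannelServiceServer(srv, impl) would follow
}
```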
+
+func init() { proto.RegisterFile("google/monitoring/v3/notification_service.proto", fileDescriptor9) }
+
+var fileDescriptor9 = []byte{
+ // 1011 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x41, 0x6f, 0xdc, 0x44,
+ 0x14, 0xd6, 0xec, 0x26, 0x69, 0xfa, 0x22, 0x04, 0x9a, 0x86, 0xc8, 0xbb, 0xa5, 0xaa, 0xe5, 0x43,
+ 0x93, 0xae, 0x8a, 0x2d, 0xad, 0x4b, 0x84, 0x52, 0x52, 0xda, 0x64, 0xdb, 0x22, 0x48, 0x51, 0xb4,
+ 0x29, 0x91, 0x40, 0x11, 0x2b, 0xc7, 0x7e, 0x6b, 0x4c, 0x76, 0x67, 0x8c, 0x3d, 0x89, 0x9a, 0x56,
+ 0x95, 0x0a, 0x7f, 0x01, 0xfe, 0x00, 0x12, 0xa7, 0x1e, 0x10, 0x67, 0x50, 0x39, 0x23, 0xae, 0x08,
+ 0xae, 0x5c, 0xe0, 0x7f, 0x20, 0x8f, 0xbd, 0xd9, 0xcd, 0x66, 0xbc, 0x6b, 0x37, 0xdc, 0x3c, 0xf3,
+ 0xde, 0xbc, 0xf7, 0xbd, 0xef, 0x7d, 0x9e, 0x67, 0x83, 0xe5, 0x73, 0xee, 0xf7, 0xd0, 0xea, 0x73,
+ 0x16, 0x08, 0x1e, 0x05, 0xcc, 0xb7, 0x8e, 0x6c, 0x8b, 0x71, 0x11, 0x74, 0x03, 0xd7, 0x11, 0x01,
+ 0x67, 0x9d, 0x18, 0xa3, 0xa3, 0xc0, 0x45, 0x33, 0x8c, 0xb8, 0xe0, 0x74, 0x31, 0x3d, 0x60, 0x0e,
+ 0x0f, 0x98, 0x47, 0x76, 0xfd, 0xad, 0x2c, 0x8c, 0x13, 0x06, 0x96, 0xc3, 0x18, 0x17, 0xf2, 0x68,
+ 0x9c, 0x9e, 0xa9, 0x2f, 0x4f, 0x4d, 0x92, 0x39, 0x5e, 0xce, 0x1c, 0xe5, 0x6a, 0xff, 0xb0, 0x6b,
+ 0x61, 0x3f, 0x14, 0xc7, 0x99, 0x51, 0x1f, 0x37, 0x76, 0x03, 0xec, 0x79, 0x9d, 0xbe, 0x13, 0x1f,
+ 0x64, 0x1e, 0x57, 0xc7, 0x3d, 0x44, 0xd0, 0xc7, 0x58, 0x38, 0xfd, 0x30, 0x75, 0x30, 0x9e, 0xc2,
+ 0xf5, 0xad, 0x20, 0x16, 0x1f, 0x8f, 0x64, 0xde, 0xfc, 0xc2, 0x61, 0x0c, 0x7b, 0x2d, 0x8c, 0xdd,
+ 0x28, 0x08, 0x05, 0x8f, 0xe2, 0x36, 0x7e, 0x75, 0x88, 0xb1, 0xa0, 0x14, 0x66, 0x98, 0xd3, 0x47,
+ 0x6d, 0x46, 0x27, 0x2b, 0x17, 0xdb, 0xf2, 0x99, 0x5e, 0x86, 0x8b, 0xa1, 0xe3, 0x63, 0x27, 0x0e,
+ 0x9e, 0xa0, 0x56, 0xd1, 0xc9, 0xca, 0x6c, 0x7b, 0x3e, 0xd9, 0xd8, 0x09, 0x9e, 0x20, 0xbd, 0x02,
+ 0x20, 0x8d, 0x82, 0x1f, 0x20, 0xd3, 0xaa, 0xf2, 0x98, 0x74, 0x7f, 0x94, 0x6c, 0x18, 0x3f, 0x13,
+ 0x68, 0x14, 0xc9, 0x1e, 0x87, 0x9c, 0xc5, 0x48, 0x3d, 0xb8, 0xe4, 0xa6, 0xd6, 0x8e, 0x37, 0x34,
+ 0x6b, 0x44, 0xaf, 0xae, 0x2c, 0x34, 0x6d, 0x53, 0xd5, 0x06, 0x73, 0x62, 0xe8, 0x36, 0x75, 0xcf,
+ 0x64, 0xa3, 0xd7, 0xe0, 0x75, 0x86, 0x8f, 0x45, 0x67, 0x04, 0x78, 0x45, 0x02, 0x7f, 0x2d, 0xd9,
+ 0xde, 0x3e, 0x01, 0xbf, 0x0e, 0xcb, 0x0f, 0x70, 0x32, 0xf4, 0x71, 0xde, 0xaa, 0x43, 0xde, 0x8c,
+ 0xef, 0x08, 0xe8, 0x9b, 0x11, 0x3a, 0x02, 0x15, 0x21, 0x26, 0x1c, 0xa4, 0x7b, 0xb0, 0x78, 0x4a,
+ 0x8c, 0x59, 0x09, 0x12, 0xe4, 0x42, 0xf3, 0x7a, 0x61, 0x1a, 0xda, 0x97, 0xd8, 0xd9, 0x4d, 0xe3,
+ 0x07, 0x02, 0x57, 0x73, 0x5a, 0x72, 0x46, 0x06, 0xb3, 0x23, 0xa8, 0x96, 0x60, 0xae, 0x1b, 0xf4,
+ 0x04, 0x46, 0xda, 0x9c, 0xdc, 0xcd, 0x56, 0xb4, 0x06, 0xf3, 0x3c, 0xf2, 0x30, 0xea, 0xec, 0x1f,
+ 0x6b, 0x17, 0xa4, 0xe5, 0x82, 0x5c, 0x6f, 0x1c, 0x9f, 0x56, 0x4e, 0x75, 0xa2, 0x72, 0x66, 0xc6,
+ 0x95, 0xf3, 0x82, 0x80, 0x9e, 0x0f, 0x33, 0xd3, 0xcb, 0xe7, 0xf0, 0xa6, 0x8a, 0xa9, 0x58, 0xab,
+ 0x4a, 0xc5, 0x94, 0xa0, 0x6a, 0x51, 0x41, 0x55, 0x71, 0xa5, 0xd8, 0x70, 0x45, 0xad, 0x94, 0x49,
+ 0xfa, 0x78, 0x49, 0x40, 0xff, 0x24, 0xf4, 0x26, 0xeb, 0xe3, 0x16, 0x2c, 0x1c, 0x4a, 0x1f, 0xf9,
+ 0xce, 0x67, 0x12, 0xa8, 0x0f, 0xea, 0x1a, 0xbc, 0xf4, 0xe6, 0xfd, 0xe4, 0x5a, 0x78, 0xe8, 0xc4,
+ 0x07, 0x6d, 0x48, 0xdd, 0x93, 0xe7, 0x5c, 0x21, 0x55, 0xff, 0x17, 0x21, 0x6d, 0x81, 0xde, 0xc2,
+ 0x1e, 0x96, 0x96, 0xf7, 0x22, 0xcc, 0x76, 0x79, 0xe4, 0xa6, 0xea, 0x9a, 0x6f, 0xa7, 0x0b, 0xa3,
+ 0x05, 0xe6, 0x0e, 0x32, 0x4f, 0x11, 0x6b, 0x17, 0xa3, 0xe1, 0x16, 0xf7, 0x70, 0x3c, 0x36, 0x19,
+ 0xe1, 0xf4, 0x39, 0x81, 0xb7, 0xd5, 0x9d, 0x28, 0x11, 0x25, 0x21, 0x1d, 0x1f, 0x87, 0x41, 0x84,
+ 0x9d, 0xe4, 0x32, 0xcd, 0x25, 0xfd, 0xd1, 0xe0, 0xa6, 0x6d, 0x43, 0xea, 0x9e, 0x6c, 0x18, 0x5f,
+ 0x13, 0x30, 0x8b, 0x42, 0xc8, 0x64, 0x4c, 0x61, 0xc6, 0xe5, 0xde, 0x09, 0x86, 0xe4, 0xf9, 0x7c,
+ 0x18, 0x3e, 0x04, 0x5d, 0x26, 0x3b, 0x2e, 0xd0, 0x9a, 0xd1, 0xc2, 0x07, 0x40, 0x2a, 0x43, 0x20,
+ 0xcd, 0x5f, 0xde, 0x80, 0xba, 0x22, 0xcc, 0x4e, 0x3a, 0x21, 0xe9, 0xbf, 0x04, 0x8c, 0xe9, 0x37,
+ 0x3c, 0x7d, 0x5f, 0x2d, 0xb6, 0xc2, 0x93, 0xa9, 0x7e, 0xe7, 0xd5, 0x03, 0xa4, 0x2c, 0x1b, 0xef,
+ 0x7d, 0xf3, 0xc7, 0x3f, 0xdf, 0x56, 0x56, 0xe9, 0xcd, 0x64, 0x10, 0x3f, 0x4d, 0xea, 0x5d, 0x0f,
+ 0x23, 0xfe, 0x25, 0xba, 0x22, 0xb6, 0x1a, 0xcf, 0x2c, 0x36, 0xb9, 0x80, 0xbf, 0x08, 0xe8, 0xd3,
+ 0xa6, 0x01, 0x5d, 0x57, 0x83, 0x2c, 0x38, 0x45, 0xea, 0xaf, 0x32, 0xe1, 0x8c, 0xdb, 0xb2, 0xac,
+ 0x77, 0xe9, 0xaa, 0xaa, 0xac, 0x29, 0x55, 0x59, 0x8d, 0x67, 0xf4, 0x25, 0x01, 0x2d, 0xef, 0xa2,
+ 0xa5, 0xef, 0x94, 0x62, 0xfd, 0xa4, 0x59, 0xab, 0x65, 0x8f, 0x65, 0x2d, 0x6a, 0xca, 0x5a, 0x6e,
+ 0xd0, 0x46, 0xe1, 0x16, 0xc5, 0xf4, 0x47, 0x02, 0x4b, 0x6a, 0x82, 0xa9, 0x5d, 0xa6, 0x1d, 0x03,
+ 0xec, 0xc5, 0xaf, 0x45, 0xe3, 0xa6, 0x84, 0x6b, 0xd2, 0x1b, 0x45, 0xa9, 0x97, 0x84, 0xff, 0x46,
+ 0xa0, 0x96, 0xfb, 0x5d, 0x40, 0x73, 0xa8, 0x9b, 0xf6, 0x21, 0x51, 0x06, 0xf6, 0x07, 0x12, 0xf6,
+ 0x86, 0x51, 0x82, 0xe5, 0x35, 0xe5, 0x20, 0xa1, 0x7f, 0x13, 0xa8, 0xe5, 0x8e, 0xb0, 0xbc, 0x52,
+ 0xa6, 0xcd, 0xbc, 0x32, 0xa5, 0x74, 0x64, 0x29, 0x9f, 0x36, 0xef, 0xa6, 0xa5, 0x28, 0x30, 0x9a,
+ 0x05, 0xdb, 0x92, 0x53, 0xe1, 0xf7, 0x04, 0x6a, 0xb9, 0x53, 0x2e, 0xaf, 0xc2, 0x69, 0x63, 0xb1,
+ 0xbe, 0x74, 0xe6, 0x1e, 0xbf, 0x97, 0x7c, 0xf4, 0x0f, 0x04, 0xd5, 0x28, 0x27, 0xa8, 0x3f, 0x09,
+ 0x2c, 0x17, 0x9c, 0x9d, 0xb4, 0xa5, 0x46, 0x5c, 0x6e, 0xf4, 0xe6, 0xe2, 0xdf, 0x92, 0xf8, 0xef,
+ 0x1b, 0x77, 0xcb, 0xe0, 0x5f, 0x8b, 0x91, 0x79, 0xe3, 0x99, 0xd6, 0x48, 0x83, 0x3e, 0xaf, 0xc0,
+ 0xb5, 0x62, 0x93, 0x94, 0x6e, 0x96, 0x79, 0xd3, 0xf3, 0xaa, 0x6a, 0x9d, 0x2f, 0x48, 0x76, 0x87,
+ 0x7d, 0x24, 0x39, 0xb8, 0x67, 0xdc, 0x29, 0xc5, 0x81, 0x8f, 0x42, 0x45, 0xc1, 0xaf, 0x04, 0x6a,
+ 0xb9, 0x93, 0x3c, 0x4f, 0x7e, 0xd3, 0x46, 0x7f, 0x99, 0x17, 0x2c, 0x9b, 0x2e, 0x86, 0x5d, 0xaa,
+ 0x9a, 0x23, 0x89, 0x60, 0x8d, 0x34, 0x36, 0x7e, 0x22, 0xa0, 0xb9, 0xbc, 0xaf, 0x4c, 0xb8, 0xa1,
+ 0x8d, 0x66, 0xcc, 0x3e, 0x28, 0xb6, 0x13, 0x45, 0x6d, 0x93, 0xcf, 0x6e, 0x67, 0x27, 0x7c, 0xde,
+ 0x73, 0x98, 0x6f, 0xf2, 0xc8, 0xb7, 0x7c, 0x64, 0x52, 0x6f, 0xd9, 0xff, 0xbb, 0x13, 0x06, 0xf1,
+ 0xe9, 0xdf, 0xeb, 0x5b, 0xc3, 0xd5, 0x8b, 0x4a, 0xfd, 0x41, 0x1a, 0x60, 0xb3, 0xc7, 0x0f, 0x3d,
+ 0xf3, 0xe1, 0x30, 0xf1, 0xae, 0xfd, 0xfb, 0xc0, 0xb8, 0x27, 0x8d, 0x7b, 0x43, 0xe3, 0xde, 0xae,
+ 0xbd, 0x3f, 0x27, 0x93, 0xd8, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x3b, 0xf3, 0x96, 0xf5, 0x27,
+ 0x10, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go
index 042cddd0..9357d352 100644
--- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go
@@ -51,7 +51,7 @@ var UptimeCheckRegion_value = map[string]int32{
func (x UptimeCheckRegion) String() string {
return proto.EnumName(UptimeCheckRegion_name, int32(x))
}
-func (UptimeCheckRegion) EnumDescriptor() ([]byte, []int) { return fileDescriptor5, []int{0} }
+func (UptimeCheckRegion) EnumDescriptor() ([]byte, []int) { return fileDescriptor10, []int{0} }
// The supported resource types that can be used as values of
// group_resource.resource_type. gae_app and uptime_url are not allowed
@@ -81,7 +81,7 @@ var GroupResourceType_value = map[string]int32{
func (x GroupResourceType) String() string {
return proto.EnumName(GroupResourceType_name, int32(x))
}
-func (GroupResourceType) EnumDescriptor() ([]byte, []int) { return fileDescriptor5, []int{1} }
+func (GroupResourceType) EnumDescriptor() ([]byte, []int) { return fileDescriptor10, []int{1} }
// This message configures which resources and services to monitor for
// availability.
@@ -136,7 +136,7 @@ type UptimeCheckConfig struct {
func (m *UptimeCheckConfig) Reset() { *m = UptimeCheckConfig{} }
func (m *UptimeCheckConfig) String() string { return proto.CompactTextString(m) }
func (*UptimeCheckConfig) ProtoMessage() {}
-func (*UptimeCheckConfig) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} }
+func (*UptimeCheckConfig) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{0} }
type isUptimeCheckConfig_Resource interface {
isUptimeCheckConfig_Resource()
@@ -391,7 +391,7 @@ func (m *UptimeCheckConfig_ResourceGroup) Reset() { *m = UptimeCheckConf
func (m *UptimeCheckConfig_ResourceGroup) String() string { return proto.CompactTextString(m) }
func (*UptimeCheckConfig_ResourceGroup) ProtoMessage() {}
func (*UptimeCheckConfig_ResourceGroup) Descriptor() ([]byte, []int) {
- return fileDescriptor5, []int{0, 0}
+ return fileDescriptor10, []int{0, 0}
}
func (m *UptimeCheckConfig_ResourceGroup) GetGroupId() string {
@@ -443,7 +443,7 @@ type UptimeCheckConfig_HttpCheck struct {
func (m *UptimeCheckConfig_HttpCheck) Reset() { *m = UptimeCheckConfig_HttpCheck{} }
func (m *UptimeCheckConfig_HttpCheck) String() string { return proto.CompactTextString(m) }
func (*UptimeCheckConfig_HttpCheck) ProtoMessage() {}
-func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0, 1} }
+func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{0, 1} }
func (m *UptimeCheckConfig_HttpCheck) GetUseSsl() bool {
if m != nil {
@@ -505,7 +505,7 @@ func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) String() string {
}
func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoMessage() {}
func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) Descriptor() ([]byte, []int) {
- return fileDescriptor5, []int{0, 1, 0}
+ return fileDescriptor10, []int{0, 1, 0}
}
func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetUsername() string {
@@ -533,7 +533,7 @@ type UptimeCheckConfig_TcpCheck struct {
func (m *UptimeCheckConfig_TcpCheck) Reset() { *m = UptimeCheckConfig_TcpCheck{} }
func (m *UptimeCheckConfig_TcpCheck) String() string { return proto.CompactTextString(m) }
func (*UptimeCheckConfig_TcpCheck) ProtoMessage() {}
-func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0, 2} }
+func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{0, 2} }
func (m *UptimeCheckConfig_TcpCheck) GetPort() int32 {
if m != nil {
@@ -554,7 +554,7 @@ func (m *UptimeCheckConfig_ContentMatcher) Reset() { *m = UptimeCheckCon
func (m *UptimeCheckConfig_ContentMatcher) String() string { return proto.CompactTextString(m) }
func (*UptimeCheckConfig_ContentMatcher) ProtoMessage() {}
func (*UptimeCheckConfig_ContentMatcher) Descriptor() ([]byte, []int) {
- return fileDescriptor5, []int{0, 3}
+ return fileDescriptor10, []int{0, 3}
}
func (m *UptimeCheckConfig_ContentMatcher) GetContent() string {
@@ -583,7 +583,7 @@ func (m *UptimeCheckConfig_InternalChecker) Reset() { *m = UptimeCheckCo
func (m *UptimeCheckConfig_InternalChecker) String() string { return proto.CompactTextString(m) }
func (*UptimeCheckConfig_InternalChecker) ProtoMessage() {}
func (*UptimeCheckConfig_InternalChecker) Descriptor() ([]byte, []int) {
- return fileDescriptor5, []int{0, 4}
+ return fileDescriptor10, []int{0, 4}
}
func (m *UptimeCheckConfig_InternalChecker) GetProjectId() string {
@@ -641,7 +641,7 @@ type UptimeCheckIp struct {
func (m *UptimeCheckIp) Reset() { *m = UptimeCheckIp{} }
func (m *UptimeCheckIp) String() string { return proto.CompactTextString(m) }
func (*UptimeCheckIp) ProtoMessage() {}
-func (*UptimeCheckIp) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{1} }
+func (*UptimeCheckIp) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{1} }
func (m *UptimeCheckIp) GetRegion() UptimeCheckRegion {
if m != nil {
@@ -677,9 +677,9 @@ func init() {
proto.RegisterEnum("google.monitoring.v3.GroupResourceType", GroupResourceType_name, GroupResourceType_value)
}
-func init() { proto.RegisterFile("google/monitoring/v3/uptime.proto", fileDescriptor5) }
+func init() { proto.RegisterFile("google/monitoring/v3/uptime.proto", fileDescriptor10) }
-var fileDescriptor5 = []byte{
+var fileDescriptor10 = []byte{
// 1021 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xdd, 0x4e, 0xe3, 0x46,
0x14, 0x5e, 0x13, 0xc8, 0xcf, 0x21, 0xb0, 0x66, 0x4a, 0xdb, 0x60, 0x89, 0x15, 0xbb, 0xbd, 0x28,
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go
index 5e2fb2a7..a2ee162d 100644
--- a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go
@@ -7,8 +7,8 @@ import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
-import google_protobuf4 "github.com/golang/protobuf/ptypes/empty"
-import google_protobuf5 "google.golang.org/genproto/protobuf/field_mask"
+import google_protobuf5 "github.com/golang/protobuf/ptypes/empty"
+import google_protobuf6 "google.golang.org/genproto/protobuf/field_mask"
import (
context "golang.org/x/net/context"
@@ -40,7 +40,7 @@ type ListUptimeCheckConfigsRequest struct {
func (m *ListUptimeCheckConfigsRequest) Reset() { *m = ListUptimeCheckConfigsRequest{} }
func (m *ListUptimeCheckConfigsRequest) String() string { return proto.CompactTextString(m) }
func (*ListUptimeCheckConfigsRequest) ProtoMessage() {}
-func (*ListUptimeCheckConfigsRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{0} }
+func (*ListUptimeCheckConfigsRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{0} }
func (m *ListUptimeCheckConfigsRequest) GetParent() string {
if m != nil {
@@ -78,7 +78,7 @@ type ListUptimeCheckConfigsResponse struct {
func (m *ListUptimeCheckConfigsResponse) Reset() { *m = ListUptimeCheckConfigsResponse{} }
func (m *ListUptimeCheckConfigsResponse) String() string { return proto.CompactTextString(m) }
func (*ListUptimeCheckConfigsResponse) ProtoMessage() {}
-func (*ListUptimeCheckConfigsResponse) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{1} }
+func (*ListUptimeCheckConfigsResponse) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{1} }
func (m *ListUptimeCheckConfigsResponse) GetUptimeCheckConfigs() []*UptimeCheckConfig {
if m != nil {
@@ -105,7 +105,7 @@ type GetUptimeCheckConfigRequest struct {
func (m *GetUptimeCheckConfigRequest) Reset() { *m = GetUptimeCheckConfigRequest{} }
func (m *GetUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) }
func (*GetUptimeCheckConfigRequest) ProtoMessage() {}
-func (*GetUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{2} }
+func (*GetUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{2} }
func (m *GetUptimeCheckConfigRequest) GetName() string {
if m != nil {
@@ -127,7 +127,7 @@ type CreateUptimeCheckConfigRequest struct {
func (m *CreateUptimeCheckConfigRequest) Reset() { *m = CreateUptimeCheckConfigRequest{} }
func (m *CreateUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) }
func (*CreateUptimeCheckConfigRequest) ProtoMessage() {}
-func (*CreateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{3} }
+func (*CreateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{3} }
func (m *CreateUptimeCheckConfigRequest) GetParent() string {
if m != nil {
@@ -149,7 +149,7 @@ type UpdateUptimeCheckConfigRequest struct {
// configuration are updated with values from the new configuration. If this
// field is empty, then the current configuration is completely replaced with
// the new configuration.
- UpdateMask *google_protobuf5.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
+ UpdateMask *google_protobuf6.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
// Required. If an `"updateMask"` has been specified, this field gives
// the values for the set of fields mentioned in the `"updateMask"`. If an
// `"updateMask"` has not been given, this uptime check configuration replaces
@@ -163,9 +163,9 @@ type UpdateUptimeCheckConfigRequest struct {
func (m *UpdateUptimeCheckConfigRequest) Reset() { *m = UpdateUptimeCheckConfigRequest{} }
func (m *UpdateUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) }
func (*UpdateUptimeCheckConfigRequest) ProtoMessage() {}
-func (*UpdateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{4} }
+func (*UpdateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{4} }
-func (m *UpdateUptimeCheckConfigRequest) GetUpdateMask() *google_protobuf5.FieldMask {
+func (m *UpdateUptimeCheckConfigRequest) GetUpdateMask() *google_protobuf6.FieldMask {
if m != nil {
return m.UpdateMask
}
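The `update_mask` comment in the hunks above defines partial-update semantics for `UpdateUptimeCheckConfig`: only the paths listed in the mask are copied from the supplied configuration, and an empty mask replaces the stored configuration wholesale. A minimal sketch, not part of this patch, of building such a partial update; the import aliases, project and check names, and field values are placeholders:

```go
package main

import (
	"fmt"

	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
	field_mask "google.golang.org/genproto/protobuf/field_mask"
)

func main() {
	// Only display_name is taken from the new configuration; every other field
	// of the stored check is left unchanged. Omitting the mask would instead
	// replace the whole configuration.
	req := &monitoringpb.UpdateUptimeCheckConfigRequest{
		UpdateMask: &field_mask.FieldMask{Paths: []string{"display_name"}},
		UptimeCheckConfig: &monitoringpb.UptimeCheckConfig{
			Name:        "projects/my-project/uptimeCheckConfigs/my-check", // hypothetical
			DisplayName: "My check (renamed)",
		},
	}
	fmt.Println(req) // would be passed to UptimeCheckServiceClient.UpdateUptimeCheckConfig
}
```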
@@ -190,7 +190,7 @@ type DeleteUptimeCheckConfigRequest struct {
func (m *DeleteUptimeCheckConfigRequest) Reset() { *m = DeleteUptimeCheckConfigRequest{} }
func (m *DeleteUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteUptimeCheckConfigRequest) ProtoMessage() {}
-func (*DeleteUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{5} }
+func (*DeleteUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{5} }
func (m *DeleteUptimeCheckConfigRequest) GetName() string {
if m != nil {
@@ -217,7 +217,7 @@ type ListUptimeCheckIpsRequest struct {
func (m *ListUptimeCheckIpsRequest) Reset() { *m = ListUptimeCheckIpsRequest{} }
func (m *ListUptimeCheckIpsRequest) String() string { return proto.CompactTextString(m) }
func (*ListUptimeCheckIpsRequest) ProtoMessage() {}
-func (*ListUptimeCheckIpsRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{6} }
+func (*ListUptimeCheckIpsRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{6} }
func (m *ListUptimeCheckIpsRequest) GetPageSize() int32 {
if m != nil {
@@ -250,7 +250,7 @@ type ListUptimeCheckIpsResponse struct {
func (m *ListUptimeCheckIpsResponse) Reset() { *m = ListUptimeCheckIpsResponse{} }
func (m *ListUptimeCheckIpsResponse) String() string { return proto.CompactTextString(m) }
func (*ListUptimeCheckIpsResponse) ProtoMessage() {}
-func (*ListUptimeCheckIpsResponse) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{7} }
+func (*ListUptimeCheckIpsResponse) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{7} }
func (m *ListUptimeCheckIpsResponse) GetUptimeCheckIps() []*UptimeCheckIp {
if m != nil {
@@ -303,7 +303,7 @@ type UptimeCheckServiceClient interface {
// Deletes an uptime check configuration. Note that this method will fail
// if the uptime check configuration is referenced by an alert policy or
// other dependent configs that would be rendered invalid by the deletion.
- DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error)
+ DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error)
// Returns the list of IPs that checkers run from
ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error)
}
@@ -352,8 +352,8 @@ func (c *uptimeCheckServiceClient) UpdateUptimeCheckConfig(ctx context.Context,
return out, nil
}
-func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) {
- out := new(google_protobuf4.Empty)
+func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) {
+ out := new(google_protobuf5.Empty)
err := grpc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", in, out, c.cc, opts...)
if err != nil {
return nil, err
@@ -388,7 +388,7 @@ type UptimeCheckServiceServer interface {
// Deletes an uptime check configuration. Note that this method will fail
// if the uptime check configuration is referenced by an alert policy or
// other dependent configs that would be rendered invalid by the deletion.
- DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*google_protobuf4.Empty, error)
+ DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*google_protobuf5.Empty, error)
// Returns the list of IPs that checkers run from
ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error)
}
@@ -538,9 +538,9 @@ var _UptimeCheckService_serviceDesc = grpc.ServiceDesc{
Metadata: "google/monitoring/v3/uptime_service.proto",
}
-func init() { proto.RegisterFile("google/monitoring/v3/uptime_service.proto", fileDescriptor6) }
+func init() { proto.RegisterFile("google/monitoring/v3/uptime_service.proto", fileDescriptor11) }
-var fileDescriptor6 = []byte{
+var fileDescriptor11 = []byte{
// 735 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdf, 0x4e, 0x13, 0x4f,
0x14, 0xce, 0xb4, 0xfc, 0x08, 0x1c, 0xf2, 0xf3, 0xcf, 0xd8, 0x40, 0x5d, 0xa4, 0xa9, 0x35, 0x51,
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 962ee879..0a8d5813 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -3,10 +3,38 @@
"ignore": "test",
"package": [
{
- "checksumSHA1": "j/0+O4eJ99Jrb9un/bwi5npLyOM=",
+ "checksumSHA1": "b1KgRkWqz0RmrEBK6IJ8kOJva6w=",
"path": "cloud.google.com/go/compute/metadata",
- "revision": "d194aef7ed539aeb64b43432a945431beb75360c",
- "revisionTime": "2018-10-24T17:17:36Z"
+ "revision": "e6e4ae65f1354d2a13cc42f03bfd6b33c11f3cb2",
+ "revisionTime": "2019-04-08T22:14:06Z"
+ },
+ {
+ "checksumSHA1": "LXUebrKkcPspQt/lLmOx8ovqR4E=",
+ "path": "cloud.google.com/go/monitoring/apiv3",
+ "revision": "e6e4ae65f1354d2a13cc42f03bfd6b33c11f3cb2",
+ "revisionTime": "2019-04-08T22:14:06Z"
+ },
+ {
+ "checksumSHA1": "mG9ijDaRFqgWp+Vzdpfh0D1znvg=",
+ "path": "cloud.google.com/go/trace/apiv2",
+ "revision": "e6e4ae65f1354d2a13cc42f03bfd6b33c11f3cb2",
+ "revisionTime": "2019-04-08T22:14:06Z"
+ },
+ {
+ "checksumSHA1": "Y6odF8TOAgbNzxerPWY/IIyXda8=",
+ "path": "contrib.go.opencensus.io/exporter/stackdriver",
+ "revision": "2b7f4fc93daf5ec3048fa4fc1c15573466711a17",
+ "revisionTime": "2018-11-14T00:04:00Z",
+ "version": "v0.8.0",
+ "versionExact": "v0.8.0"
+ },
+ {
+ "checksumSHA1": "BU6wj58GW/Fu2PHO15esGZXkkCY=",
+ "path": "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource",
+ "revision": "2b7f4fc93daf5ec3048fa4fc1c15573466711a17",
+ "revisionTime": "2018-11-14T00:04:00Z",
+ "version": "v0.8.0",
+ "versionExact": "v0.8.0"
},
{
"checksumSHA1": "oIt4tXgFYnZJBsCac1BQLnTWALM=",
@@ -416,12 +444,24 @@
"revision": "bbd03ef6da3a115852eaf24c8a1c46aeb39aa175",
"revisionTime": "2018-02-02T18:43:18Z"
},
+ {
+ "checksumSHA1": "Ylq6kq3KWBy6mu68oyEwenhNMdg=",
+ "path": "github.com/golang/protobuf/ptypes/struct",
+ "revision": "bbd03ef6da3a115852eaf24c8a1c46aeb39aa175",
+ "revisionTime": "2018-02-02T18:43:18Z"
+ },
{
"checksumSHA1": "O2ItP5rmfrgxPufhjJXbFlXuyL8=",
"path": "github.com/golang/protobuf/ptypes/timestamp",
"revision": "bbd03ef6da3a115852eaf24c8a1c46aeb39aa175",
"revisionTime": "2018-02-02T18:43:18Z"
},
+ {
+ "checksumSHA1": "kcdAvzEm6mbIP7E/2+yBVwcSTbY=",
+ "path": "github.com/golang/protobuf/ptypes/wrappers",
+ "revision": "bbd03ef6da3a115852eaf24c8a1c46aeb39aa175",
+ "revisionTime": "2018-02-02T18:43:18Z"
+ },
{
"checksumSHA1": "/yFfUp3tGt6cK22UVzbq8SjPDCU=",
"origin": "k8s.io/client-go/1.5/vendor/github.com/google/gofuzz",
@@ -429,6 +469,12 @@
"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
"revisionTime": "2016-09-30T00:14:02Z"
},
+ {
+ "checksumSHA1": "Np15I2tN6ZWdS7SNJWW7OO1pAak=",
+ "path": "github.com/googleapis/gax-go/v2",
+ "revision": "beaecbbdd8af86aa3acf14180d53828ce69400b2",
+ "revisionTime": "2019-03-05T15:30:21Z"
+ },
{
"checksumSHA1": "ndG2xmm2bas4/t+kIEUp2xIni8c=",
"path": "github.com/gophercloud/gophercloud",
@@ -898,6 +944,12 @@
"revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
"revisionTime": "2016-09-30T00:14:02Z"
},
+ {
+ "checksumSHA1": "x2GBAYCmQlV8IOwIfXPlHO3gkVY=",
+ "path": "go.opencensus.io",
+ "revision": "75c0cca22312e51bfd4fafdbe9197ae399e18b38",
+ "revisionTime": "2019-04-08T22:24:43Z"
+ },
{
"checksumSHA1": "rrTaYIL5Ljr5KLwrNYGKGKBZai8=",
"path": "go.opencensus.io/exporter/prometheus",
@@ -1025,34 +1077,34 @@
"revisionTime": "2017-06-08T03:40:07Z"
},
{
- "checksumSHA1": "mktBVED98G2vv+OKcSgtnFVZC1Y=",
+ "checksumSHA1": "/94OVlOstzQP2qidGqEKOXBrzNA=",
"path": "golang.org/x/oauth2",
- "revision": "65a8d08c6292395d47053be10b3c5e91960def76",
- "revisionTime": "2016-06-07T03:33:14Z"
+ "revision": "9f3314589c9a9136388751d9adae6b0ed400978a",
+ "revisionTime": "2019-03-29T22:47:46Z"
},
{
- "checksumSHA1": "2rk6lthfQa5Rfydj8j7+dilKGbo=",
+ "checksumSHA1": "7fFUz5XB2aaqP1GWCU3jXZkzw5s=",
"path": "golang.org/x/oauth2/google",
- "revision": "65a8d08c6292395d47053be10b3c5e91960def76",
- "revisionTime": "2016-06-07T03:33:14Z"
+ "revision": "9f3314589c9a9136388751d9adae6b0ed400978a",
+ "revisionTime": "2019-03-29T22:47:46Z"
},
{
- "checksumSHA1": "W/GiDqzsagBnR7/yEvxatMhUDBs=",
+ "checksumSHA1": "rnUL+5Yzlt+Ky2dlB61VqfhBOb8=",
"path": "golang.org/x/oauth2/internal",
- "revision": "65a8d08c6292395d47053be10b3c5e91960def76",
- "revisionTime": "2016-06-07T03:33:14Z"
+ "revision": "9f3314589c9a9136388751d9adae6b0ed400978a",
+ "revisionTime": "2019-03-29T22:47:46Z"
},
{
- "checksumSHA1": "CPTYHWrVL4jA0B1IuC0hvgcE2AQ=",
+ "checksumSHA1": "huVltYnXdRFDJLgp/ZP9IALzG7g=",
"path": "golang.org/x/oauth2/jws",
- "revision": "65a8d08c6292395d47053be10b3c5e91960def76",
- "revisionTime": "2016-06-07T03:33:14Z"
+ "revision": "9f3314589c9a9136388751d9adae6b0ed400978a",
+ "revisionTime": "2019-03-29T22:47:46Z"
},
{
- "checksumSHA1": "xifBSq0Pn6pIoPA/o3tyzq8X4Ds=",
+ "checksumSHA1": "mwzfgZFAlEkfNHK2OYqnhIvbI+g=",
"path": "golang.org/x/oauth2/jwt",
- "revision": "65a8d08c6292395d47053be10b3c5e91960def76",
- "revisionTime": "2016-06-07T03:33:14Z"
+ "revision": "9f3314589c9a9136388751d9adae6b0ed400978a",
+ "revisionTime": "2019-03-29T22:47:46Z"
},
{
"checksumSHA1": "S0DP7Pn7sZUmXc55IzZnNvERu6s=",
@@ -1060,6 +1112,12 @@
"revision": "450f422ab23cf9881c94e2db30cac0eb1b7cf80c",
"revisionTime": "2016-12-05T22:39:15Z"
},
+ {
+ "checksumSHA1": "tI1YsCNEjyDW/aGNtunZDyjN9MA=",
+ "path": "golang.org/x/sync/semaphore",
+ "revision": "e225da77a7e68af35c70ccbf71af2b83e6acac3c",
+ "revisionTime": "2019-02-15T22:36:53Z"
+ },
{
"checksumSHA1": "k8nyiVwbKd7N/Lo9z85y6v/03NE=",
"path": "golang.org/x/sys/unix",
@@ -1165,6 +1223,60 @@
"revision": "65b0d8655182691ad23b4fac11e6f7b897d9b634",
"revisionTime": "2018-01-23T00:03:20Z"
},
+ {
+ "checksumSHA1": "Mr2fXhMRzlQCgANFm91s536pG7E=",
+ "path": "google.golang.org/api/googleapi/transport",
+ "revision": "6d1d6519126dbfb2fb9f98b6e4d3cf84a32c105b",
+ "revisionTime": "2019-04-10T00:07:50Z"
+ },
+ {
+ "checksumSHA1": "6Tg4dDJKzoSrAA5beVknvnjluOU=",
+ "path": "google.golang.org/api/internal",
+ "revision": "6d1d6519126dbfb2fb9f98b6e4d3cf84a32c105b",
+ "revisionTime": "2019-04-10T00:07:50Z"
+ },
+ {
+ "checksumSHA1": "zh9AcT6oNvhnOqb7w7njY48TkvI=",
+ "path": "google.golang.org/api/iterator",
+ "revision": "6d1d6519126dbfb2fb9f98b6e4d3cf84a32c105b",
+ "revisionTime": "2019-04-10T00:07:50Z"
+ },
+ {
+ "checksumSHA1": "2AyxThTPscWdy49fGsU2tg0Uyw8=",
+ "path": "google.golang.org/api/option",
+ "revision": "6d1d6519126dbfb2fb9f98b6e4d3cf84a32c105b",
+ "revisionTime": "2019-04-10T00:07:50Z"
+ },
+ {
+ "checksumSHA1": "3MKQfzebrexFB4O7eRWdonIHlng=",
+ "path": "google.golang.org/api/support/bundler",
+ "revision": "6d1d6519126dbfb2fb9f98b6e4d3cf84a32c105b",
+ "revisionTime": "2019-04-10T00:07:50Z"
+ },
+ {
+ "checksumSHA1": "hOQM3ns9t81o566ge8UNFEtoXX8=",
+ "path": "google.golang.org/api/transport",
+ "revision": "6d1d6519126dbfb2fb9f98b6e4d3cf84a32c105b",
+ "revisionTime": "2019-04-10T00:07:50Z"
+ },
+ {
+ "checksumSHA1": "/yK+EuA2fmO0HLHmJgvqlWIi3Bo=",
+ "path": "google.golang.org/api/transport/grpc",
+ "revision": "6d1d6519126dbfb2fb9f98b6e4d3cf84a32c105b",
+ "revisionTime": "2019-04-10T00:07:50Z"
+ },
+ {
+ "checksumSHA1": "a+PkIGoiF+p+ghmjmA9gUkMFwYQ=",
+ "path": "google.golang.org/api/transport/http",
+ "revision": "6d1d6519126dbfb2fb9f98b6e4d3cf84a32c105b",
+ "revisionTime": "2019-04-10T00:07:50Z"
+ },
+ {
+ "checksumSHA1": "sJcKCvjPtoysqyelsB2CQzC5oQI=",
+ "path": "google.golang.org/api/transport/http/internal/propagation",
+ "revision": "6d1d6519126dbfb2fb9f98b6e4d3cf84a32c105b",
+ "revisionTime": "2019-04-10T00:07:50Z"
+ },
{
"checksumSHA1": "N3KZEuQ9O1QwJXcCJbe7Czwroo4=",
"path": "google.golang.org/appengine",
@@ -1213,12 +1325,24 @@
"revision": "4f7eeb5305a4ba1966344836ba4af9996b7b4e05",
"revisionTime": "2016-08-19T23:33:10Z"
},
+ {
+ "checksumSHA1": "HOXg4L2NYsuRGJ8AO4JnPN4EHJs=",
+ "path": "google.golang.org/appengine/internal/socket",
+ "revision": "54a98f90d1c46b7731eb8fb305d2a321c30ef610",
+ "revisionTime": "2019-02-15T20:43:44Z"
+ },
{
"checksumSHA1": "QtAbHtHmDzcf6vOV9eqlCpKgjiw=",
"path": "google.golang.org/appengine/internal/urlfetch",
"revision": "a2f4131514e563cedfdb6e7d267df9ad48591e93",
"revisionTime": "2017-05-22T22:48:38Z"
},
+ {
+ "checksumSHA1": "MharNMGnQusRPdmBYXDxz2cCHPU=",
+ "path": "google.golang.org/appengine/socket",
+ "revision": "54a98f90d1c46b7731eb8fb305d2a321c30ef610",
+ "revisionTime": "2019-02-15T20:43:44Z"
+ },
{
"checksumSHA1": "akOV9pYnCbcPA8wJUutSQVibdyg=",
"path": "google.golang.org/appengine/urlfetch",
@@ -1262,16 +1386,22 @@
"revisionTime": "2018-02-06T00:51:23Z"
},
{
- "checksumSHA1": "+R6PE+vsmj4UhfSKEnzicISaVIM=",
+ "checksumSHA1": "3LxgquY6RHEmpAx84g7CCDEdBwQ=",
"path": "google.golang.org/genproto/googleapis/api/monitoredres",
- "revision": "2b5a72b8730b0b16380010cfe5286c42108d88e7",
- "revisionTime": "2018-02-06T00:51:23Z"
+ "revision": "64821d5d210748c883cd2b809589555ae4654203",
+ "revisionTime": "2019-04-04T17:22:33Z"
+ },
+ {
+ "checksumSHA1": "xrrg1u2NhjEEehjMTFItS+xao2M=",
+ "path": "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2",
+ "revision": "64821d5d210748c883cd2b809589555ae4654203",
+ "revisionTime": "2019-04-04T17:22:33Z"
},
{
- "checksumSHA1": "kfGa26sminYYrB8Q9i2q2CeAPF4=",
+ "checksumSHA1": "V7VPn79/CR4r2DA9lQiayg3t0OY=",
"path": "google.golang.org/genproto/googleapis/monitoring/v3",
- "revision": "2b5a72b8730b0b16380010cfe5286c42108d88e7",
- "revisionTime": "2018-02-06T00:51:23Z"
+ "revision": "df60624c1e9b9d2973e889c7a1cff73155da81c4",
+ "revisionTime": "2018-03-06T02:09:42Z"
},
{
"checksumSHA1": "Tc3BU26zThLzcyqbVtiSEp7EpU8=",